author      Steve Block <steveblock@google.com>    2009-12-15 09:54:21 +0000
committer   Steve Block <steveblock@google.com>    2009-12-15 09:54:21 +0000
commit      d0582a6c46733687d045e4188a1bcd0123c758a1 (patch)
tree        4139657adad206f69647f3d03f6fb2da2e8ae14e
parent      3ce2e2076e8e3e60cf1810eec160ea2d8557e9e7 (diff)
download    android_external_v8-d0582a6c46733687d045e4188a1bcd0123c758a1.tar.gz
            android_external_v8-d0582a6c46733687d045e4188a1bcd0123c758a1.tar.bz2
            android_external_v8-d0582a6c46733687d045e4188a1bcd0123c758a1.zip
Update V8 to r3431 as required by WebKit r51976.
Change-Id: I567392c3f8c0a0d5201a4249611ac4ccf468cd5b
-rw-r--r--  AUTHORS | 1
-rw-r--r--  ChangeLog | 83
-rwxr-xr-x  SConstruct | 24
-rw-r--r--  V8_MERGE_REVISION | 4
-rw-r--r--  include/v8-debug.h | 17
-rw-r--r--  include/v8.h | 50
-rwxr-xr-x  src/SConscript | 5
-rw-r--r--  src/accessors.cc | 63
-rw-r--r--  src/accessors.h | 10
-rw-r--r--  src/allocation.cc | 6
-rw-r--r--  src/allocation.h | 2
-rw-r--r--  src/api.cc | 118
-rw-r--r--  src/api.h | 9
-rw-r--r--  src/arguments.h | 6
-rw-r--r--  src/arm/assembler-arm-inl.h | 8
-rw-r--r--  src/arm/assembler-arm.cc | 294
-rw-r--r--  src/arm/assembler-arm.h | 182
-rw-r--r--  src/arm/builtins-arm.cc | 44
-rw-r--r--  src/arm/codegen-arm-inl.h | 13
-rw-r--r--  src/arm/codegen-arm.cc | 732
-rw-r--r--  src/arm/codegen-arm.h | 52
-rw-r--r--  src/arm/constants-arm.cc | 20
-rw-r--r--  src/arm/constants-arm.h | 51
-rw-r--r--  src/arm/cpu-arm.cc | 3
-rw-r--r--  src/arm/debug-arm.cc | 2
-rw-r--r--  src/arm/disasm-arm.cc | 199
-rw-r--r--  src/arm/fast-codegen-arm.cc | 1635
-rw-r--r--  src/arm/frames-arm.cc | 15
-rw-r--r--  src/arm/frames-arm.h | 2
-rw-r--r--  src/arm/ic-arm.cc | 13
-rw-r--r--  src/arm/macro-assembler-arm.cc | 82
-rw-r--r--  src/arm/macro-assembler-arm.h | 20
-rw-r--r--  src/arm/regexp-macro-assembler-arm.cc | 27
-rw-r--r--  src/arm/regexp-macro-assembler-arm.h | 15
-rw-r--r--  src/arm/simulator-arm.cc | 345
-rw-r--r--  src/arm/simulator-arm.h | 79
-rw-r--r--  src/arm/stub-cache-arm.cc | 5
-rw-r--r--  src/arm/virtual-frame-arm.cc | 35
-rw-r--r--  src/arm/virtual-frame-arm.h | 1
-rw-r--r--  src/array.js | 36
-rw-r--r--  src/assembler.cc | 44
-rw-r--r--  src/assembler.h | 21
-rw-r--r--  src/ast.cc | 8
-rw-r--r--  src/ast.h | 57
-rw-r--r--  src/bootstrapper.cc | 71
-rw-r--r--  src/bootstrapper.h | 18
-rw-r--r--  src/builtins.cc | 6
-rw-r--r--  src/checks.cc | 2
-rw-r--r--  src/code-stubs.cc | 91
-rw-r--r--  src/code-stubs.h | 68
-rw-r--r--  src/codegen.cc | 120
-rw-r--r--  src/codegen.h | 31
-rw-r--r--  src/compiler.cc | 508
-rw-r--r--  src/compiler.h | 13
-rw-r--r--  src/conversions.cc | 31
-rw-r--r--  src/d8.cc | 8
-rw-r--r--  src/debug-agent.cc | 10
-rw-r--r--  src/debug-delay.js | 21
-rw-r--r--  src/debug.cc | 21
-rw-r--r--  src/debug.h | 16
-rw-r--r--  src/disassembler.cc | 4
-rw-r--r--  src/dtoa-config.c | 8
-rw-r--r--  src/execution.cc | 39
-rw-r--r--  src/execution.h | 41
-rw-r--r--  src/factory.cc | 10
-rw-r--r--  src/factory.h | 9
-rw-r--r--  src/fast-codegen.cc | 426
-rw-r--r--  src/fast-codegen.h | 64
-rw-r--r--  src/flag-definitions.h | 11
-rw-r--r--  src/flags.cc | 4
-rw-r--r--  src/frames.cc | 22
-rw-r--r--  src/frames.h | 25
-rw-r--r--  src/global-handles.cc | 139
-rw-r--r--  src/global-handles.h | 28
-rw-r--r--  src/globals.h | 30
-rw-r--r--  src/handles.cc | 95
-rw-r--r--  src/handles.h | 20
-rw-r--r--  src/heap-inl.h | 4
-rw-r--r--  src/heap-profiler.cc | 38
-rw-r--r--  src/heap-profiler.h | 4
-rw-r--r--  src/heap.cc | 427
-rw-r--r--  src/heap.h | 165
-rw-r--r--  src/ia32/assembler-ia32-inl.h | 2
-rw-r--r--  src/ia32/assembler-ia32.cc | 80
-rw-r--r--  src/ia32/assembler-ia32.h | 43
-rw-r--r--  src/ia32/builtins-ia32.cc | 73
-rw-r--r--  src/ia32/codegen-ia32.cc | 811
-rw-r--r--  src/ia32/codegen-ia32.h | 104
-rw-r--r--  src/ia32/debug-ia32.cc | 8
-rw-r--r--  src/ia32/disasm-ia32.cc | 312
-rw-r--r--  src/ia32/fast-codegen-ia32.cc | 1634
-rw-r--r--  src/ia32/frames-ia32.cc | 13
-rw-r--r--  src/ia32/frames-ia32.h | 2
-rw-r--r--  src/ia32/ic-ia32.cc | 39
-rw-r--r--  src/ia32/macro-assembler-ia32.cc | 247
-rw-r--r--  src/ia32/macro-assembler-ia32.h | 58
-rw-r--r--  src/ia32/regexp-macro-assembler-ia32.cc | 32
-rw-r--r--  src/ia32/register-allocator-ia32.cc | 4
-rw-r--r--  src/ia32/simulator-ia32.h | 9
-rw-r--r--  src/ia32/stub-cache-ia32.cc | 50
-rw-r--r--  src/ia32/virtual-frame-ia32.cc | 18
-rw-r--r--  src/ic.cc | 3
-rw-r--r--  src/interpreter-irregexp.cc | 23
-rw-r--r--  src/jsregexp.cc | 157
-rw-r--r--  src/jsregexp.h | 11
-rw-r--r--  src/list.h | 5
-rw-r--r--  src/log-inl.h | 2
-rw-r--r--  src/log-utils.cc | 10
-rw-r--r--  src/log-utils.h | 7
-rw-r--r--  src/log.cc | 180
-rw-r--r--  src/log.h | 22
-rw-r--r--  src/macros.py | 4
-rw-r--r--  src/mark-compact.cc | 19
-rw-r--r--  src/math.js | 25
-rw-r--r--  src/messages.js | 65
-rw-r--r--  src/mirror-delay.js | 89
-rw-r--r--  src/mksnapshot.cc | 106
-rw-r--r--  src/objects-debug.cc | 77
-rw-r--r--  src/objects-inl.h | 255
-rw-r--r--  src/objects.cc | 430
-rw-r--r--  src/objects.h | 700
-rw-r--r--  src/parser.cc | 70
-rw-r--r--  src/platform-freebsd.cc | 4
-rw-r--r--  src/platform-linux.cc | 65
-rw-r--r--  src/platform-macos.cc | 7
-rw-r--r--  src/platform-nullos.cc | 11
-rw-r--r--  src/platform-openbsd.cc | 597
-rw-r--r--  src/platform-posix.cc | 9
-rw-r--r--  src/platform-win32.cc | 40
-rw-r--r--  src/platform.h | 11
-rw-r--r--  src/prettyprinter.cc | 3
-rw-r--r--  src/regexp-macro-assembler.cc | 13
-rw-r--r--  src/regexp-macro-assembler.h | 16
-rw-r--r--  src/regexp-stack.cc | 2
-rw-r--r--  src/regexp-stack.h | 4
-rw-r--r--  src/runtime.cc | 291
-rw-r--r--  src/runtime.h | 16
-rw-r--r--  src/runtime.js | 15
-rw-r--r--  src/scanner.cc | 189
-rw-r--r--  src/scanner.h | 160
-rw-r--r--  src/scopeinfo.cc | 9
-rw-r--r--  src/scopes.cc | 18
-rw-r--r--  src/serialize.cc | 1787
-rw-r--r--  src/serialize.h | 429
-rw-r--r--  src/simulator.h | 41
-rw-r--r--  src/snapshot-common.cc | 52
-rw-r--r--  src/spaces.cc | 91
-rw-r--r--  src/spaces.h | 8
-rw-r--r--  src/string-stream.cc | 2
-rw-r--r--  src/string.js | 22
-rw-r--r--  src/stub-cache.cc | 13
-rw-r--r--  src/stub-cache.h | 4
-rw-r--r--  src/third_party/valgrind/valgrind.h | 57
-rw-r--r--  src/token.cc | 105
-rw-r--r--  src/token.h | 9
-rw-r--r--  src/top.cc | 131
-rw-r--r--  src/top.h | 56
-rw-r--r--  src/utils.cc | 17
-rw-r--r--  src/utils.h | 18
-rw-r--r--  src/v8-counters.h | 5
-rw-r--r--  src/v8.cc | 14
-rw-r--r--  src/v8.h | 2
-rw-r--r--  src/v8natives.js | 9
-rw-r--r--  src/version.cc | 6
-rw-r--r--  src/x64/assembler-x64-inl.h | 8
-rw-r--r--  src/x64/assembler-x64.cc | 21
-rw-r--r--  src/x64/assembler-x64.h | 75
-rw-r--r--  src/x64/builtins-x64.cc | 67
-rw-r--r--  src/x64/codegen-x64.cc | 644
-rw-r--r--  src/x64/codegen-x64.h | 123
-rw-r--r--  src/x64/cpu-x64.cc | 13
-rw-r--r--  src/x64/debug-x64.cc | 7
-rw-r--r--  src/x64/disasm-x64.cc | 379
-rw-r--r--  src/x64/fast-codegen-x64.cc | 1613
-rw-r--r--  src/x64/frames-x64.cc | 14
-rw-r--r--  src/x64/frames-x64.h | 2
-rw-r--r--  src/x64/ic-x64.cc | 43
-rw-r--r--  src/x64/macro-assembler-x64.cc | 69
-rw-r--r--  src/x64/macro-assembler-x64.h | 17
-rw-r--r--  src/x64/regexp-macro-assembler-x64.cc | 37
-rw-r--r--  src/x64/simulator-x64.h | 9
-rw-r--r--  src/x64/stub-cache-x64.cc | 8
-rw-r--r--  src/x64/virtual-frame-x64.cc | 17
-rw-r--r--  src/zone.h | 2
-rw-r--r--  test/cctest/SConscript | 2
-rw-r--r--  test/cctest/cctest.cc | 3
-rw-r--r--  test/cctest/cctest.h | 136
-rw-r--r--  test/cctest/cctest.status | 17
-rw-r--r--  test/cctest/test-accessors.cc | 450
-rw-r--r--  test/cctest/test-alloc.cc | 10
-rw-r--r--  test/cctest/test-api.cc | 867
-rw-r--r--  test/cctest/test-assembler-ia32.cc | 16
-rw-r--r--  test/cctest/test-assembler-x64.cc | 14
-rw-r--r--  test/cctest/test-debug.cc | 369
-rw-r--r--  test/cctest/test-disasm-ia32.cc | 23
-rw-r--r--  test/cctest/test-flags.cc | 18
-rw-r--r--  test/cctest/test-heap-profiler.cc | 4
-rw-r--r--  test/cctest/test-heap.cc | 7
-rw-r--r--  test/cctest/test-log-stack-tracer.cc | 22
-rw-r--r--  test/cctest/test-log-utils.cc | 3
-rw-r--r--  test/cctest/test-log.cc | 163
-rwxr-xr-x  test/cctest/test-macro-assembler-x64.cc | 56
-rwxr-xr-x  test/cctest/test-parsing.cc | 129
-rw-r--r--  test/cctest/test-regexp.cc | 6
-rw-r--r--  test/cctest/test-serialize.cc | 118
-rw-r--r--  test/cctest/test-strings.cc | 282
-rw-r--r--  test/cctest/test-thread-termination.cc | 45
-rw-r--r--  test/cctest/test-utils.cc | 6
-rw-r--r--  test/mjsunit/arguments-read-and-assignment.js | 164
-rw-r--r--  test/mjsunit/compiler/function-call.js (renamed from src/location.h) | 43
-rw-r--r--  test/mjsunit/compiler/globals.js | 10
-rw-r--r--  test/mjsunit/compiler/jsnatives.js | 33
-rw-r--r--  test/mjsunit/compiler/literals-assignment.js | 33
-rw-r--r--  test/mjsunit/compiler/loops.js | 35
-rw-r--r--  test/mjsunit/compiler/objectliterals.js | 57
-rw-r--r--  test/mjsunit/compiler/property-simple.js | 39
-rw-r--r--  test/mjsunit/compiler/thisfunction.js | 35
-rw-r--r--  test/mjsunit/cyrillic.js | 199
-rw-r--r--  test/mjsunit/debug-stepnext-do-while.js | 79
-rw-r--r--  test/mjsunit/deep-recursion.js | 22
-rw-r--r--  test/mjsunit/eval-typeof-non-existing.js | 5
-rw-r--r--  test/mjsunit/fuzz-natives.js | 4
-rw-r--r--  test/mjsunit/math-min-max.js | 17
-rw-r--r--  test/mjsunit/mjsunit.js | 3
-rw-r--r--  test/mjsunit/mjsunit.status | 18
-rw-r--r--  test/mjsunit/parse-int-float.js | 3
-rw-r--r--  test/mjsunit/regress/regress-124.js | 4
-rw-r--r--  test/mjsunit/regress/regress-2249423.js | 40
-rwxr-xr-x  test/mjsunit/regress/regress-485.js | 64
-rw-r--r--  test/mjsunit/regress/regress-486.js | 30
-rw-r--r--  test/mjsunit/regress/regress-490.js | 48
-rw-r--r--  test/mjsunit/regress/regress-491.js | 47
-rw-r--r--  test/mjsunit/regress/regress-492.js | 52
-rw-r--r--  test/mjsunit/regress/regress-496.js | 39
-rw-r--r--  test/mjsunit/regress/regress-502.js | 38
-rw-r--r--  test/mjsunit/regress/regress-503.js | 63
-rw-r--r--  test/mjsunit/regress/regress-515.js | 40
-rw-r--r--  test/mjsunit/regress/regress-526.js | 32
-rw-r--r--  test/mjsunit/regress/regress-540.js | 47
-rw-r--r--  test/mjsunit/regress/regress-r3391.js | 77
-rw-r--r--  test/mjsunit/string-add.js | 20
-rw-r--r--  test/mjsunit/string-charcodeat.js | 97
-rw-r--r--  test/mjsunit/string-indexof-1.js (renamed from test/mjsunit/string-indexof.js) | 43
-rw-r--r--  test/mjsunit/string-indexof-2.js | 68
-rw-r--r--  test/mjsunit/typeof.js | 40
-rw-r--r--  test/mjsunit/unicode-case-overoptimization.js | 35
-rw-r--r--  test/mozilla/testcfg.py | 2
-rw-r--r--  test/sputnik/README | 6
-rw-r--r--  test/sputnik/sputnik.status | 318
-rw-r--r--  test/sputnik/testcfg.py | 112
-rw-r--r--  tools/codemap.js | 2
-rw-r--r--  tools/gyp/v8.gyp | 54
-rwxr-xr-x  tools/js2c.py | 2
-rwxr-xr-x  tools/presubmit.py | 73
-rwxr-xr-x  tools/process-heap-prof.py | 54
-rwxr-xr-x  tools/test.py | 17
-rw-r--r--  tools/utils.py | 2
-rw-r--r--  tools/v8.xcodeproj/project.pbxproj | 16
-rw-r--r--  tools/visual_studio/v8_base.vcproj | 4
-rw-r--r--  tools/visual_studio/v8_base_arm.vcproj | 4
260 files changed, 18344 insertions, 7222 deletions
diff --git a/AUTHORS b/AUTHORS
index de8cabb0..4fd7aa5b 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -19,3 +19,4 @@ Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
+John Jozwiak <jjozwiak@codeaurora.org>
diff --git a/ChangeLog b/ChangeLog
index d13d74f5..825431cd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,84 @@
+2009-12-03: Version 2.0.3
+
+ Optimized handling and adding of strings, for-in and Array.join.
+
+ Heap serialization is now non-destructive.
+
+      Improved profiler support with information on time spent in C++
+ callbacks registered through the API.
+
+ Added commands to the debugger protocol for starting/stopping
+ profiling.
+
+ Enabled the non-optimizing compiler for top-level code.
+
+ Changed the API to only allow strings to be set as data objects on
+ Contexts and scripts to avoid potentially keeping global objects
+ around for too long (issue 528).
+
+ OpenBSD support patch by Peter Valchev <pvalchev@gmail.com>.
+
+ Fixed bugs.
+
+
+2009-11-24: Version 2.0.2
+
+ Improved profiler support.
+
+ Fixed bug that broke compilation of d8 with readline support.
+
+
+2009-11-20: Version 2.0.1
+
+ Fixed crash bug in String.prototype.replace.
+
+ Reverted a change which caused Chromium interactive ui test
+ failures.
+
+
+2009-11-18: Version 2.0.0
+
+ Added support for VFP on ARM.
+
+ Added TryCatch::ReThrow method to the API.
+
+ Reduced the size of snapshots and improved the snapshot load time.
+
+ Improved heap profiler support.
+
+ 64-bit version now supported on Windows.
+
+ Fixed a number of debugger issues.
+
+ Fixed bugs.
+
+
+2009-10-29: Version 1.3.18
+
+ Reverted a change which caused crashes in RegExp replace.
+
+ Reverted a change which caused Chromium ui_tests failure.
+
+
+2009-10-28: Version 1.3.17
+
+ Added API method to get simple heap statistics.
+
+ Improved heap profiler support.
+
+ Fixed the implementation of the resource constraint API so it
+ works when using snapshots.
+
+ Fixed a number of issues in the Windows 64-bit version.
+
+ Optimized calls to API getters.
+
+ Added valgrind notification on code modification to the 64-bit version.
+
+ Fixed issue where we logged shared library addresses on Windows at
+ startup and never used them.
+
+
2009-10-16: Version 1.3.16
X64: Convert smis to holding 32 bits of payload.
@@ -41,7 +122,7 @@
Ensure V8 is initialized before locking and unlocking threads.
Implemented a new JavaScript minifier for compressing the source of
- the built-in JavaScript. This Remove non-Open Source code from Douglas
+ the built-in JavaScript. This removes non-Open Source code from Douglas
Crockford from the project.
Added a missing optimization in StringCharAt.
diff --git a/SConstruct b/SConstruct
index 2b2ce1d0..2087a94f 100755
--- a/SConstruct
+++ b/SConstruct
@@ -149,6 +149,11 @@ LIBRARY_FLAGS = {
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
+ 'os:openbsd': {
+ 'CPPPATH' : ['/usr/local/include'],
+ 'LIBPATH' : ['/usr/local/lib'],
+ 'CCFLAGS': ['-ansi'],
+ },
'os:win32': {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
@@ -273,7 +278,7 @@ V8_EXTRA_FLAGS = {
'WARNINGFLAGS': ['/W3']
},
'arch:x64': {
- 'WARNINGFLAGS': ['/W2']
+ 'WARNINGFLAGS': ['/W3']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
@@ -299,6 +304,9 @@ MKSNAPSHOT_EXTRA_FLAGS = {
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
+ 'os:openbsd': {
+ 'LIBS': ['execinfo', 'pthread']
+ },
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
@@ -345,6 +353,9 @@ CCTEST_EXTRA_FLAGS = {
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
+ 'os:openbsd': {
+ 'LIBS': ['execinfo', 'pthread']
+ },
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
@@ -398,7 +409,11 @@ SAMPLE_FLAGS = {
},
'os:freebsd': {
'LIBPATH' : ['/usr/local/lib'],
- 'LIBS': ['execinfo', 'pthread']
+ 'LIBS': ['execinfo', 'pthread']
+ },
+ 'os:openbsd': {
+ 'LIBPATH' : ['/usr/local/lib'],
+ 'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
@@ -505,6 +520,9 @@ D8_FLAGS = {
'os:freebsd': {
'LIBS': ['pthread'],
},
+ 'os:openbsd': {
+ 'LIBS': ['pthread'],
+ },
'os:android': {
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
@@ -555,7 +573,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android'],
+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 613ecd4e..be360d84 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We sync with Chromium release revision, which has both webkit revision and V8 revision.
-http://src.chromium.org/svn/branches/229/src/DEPS@30923
-http://v8.googlecode.com/svn/branches/bleeding_edge@3121
+http://src.chromium.org/svn/releases/4.0.269.0/DEPS
+http://v8.googlecode.com/svn/branches/bleeding_edge@3431
diff --git a/include/v8-debug.h b/include/v8-debug.h
index 3c5c923b..b27bacc1 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -188,6 +188,11 @@ class EXPORT Debug {
*/
typedef void (*HostDispatchHandler)();
+ /**
+ * Callback function for the host to ensure debug messages are processed.
+ */
+ typedef void (*DebugMessageDispatchHandler)();
+
// Set a C debug event listener.
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
@@ -211,6 +216,18 @@ class EXPORT Debug {
static void SetHostDispatchHandler(HostDispatchHandler handler,
int period = 100);
+ /**
+ * Register a callback function to be called when a debug message has been
+ * received and is ready to be processed. For the debug messages to be
+ * processed V8 needs to be entered, and in certain embedding scenarios this
+ * callback can be used to make sure V8 is entered for the debug message to
+ * be processed. Note that debug messages will only be processed if there is
+ * a V8 break. This can happen automatically by using the option
+ * --debugger-auto-break.
+ */
+ static void SetDebugMessageDispatchHandler(
+ DebugMessageDispatchHandler handler);
+
/**
* Run a JavaScript function in the debugger.
* \param fun the function to call
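A minimal sketch of embedder code using the new dispatch handler; the handler
and installer names are illustrative assumptions, not part of this patch:

    #include <v8-debug.h>

    // Called by V8 when a debug message has been received and is ready to be
    // processed. Messages are only processed while V8 is in a break (e.g.
    // with --debugger-auto-break), so the handler should just signal the
    // embedder's event loop to enter V8 rather than do the work itself.
    static void OnDebugMessagePending() {
      // e.g. post a task to a thread that will enter V8.
    }

    void InstallDebugDispatchHandler() {
      v8::Debug::SetDebugMessageDispatchHandler(OnDebugMessagePending);
    }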
diff --git a/include/v8.h b/include/v8.h
index b2a3fb74..a8ee8d43 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -129,8 +129,9 @@ class Data;
namespace internal {
-class Object;
class Arguments;
+class Object;
+class Top;
}
@@ -452,8 +453,8 @@ class V8EXPORT HandleScope {
void* operator new(size_t size);
void operator delete(void*, size_t);
- // This Data class is accessible internally through a typedef in the
- // ImplementationUtilities class.
+ // This Data class is accessible internally as HandleScopeData through a
+ // typedef in the ImplementationUtilities class.
class V8EXPORT Data {
public:
int extensions;
@@ -597,7 +598,7 @@ class V8EXPORT Script {
* with the debugger as this data object is only available through the
* debugger API.
*/
- void SetData(Handle<Value> data);
+ void SetData(Handle<String> data);
};
@@ -2473,6 +2474,15 @@ class V8EXPORT TryCatch {
bool CanContinue() const;
/**
+ * Throws the exception caught by this TryCatch in a way that avoids
+ * it being caught again by this same TryCatch. As with ThrowException
+ * it is illegal to execute any JavaScript operations after calling
+ * ReThrow; the caller must return immediately to where the exception
+ * is caught.
+ */
+ Handle<Value> ReThrow();
+
+ /**
* Returns the exception caught by this try/catch block. If no exception has
* been caught an empty handle is returned.
*
@@ -2523,14 +2533,16 @@ class V8EXPORT TryCatch {
*/
void SetCaptureMessage(bool value);
- public:
- TryCatch* next_;
+ private:
+ void* next_;
void* exception_;
void* message_;
- bool is_verbose_;
- bool can_continue_;
- bool capture_message_;
- void* js_handler_;
+ bool is_verbose_ : 1;
+ bool can_continue_ : 1;
+ bool capture_message_ : 1;
+ bool rethrow_ : 1;
+
+ friend class v8::internal::Top;
};
@@ -2622,7 +2634,7 @@ class V8EXPORT Context {
* with the debugger to provide additional information on the context through
* the debugger API.
*/
- void SetData(Handle<Value> data);
+ void SetData(Handle<String> data);
Local<Value> GetData();
/**
@@ -2807,6 +2819,18 @@ template <> struct SmiConstants<8> {
const int kSmiShiftSize = SmiConstants<sizeof(void*)>::kSmiShiftSize;
const int kSmiValueSize = SmiConstants<sizeof(void*)>::kSmiValueSize;
+template <size_t ptr_size> struct InternalConstants;
+
+// Internal constants for 32-bit systems.
+template <> struct InternalConstants<4> {
+ static const int kStringResourceOffset = 3 * sizeof(void*);
+};
+
+// Internal constants for 64-bit systems.
+template <> struct InternalConstants<8> {
+ static const int kStringResourceOffset = 2 * sizeof(void*);
+};
+
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@@ -2819,7 +2843,9 @@ class Internals {
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeOffset = sizeof(void*) + sizeof(int);
- static const int kStringResourceOffset = 2 * sizeof(void*);
+ static const int kStringResourceOffset =
+ InternalConstants<sizeof(void*)>::kStringResourceOffset;
+
static const int kProxyProxyOffset = sizeof(void*);
static const int kJSObjectHeaderSize = 3 * sizeof(void*);
static const int kFullStringRepresentationMask = 0x07;
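The new TryCatch::ReThrow makes it possible to catch an exception locally,
clean up, and then propagate the exception to an enclosing handler; per the
implementation later in this patch, the actual rethrow happens when the
TryCatch is destroyed. A hypothetical usage sketch (the wrapper function and
cleanup step are assumptions):

    v8::Handle<v8::Value> CallWithCleanup(v8::Handle<v8::Function> fn,
                                          v8::Handle<v8::Object> recv) {
      v8::TryCatch try_catch;
      v8::Handle<v8::Value> result = fn->Call(recv, 0, NULL);
      if (try_catch.HasCaught()) {
        // ... release local resources ...
        // Schedule the exception to be rethrown past this TryCatch; no
        // further JavaScript may run before we return.
        return try_catch.ReThrow();
      }
      return result;
    }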
diff --git a/src/SConscript b/src/SConscript
index 85fd7249..3b0df171 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -159,6 +159,7 @@ SOURCES = {
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
+ 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
@@ -187,6 +188,9 @@ D8_FILES = {
'os:freebsd': [
'd8-posix.cc'
],
+ 'os:openbsd': [
+ 'd8-posix.cc'
+ ],
'os:win32': [
'd8-windows.cc'
],
@@ -264,7 +268,6 @@ def ConfigureObjectFiles():
else:
snapshot_cc = Command('snapshot.cc', [], [])
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
- libraries_obj = context.ConfigureObject(env, libraries_empty_src, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
diff --git a/src/accessors.cc b/src/accessors.cc
index 82ae702f..56cf1359 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -315,7 +315,11 @@ Object* Accessors::ScriptGetLineEnds(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
InitScriptLineEnds(script);
- return script->line_ends();
+ ASSERT(script->line_ends()->IsFixedArray());
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ Handle<FixedArray> copy = Factory::CopyFixedArray(line_ends);
+ Handle<JSArray> js_array = Factory::NewJSArrayWithElements(copy);
+ return *js_array;
}
@@ -345,29 +349,38 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
-// Accessors::ScriptGetEvalFromFunction
+// Accessors::ScriptGetEvalFromScript
//
-Object* Accessors::ScriptGetEvalFromFunction(Object* object, void*) {
+Object* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->eval_from_function();
+ if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
+ Handle<SharedFunctionInfo> eval_from_shared(
+ SharedFunctionInfo::cast(Script::cast(script)->eval_from_shared()));
+
+ if (eval_from_shared->script()->IsScript()) {
+ Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
+ return *GetScriptWrapper(eval_from_script);
+ }
+ }
+ return Heap::undefined_value();
}
-const AccessorDescriptor Accessors::ScriptEvalFromFunction = {
- ScriptGetEvalFromFunction,
+const AccessorDescriptor Accessors::ScriptEvalFromScript = {
+ ScriptGetEvalFromScript,
IllegalSetter,
0
};
//
-// Accessors::ScriptGetEvalFromPosition
+// Accessors::ScriptGetEvalFromScriptPosition
//
-Object* Accessors::ScriptGetEvalFromPosition(Object* object, void*) {
+Object* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
@@ -379,14 +392,42 @@ Object* Accessors::ScriptGetEvalFromPosition(Object* object, void*) {
// Get the function from where eval was called and find the source position
// from the instruction offset.
- Handle<Code> code(JSFunction::cast(script->eval_from_function())->code());
+ Handle<Code> code(SharedFunctionInfo::cast(
+ script->eval_from_shared())->code());
return Smi::FromInt(code->SourcePosition(code->instruction_start() +
script->eval_from_instructions_offset()->value()));
}
-const AccessorDescriptor Accessors::ScriptEvalFromPosition = {
- ScriptGetEvalFromPosition,
+const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
+ ScriptGetEvalFromScriptPosition,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptGetEvalFromFunctionName
+//
+
+
+Object* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
+ Script::cast(script)->eval_from_shared()));
+
+
+ // Find the name of the function calling eval.
+ if (!shared->name()->IsUndefined()) {
+ return shared->name();
+ } else {
+ return shared->inferred_name();
+ }
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
+ ScriptGetEvalFromFunctionName,
IllegalSetter,
0
};
diff --git a/src/accessors.h b/src/accessors.h
index 51d322ec..7a840a19 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -51,8 +51,9 @@ namespace internal {
V(ScriptCompilationType) \
V(ScriptLineEnds) \
V(ScriptContextData) \
- V(ScriptEvalFromFunction) \
- V(ScriptEvalFromPosition) \
+ V(ScriptEvalFromScript) \
+ V(ScriptEvalFromScriptPosition) \
+ V(ScriptEvalFromFunctionName) \
V(ObjectPrototype)
// Accessors contains all predefined proxy accessors.
@@ -95,8 +96,9 @@ class Accessors : public AllStatic {
static Object* ScriptGetCompilationType(Object* object, void*);
static Object* ScriptGetLineEnds(Object* object, void*);
static Object* ScriptGetContextData(Object* object, void*);
- static Object* ScriptGetEvalFromFunction(Object* object, void*);
- static Object* ScriptGetEvalFromPosition(Object* object, void*);
+ static Object* ScriptGetEvalFromScript(Object* object, void*);
+ static Object* ScriptGetEvalFromScriptPosition(Object* object, void*);
+ static Object* ScriptGetEvalFromFunctionName(Object* object, void*);
static Object* ObjectGetPrototype(Object* receiver, void*);
static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
diff --git a/src/allocation.cc b/src/allocation.cc
index 41724b68..678f4fd7 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -80,7 +80,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
- int length = strlen(str);
+ int length = StrLength(str);
char* result = NewArray<char>(length + 1);
memcpy(result, str, length * kCharSize);
result[length] = '\0';
@@ -88,8 +88,8 @@ char* StrDup(const char* str) {
}
-char* StrNDup(const char* str, size_t n) {
- size_t length = strlen(str);
+char* StrNDup(const char* str, int n) {
+ int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
memcpy(result, str, length * kCharSize);
diff --git a/src/allocation.h b/src/allocation.h
index 586c4fd0..70a3a038 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -124,7 +124,7 @@ static void DeleteArray(T* array) {
// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
// if allocation fails.
char* StrDup(const char* str);
-char* StrNDup(const char* str, size_t n);
+char* StrNDup(const char* str, int n);
// Allocation policy for allocating in the C free store using malloc
diff --git a/src/api.cc b/src/api.cc
index b457aad0..93807a7c 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -37,6 +37,7 @@
#include "platform.h"
#include "serialize.h"
#include "snapshot.h"
+#include "utils.h"
#include "v8threads.h"
#include "version.h"
@@ -125,6 +126,48 @@ static FatalErrorCallback& GetFatalErrorHandler() {
// When V8 cannot allocated memory FatalProcessOutOfMemory is called.
// The default fatal error handler is called and execution is stopped.
void i::V8::FatalProcessOutOfMemory(const char* location) {
+ i::HeapStats heap_stats;
+ int start_marker;
+ heap_stats.start_marker = &start_marker;
+ int new_space_size;
+ heap_stats.new_space_size = &new_space_size;
+ int new_space_capacity;
+ heap_stats.new_space_capacity = &new_space_capacity;
+ int old_pointer_space_size;
+ heap_stats.old_pointer_space_size = &old_pointer_space_size;
+ int old_pointer_space_capacity;
+ heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
+ int old_data_space_size;
+ heap_stats.old_data_space_size = &old_data_space_size;
+ int old_data_space_capacity;
+ heap_stats.old_data_space_capacity = &old_data_space_capacity;
+ int code_space_size;
+ heap_stats.code_space_size = &code_space_size;
+ int code_space_capacity;
+ heap_stats.code_space_capacity = &code_space_capacity;
+ int map_space_size;
+ heap_stats.map_space_size = &map_space_size;
+ int map_space_capacity;
+ heap_stats.map_space_capacity = &map_space_capacity;
+ int cell_space_size;
+ heap_stats.cell_space_size = &cell_space_size;
+ int cell_space_capacity;
+ heap_stats.cell_space_capacity = &cell_space_capacity;
+ int lo_space_size;
+ heap_stats.lo_space_size = &lo_space_size;
+ int global_handle_count;
+ heap_stats.global_handle_count = &global_handle_count;
+ int weak_global_handle_count;
+ heap_stats.weak_global_handle_count = &weak_global_handle_count;
+ int pending_global_handle_count;
+ heap_stats.pending_global_handle_count = &pending_global_handle_count;
+ int near_death_global_handle_count;
+ heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
+ int destroyed_global_handle_count;
+ heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
+ int end_marker;
+ heap_stats.end_marker = &end_marker;
+ i::Heap::RecordStats(&heap_stats);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
@@ -450,7 +493,7 @@ void Context::Exit() {
}
-void Context::SetData(v8::Handle<Value> data) {
+void Context::SetData(v8::Handle<String> data) {
if (IsDeadCheck("v8::Context::SetData()")) return;
ENTER_V8;
{
@@ -1174,7 +1217,7 @@ Local<Value> Script::Id() {
}
-void Script::SetData(v8::Handle<Value> data) {
+void Script::SetData(v8::Handle<String> data) {
ON_BAILOUT("v8::Script::SetData()", return);
LOG_API("Script::SetData");
{
@@ -1191,19 +1234,26 @@ void Script::SetData(v8::Handle<Value> data) {
v8::TryCatch::TryCatch()
- : next_(i::Top::try_catch_handler()),
+ : next_(i::Top::try_catch_handler_address()),
exception_(i::Heap::the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
- js_handler_(NULL) {
+ rethrow_(false) {
i::Top::RegisterTryCatchHandler(this);
}
v8::TryCatch::~TryCatch() {
- i::Top::UnregisterTryCatchHandler(this);
+ if (rethrow_) {
+ v8::HandleScope scope;
+ v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
+ i::Top::UnregisterTryCatchHandler(this);
+ v8::ThrowException(exc);
+ } else {
+ i::Top::UnregisterTryCatchHandler(this);
+ }
}
@@ -1217,6 +1267,13 @@ bool v8::TryCatch::CanContinue() const {
}
+v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
+ if (!HasCaught()) return v8::Local<v8::Value>();
+ rethrow_ = true;
+ return v8::Undefined();
+}
+
+
v8::Local<Value> v8::TryCatch::Exception() const {
if (HasCaught()) {
// Check for out of memory exception.
@@ -2032,11 +2089,11 @@ Local<String> v8::Object::ObjectProtoToString() {
Local<String> str = Utils::ToLocal(class_name);
const char* postfix = "]";
- size_t prefix_len = strlen(prefix);
- size_t str_len = str->Length();
- size_t postfix_len = strlen(postfix);
+ int prefix_len = i::StrLength(prefix);
+ int str_len = str->Length();
+ int postfix_len = i::StrLength(postfix);
- size_t buf_len = prefix_len + str_len + postfix_len;
+ int buf_len = prefix_len + str_len + postfix_len;
char* buf = i::NewArray<char>(buf_len);
// Write prefix.
@@ -2621,11 +2678,8 @@ bool v8::V8::Initialize() {
if (i::V8::IsRunning()) return true;
ENTER_V8;
HandleScope scope;
- if (i::Snapshot::Initialize()) {
- return true;
- } else {
- return i::V8::Initialize(NULL);
- }
+ if (i::Snapshot::Initialize()) return true;
+ return i::V8::Initialize(NULL);
}
@@ -2653,10 +2707,8 @@ bool v8::V8::IdleNotification() {
void v8::V8::LowMemoryNotification() {
-#if defined(ANDROID)
if (!i::V8::IsRunning()) return;
i::Heap::CollectAllGarbage(true);
-#endif
}
@@ -2952,7 +3004,7 @@ Local<String> v8::String::New(const char* data, int length) {
LOG_API("String::New(char)");
if (length == 0) return Empty();
ENTER_V8;
- if (length == -1) length = strlen(data);
+ if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
@@ -2975,7 +3027,7 @@ Local<String> v8::String::NewUndetectable(const char* data, int length) {
EnsureInitialized("v8::String::NewUndetectable()");
LOG_API("String::NewUndetectable(char)");
ENTER_V8;
- if (length == -1) length = strlen(data);
+ if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
result->MarkAsUndetectable();
@@ -3043,7 +3095,8 @@ static void DisposeExternalString(v8::Persistent<v8::Value> obj,
v8::String::ExternalStringResource* resource =
reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
if (resource != NULL) {
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
@@ -3073,7 +3126,8 @@ static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
v8::String::ExternalAsciiStringResource* resource =
reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
if (resource != NULL) {
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
@@ -3095,7 +3149,8 @@ Local<String> v8::String::NewExternal(
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalStringHandle(resource);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
@@ -3130,7 +3185,8 @@ Local<String> v8::String::NewExternal(
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
@@ -3185,6 +3241,10 @@ Local<v8::Object> v8::Object::New() {
Local<v8::Value> v8::Date::New(double time) {
EnsureInitialized("v8::Date::New()");
LOG_API("Date::New");
+ if (isnan(time)) {
+ // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
+ time = i::OS::nan_value();
+ }
ENTER_V8;
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj =
@@ -3248,7 +3308,7 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
EnsureInitialized("v8::String::NewSymbol()");
LOG_API("String::NewSymbol(char)");
ENTER_V8;
- if (length == -1) length = strlen(data);
+ if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::LookupSymbol(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
@@ -3257,6 +3317,10 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
Local<Number> v8::Number::New(double value) {
EnsureInitialized("v8::Number::New()");
+ if (isnan(value)) {
+ // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
+ value = i::OS::nan_value();
+ }
ENTER_V8;
i::Handle<i::Object> result = i::Factory::NewNumber(value);
return Utils::NumberToLocal(result);
@@ -3712,6 +3776,14 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
}
+void Debug::SetDebugMessageDispatchHandler(
+ DebugMessageDispatchHandler handler) {
+ EnsureInitialized("v8::Debug::SetDebugMessageDispatchHandler");
+ ENTER_V8;
+ i::Debugger::SetDebugMessageDispatchHandler(handler);
+}
+
+
Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
v8::Handle<v8::Value> data) {
if (!i::V8::IsRunning()) return Local<Value>();
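The NaN checks added to Date::New and Number::New above enforce a single
invariant: only the platform's canonical quiet NaN may enter the VM, never a
signaling NaN. The pattern as a standalone sketch (i::OS::nan_value() is V8's
internal canonical NaN, as used above):

    double CanonicalizeForVM(double value) {
      // Replace any non-canonical (possibly signaling) NaN with the
      // canonical quiet NaN before it is stored in a heap number.
      if (isnan(value)) return i::OS::nan_value();
      return value;
    }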
diff --git a/src/api.h b/src/api.h
index 1221f352..a28e1f07 100644
--- a/src/api.h
+++ b/src/api.h
@@ -125,6 +125,15 @@ static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
}
+class ApiFunction {
+ public:
+ explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
+ v8::internal::Address address() { return addr_; }
+ private:
+ v8::internal::Address addr_;
+};
+
+
v8::Arguments::Arguments(v8::Local<v8::Value> data,
v8::Local<v8::Object> holder,
v8::Local<v8::Function> callee,
diff --git a/src/arguments.h b/src/arguments.h
index d2f1bfce..3fed2231 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -77,9 +77,9 @@ class Arguments BASE_EMBEDDED {
// can.
class CustomArguments : public Relocatable {
public:
- inline CustomArguments(Object *data,
- JSObject *self,
- JSObject *holder) {
+ inline CustomArguments(Object* data,
+ JSObject* self,
+ JSObject* holder) {
values_[3] = self;
values_[2] = holder;
values_[1] = Smi::FromInt(0);
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 48cc0908..5f47cb79 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -85,7 +85,7 @@ Object* RelocInfo::target_object() {
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
@@ -245,6 +245,12 @@ Address Assembler::target_address_at(Address pc) {
}
+void Assembler::set_target_at(Address constant_pool_entry,
+ Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
void Assembler::set_target_address_at(Address pc, Address target) {
Memory::Address_at(target_address_address_at(pc)) = target;
// Intuitively, we would think it is necessary to flush the instruction cache
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index bc3b8e64..d9247288 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -42,6 +42,34 @@
namespace v8 {
namespace internal {
+// Safe default is no features.
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__)
+ // For the simulator=arm build, always use VFP since the arm simulator has
+ // VFP support.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+
+ if (OS::ArmCpuHasFeature(VFP3)) {
+ // This implementation also sets the VFP flags if
+ // runtime detection of VFP returns true.
+ supported_ |= 1u << VFP3;
+ found_by_runtime_probing_ |= 1u << VFP3;
+ }
+#endif
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
@@ -84,6 +112,57 @@ CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+Register s0 = { 0 };
+Register s1 = { 1 };
+Register s2 = { 2 };
+Register s3 = { 3 };
+Register s4 = { 4 };
+Register s5 = { 5 };
+Register s6 = { 6 };
+Register s7 = { 7 };
+Register s8 = { 8 };
+Register s9 = { 9 };
+Register s10 = { 10 };
+Register s11 = { 11 };
+Register s12 = { 12 };
+Register s13 = { 13 };
+Register s14 = { 14 };
+Register s15 = { 15 };
+Register s16 = { 16 };
+Register s17 = { 17 };
+Register s18 = { 18 };
+Register s19 = { 19 };
+Register s20 = { 20 };
+Register s21 = { 21 };
+Register s22 = { 22 };
+Register s23 = { 23 };
+Register s24 = { 24 };
+Register s25 = { 25 };
+Register s26 = { 26 };
+Register s27 = { 27 };
+Register s28 = { 28 };
+Register s29 = { 29 };
+Register s30 = { 30 };
+Register s31 = { 31 };
+
+Register d0 = { 0 };
+Register d1 = { 1 };
+Register d2 = { 2 };
+Register d3 = { 3 };
+Register d4 = { 4 };
+Register d5 = { 5 };
+Register d6 = { 6 };
+Register d7 = { 7 };
+Register d8 = { 8 };
+Register d9 = { 9 };
+Register d10 = { 10 };
+Register d11 = { 11 };
+Register d12 = { 12 };
+Register d13 = { 13 };
+Register d14 = { 14 };
+Register d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -203,10 +282,14 @@ enum {
B4 = 1 << 4,
B5 = 1 << 5,
+ B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
+ B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
@@ -523,6 +606,11 @@ static bool fits_shifter(uint32_t imm32,
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
return Serializer::enabled();
} else if (rmode == RelocInfo::NONE) {
return false;
@@ -1282,6 +1370,187 @@ void Assembler::stc2(Coprocessor coproc,
}
+// Support for VFP.
+void Assembler::fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dm = <Rt,Rt2>.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src1.is(pc) && !src2.is(pc));
+ emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
+ src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+}
+
+
+void Assembler::fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // <Rt,Rt2> = Dm.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
+ dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+}
+
+
+void Assembler::fmsr(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Sn = Rt.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src.is(pc));
+ emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
+ src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+}
+
+
+void Assembler::fmrs(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Rt = Sn.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst.is(pc));
+ emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
+ dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+}
+
+
+void Assembler::fsitod(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
+ dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
+ (0x1 & src.code())*B5 | (src.code() >> 1));
+}
+
+
+void Assembler::ftosid(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
+ 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
+ 0x5*B9 | B8 | B7 | B6 | src.code());
+}
+
+
+void Assembler::faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = faddd(Dn, Dm) double precision floating point addition.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
+ // Instruction details available in ARM DDI 0406A, A8-536.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fdivd(Dn, Dm) double precision floating point division.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
+ // Instruction details available in ARM DDI 0406A, A8-584.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fcmp(const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, Dm) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmrs(Register dst, Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-652.
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xF*B20 | B16 |
+ dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
// Pseudo instructions
void Assembler::lea(Register dst,
const MemOperand& x,
@@ -1311,6 +1580,18 @@ void Assembler::lea(Register dst,
}
+bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
+ uint32_t dummy1;
+ uint32_t dummy2;
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
// Debugging
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
@@ -1429,10 +1710,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d1df08c5..86bc18a2 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
+#include "serialize.h"
namespace v8 {
namespace internal {
@@ -102,6 +103,57 @@ extern Register sp;
extern Register lr;
extern Register pc;
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+extern Register s0;
+extern Register s1;
+extern Register s2;
+extern Register s3;
+extern Register s4;
+extern Register s5;
+extern Register s6;
+extern Register s7;
+extern Register s8;
+extern Register s9;
+extern Register s10;
+extern Register s11;
+extern Register s12;
+extern Register s13;
+extern Register s14;
+extern Register s15;
+extern Register s16;
+extern Register s17;
+extern Register s18;
+extern Register s19;
+extern Register s20;
+extern Register s21;
+extern Register s22;
+extern Register s23;
+extern Register s24;
+extern Register s25;
+extern Register s26;
+extern Register s27;
+extern Register s28;
+extern Register s29;
+extern Register s30;
+extern Register s31;
+
+extern Register d0;
+extern Register d1;
+extern Register d2;
+extern Register d3;
+extern Register d4;
+extern Register d5;
+extern Register d6;
+extern Register d7;
+extern Register d8;
+extern Register d9;
+extern Register d10;
+extern Register d11;
+extern Register d12;
+extern Register d13;
+extern Register d14;
+extern Register d15;
// Coprocessor register
struct CRegister {
@@ -372,6 +424,51 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ return (enabled_ & (1u << f)) != 0;
+ }
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(CpuFeature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ private:
+ static unsigned supported_;
+ static unsigned enabled_;
+ static unsigned found_by_runtime_probing_;
+};
+
typedef int32_t Instr;
@@ -437,6 +534,23 @@ class Assembler : public Malloced {
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address constant_pool_entry, Address target);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address constant_pool_entry,
+ Address target) {
+ set_target_at(constant_pool_entry, target);
+ }
+
+ // Here we are patching the address in the constant pool, not the actual call
+ // instruction. The address in the constant pool is the same size as a
+ // pointer.
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -452,6 +566,7 @@ class Assembler : public Malloced {
// register.
static const int kPcLoadDelta = 8;
+ static const int kJSReturnSequenceLength = 4;
// ---------------------------------------------------------------------------
// Code generation
@@ -638,6 +753,66 @@ class Assembler : public Malloced {
void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
+ // Support for VFP.
+ // All these APIs support S0 to S31 and D0 to D15.
+  // Currently these APIs do not support extended D registers, i.e., D16 to D31.
+ // However, some simple modifications can allow
+ // these APIs to support D16 to D31.
+
+ void fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmsr(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrs(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsitod(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void ftosid(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+
+ void faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fcmp(const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void vmrs(const Register dst,
+ const Condition cond = al);
+
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
@@ -665,6 +840,13 @@ class Assembler : public Malloced {
return (pc_offset() - l->pos()) / kInstrSize;
}
+ // Check whether an immediate fits an addressing mode 1 instruction.
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
// Debugging
// Mark address of the ExitJSFrame code.
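Combining the new CpuFeatures class with the VFP instructions declared above,
double-precision code would presumably be generated behind a feature check,
roughly as follows (a sketch; the wrapper function is hypothetical):

    void GenerateDoubleAdd(Assembler* assm) {
      if (!CpuFeatures::IsSupported(VFP3)) return;  // caller emits a slow path
      CpuFeatures::Scope scope(VFP3);  // enable VFP3 encodings in this scope
      assm->fmdrr(d0, r0, r1);  // d0 = <r0,r1>: first double operand
      assm->fmdrr(d1, r2, r3);  // d1 = <r2,r3>: second double operand
      assm->faddd(d0, d0, d1);  // d0 = d0 + d1
      assm->fmrrd(r0, r1, d0);  // move the result back to core registers
    }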
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d7afb37a..5389a3c5 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -284,7 +284,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@@ -949,6 +949,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1027,44 +1029,24 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
- Label no_preemption, retry_preemption;
- __ bind(&retry_preemption);
- ExternalReference stack_guard_limit_address =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r2, Operand(stack_guard_limit_address));
- __ ldr(r2, MemOperand(r2));
- __ cmp(sp, r2);
- __ b(hi, &no_preemption);
-
- // We have encountered a preemption or stack overflow already before we push
- // the array contents. Save r0 which is the Smi-tagged length of the array.
- __ push(r0);
-
- // Runtime routines expect at least one argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kStackGuard, 1);
-
- // Since we returned, it wasn't a stack overflow. Restore r0 and try again.
- __ pop(r0);
- __ b(&retry_preemption);
-
- __ bind(&no_preemption);
-
- // Eagerly check for stack-overflow before starting to push the arguments.
- // r0: number of arguments.
- // r2: stack limit.
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+  // Make r2 the space we have left. The stack might already have overflowed
+  // here, which will cause r2 to become negative.
__ sub(r2, sp, r2);
-
+ // Check if the arguments will overflow the stack.
__ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(hi, &okay);
+ __ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+ // End of stack check.
// Push current limit and index.
__ bind(&okay);
@@ -1107,6 +1089,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
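The overflow check above reduces to simple signed arithmetic; a sketch under the assumption of V8's 32-bit ARM constants (kSmiTagSize == 1, kPointerSize == 4):

    #include <cstdint>
    const int kSmiTagSize = 1;   // assumed 32-bit configuration
    const int kPointerSize = 4;

    bool ArgumentsFitOnStack(int32_t sp, int32_t real_limit, int32_t smi_argc) {
      int32_t space_left = sp - real_limit;  // may already be negative
      int32_t bytes_needed = (smi_argc >> kSmiTagSize) * kPointerSize;
      return space_left > bytes_needed;      // signed 'gt', not unsigned 'hi'
    }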
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 9ff02cb4..749f32db 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -35,18 +35,15 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control) {
- LoadCondition(expression, typeof_state, true_target, false_target,
- force_control);
+ LoadCondition(expression, true_target, false_target, force_control);
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
- Load(expression, typeof_state);
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ Load(expression);
}
@@ -60,8 +57,8 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
}
-void Reference::GetValueAndSpill(TypeofState typeof_state) {
- GetValue(typeof_state);
+void Reference::GetValueAndSpill() {
+ GetValue();
}
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 47f0e963..7c0b0c63 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
@@ -92,7 +93,6 @@ void DeferredCode::RestoreRegisters() {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -101,11 +101,9 @@ CodeGenState::CodeGenState(CodeGenerator* owner)
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target)
: owner_(owner),
- typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -144,6 +142,9 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
// cp: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
+ // Record the position for debugging purposes.
+ CodeForFunctionPosition(fun);
+
ZoneList<Statement*>* body = fun->body();
// Initialize state.
@@ -322,18 +323,32 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to keep the code coverage
// tool from instrumenting, as we rely on the code size here.
- masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
// Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+  // can be encoded in the instruction and which immediate values require
+  // the use of an additional instruction for moving the immediate to a
+  // temporary register.
+ ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
@@ -442,14 +457,13 @@ MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
ASSERT(!has_cc());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, true_target, false_target);
+ { CodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -479,13 +493,13 @@ void CodeGenerator::LoadCondition(Expression* x,
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
JumpTarget true_target;
JumpTarget false_target;
- LoadCondition(x, typeof_state, &true_target, &false_target, false);
+ LoadCondition(expr, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
@@ -552,24 +566,27 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) {
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
VirtualFrame::SpilledScope spilled_scope;
- Variable* variable = x->AsVariableProxy()->AsVariable();
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- LoadAndSpill(&property);
+ Reference ref(this, &property);
+ ref.GetValueAndSpill();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
+ frame_->SpillAll();
} else {
- LoadAndSpill(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ LoadAndSpill(expr);
}
}
@@ -1066,27 +1083,6 @@ void CodeGenerator::Comparison(Condition cc,
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
@@ -1122,22 +1118,20 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope;
- if (FLAG_check_stack) {
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- // Put the lr setup instruction in the delay slot. kInstrSize is added to
- // the implicit 8 byte offset that always applies to operations with pc and
- // gives a return address 12 bytes down.
- masm_->add(lr, pc, Operand(Assembler::kInstrSize));
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ Comment cmnt(masm_, "[ check stack");
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ // Put the lr setup instruction in the delay slot. kInstrSize is added to
+ // the implicit 8 byte offset that always applies to operations with pc and
+ // gives a return address 12 bytes down.
+ masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+ masm_->cmp(sp, Operand(ip));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm_->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
@@ -1299,8 +1293,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
@@ -1323,8 +1316,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &exit, true);
+ LoadConditionAndSpill(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
@@ -1339,8 +1331,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &else_, true);
+ LoadConditionAndSpill(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
@@ -1354,8 +1345,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &exit, false);
+ LoadConditionAndSpill(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1570,7 +1560,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
- // Compile the test.
+ // Compile the test.
switch (info) {
case ALWAYS_TRUE:
// If control can fall off the end of the body, jump back to the
@@ -1593,8 +1583,9 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
node->continue_target()->Bind();
}
if (has_valid_frame()) {
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -1633,8 +1624,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -1693,8 +1683,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -1780,25 +1769,81 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
- frame_->EmitPush(r0); // duplicate the object being enumerated
- frame_->EmitPush(r0);
+ // r0: value to be iterated over
+ frame_->EmitPush(r0); // Push the object being iterated over.
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ mov(r1, Operand(r0));
+ loop.Bind();
+ // Check that there are no elements.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r3 for the subsequent
+ // prototype load.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
+ __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
+ __ cmp(r2, ip);
+ call_runtime.Branch(eq);
+  // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ call_runtime.Branch(eq);
+ // For all objects but the receiver, check that the cache is empty.
+ // r4: empty fixed array root.
+ __ cmp(r1, r0);
+ check_prototype.Branch(eq);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ loop.Branch(ne);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
+ frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ // r0: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
__ mov(r2, Operand(r0));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
fixed_array.Branch(ne);
+ use_cache.Bind();
// Get enum cache
+ // r0: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
__ mov(r1, Operand(r0));
__ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
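The generated checks amount to a prototype-chain walk; roughly, in pseudocode (the accessors below are illustrative stand-ins for the field loads above, not actual V8 API):

    bool CanUseEnumCache(JSObject* receiver) {
      JSObject* obj = receiver;
      while (true) {
        if (obj->elements() != Heap::empty_fixed_array()) return false;
        DescriptorArray* descs = obj->map()->instance_descriptors();
        if (descs == Heap::empty_descriptor_array()) return false;
        // A smi enumeration index means no enum cache bridge is present.
        if (descs->GetEnumerationIndexField()->IsSmi()) return false;
        // Every object except the receiver must have an empty enum cache.
        if (obj != receiver && descs->GetEnumCache() != Heap::empty_fixed_array())
          return false;
        Object* proto = obj->map()->prototype();
        if (proto->IsNull()) return true;  // reached the end of the chain
        obj = JSObject::cast(proto);
      }
    }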
@@ -1863,9 +1908,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_reg(r0);
- __ mov(r0, Operand(1));
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@@ -2272,7 +2315,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height);
@@ -2303,20 +2347,19 @@ void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
- LoadAndSpill(node->then_expression(), typeof_state());
+ LoadAndSpill(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
- LoadAndSpill(node->else_expression(), typeof_state());
+ LoadAndSpill(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT(frame_->height() == original_height + 1);
@@ -2383,10 +2426,6 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
frame_->EmitPush(r0);
} else {
- // Note: We would like to keep the assert below, but it fires because of
- // some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(!slot->var()->is_dynamic());
-
// Special handling for locals allocated in registers.
__ ldr(r0, SlotOperand(slot, r2));
frame_->EmitPush(r0);
@@ -2481,7 +2520,7 @@ void CodeGenerator::VisitSlot(Slot* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlot(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
}
@@ -2500,7 +2539,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValueAndSpill(typeof_state());
+ ref.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -2836,7 +2875,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2901,7 +2940,7 @@ void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
- property.GetValueAndSpill(typeof_state());
+ property.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -3071,7 +3110,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
+ ref.GetValueAndSpill(); // receiver
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -3301,7 +3340,79 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
+ Comment(masm_, "[ GenerateFastCharCodeAt");
+
+ LoadAndSpill(args->at(0));
+ LoadAndSpill(args->at(1));
+ frame_->EmitPop(r0); // Index.
+ frame_->EmitPop(r1); // String.
+
+ Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
+
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // The 'string' was a Smi.
+
+ ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+ __ b(ne, &slow); // The index was negative or not a Smi.
+
+ __ bind(&try_again_with_new_string);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &slow);
+
+ // Now r2 has the string type.
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ // Now r3 has the length of the string. Compare with the index.
+ __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
+ __ b(le, &slow);
+
+ // Here we know the index is in range. Check that string is sequential.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ tst(r2, Operand(kStringRepresentationMask));
+ __ b(ne, &not_a_flat_string);
+
+ // Check whether it is an ASCII string.
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ tst(r2, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string. We can add without shifting since the Smi tag size is the
+ // log2 of the number of bytes in a two-byte character.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiShiftSize);
+ __ add(r1, r1, Operand(r0));
+ __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(&ascii_string);
+ __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
+ __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(&not_a_flat_string);
+ __ and_(r2, r2, Operand(kStringRepresentationMask));
+ __ cmp(r2, Operand(kConsStringTag));
+ __ b(ne, &slow);
+
+ // ConsString.
+  // Check that the right-hand side is the empty string (i.e. if this is really a
+ // flat string in a cons string). If that is not the case we would rather go
+ // to the runtime system now, to flatten the string.
+ __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
+ __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
+ __ cmp(r2, Operand(r3));
+ __ b(ne, &slow);
+
+ // Get the first of the two strings.
+ __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+ __ bind(&end);
frame_->EmitPush(r0);
}
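In outline, the fast path handles only flat sequential strings; a sketch with illustrative helper names (kSlowCase stands for the undefined fallback the caller turns into the generic runtime path):

    int FastCharCodeAt(String* str, int index) {
      while (true) {
        if (index >= str->length()) return kSlowCase;  // out of range
        if (str->IsSeqTwoByteString())
          return SeqTwoByteString::cast(str)->Get(index);
        if (str->IsSeqAsciiString())
          return SeqAsciiString::cast(str)->Get(index);
        if (str->IsConsString()) {
          ConsString* cons = ConsString::cast(str);
          if (cons->second()->length() != 0) return kSlowCase;  // not flat
          str = String::cast(cons->first());  // flat cons: retry on first part
          continue;
        }
        return kSlowCase;  // other representations go to the runtime
      }
    }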
@@ -3325,6 +3436,51 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r1);
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ true_target()->Branch(eq);
+
+ Register map_reg = r2;
+ __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ false_target()->Branch(eq);
+
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ false_target()->Branch(lt);
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ cc_reg_ = le;
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
+ cc_reg_ = eq;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
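What GenerateIsObject computes, as a sketch (the type and map names are V8's, the helper itself is illustrative):

    bool IsObjectForTypeof(Object* value) {
      if (value->IsSmi()) return false;
      if (value->IsNull()) return true;            // typeof null is "object"
      Map* map = HeapObject::cast(value)->map();
      if (map->is_undetectable()) return false;    // behaves like undefined
      InstanceType type = map->instance_type();
      return FIRST_JS_OBJECT_TYPE <= type && type <= LAST_JS_OBJECT_TYPE;
    }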
@@ -3403,6 +3559,17 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3476,7 +3643,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (op == Token::NOT) {
LoadConditionAndSpill(node->expression(),
- NOT_INSIDE_TYPEOF,
false_target(),
true_target(),
true);
@@ -3490,9 +3656,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@@ -3500,9 +3664,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@@ -3514,9 +3676,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@@ -3566,9 +3726,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
continue_label.Jump();
smi_label.Bind();
@@ -3590,9 +3748,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
break;
}
@@ -3637,7 +3793,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
ASSERT(frame_->height() == original_height + 1);
return;
}
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
frame_->EmitPop(r0);
JumpTarget slow;
@@ -3677,9 +3833,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@@ -3731,7 +3885,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (op == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
&is_true,
false_target(),
false);
@@ -3767,7 +3920,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
is_true.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3779,7 +3931,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (op == Token::OR) {
JumpTarget is_false;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
true_target(),
&is_false,
false);
@@ -3815,7 +3966,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
is_false.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -4000,28 +4150,35 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
+ true_target()->Branch(eq);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r1, ip);
true_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
+ false_target()->Branch(eq);
+
// It can be an undetectable object.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
@@ -4062,9 +4219,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
}
@@ -4114,7 +4269,7 @@ Handle<String> Reference::GetName() {
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
@@ -4129,16 +4284,11 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof. If
- // there is a chance that reference errors can be thrown below, we
- // must distinguish between the two kinds of loads (typeof expression
- // loads must not throw a reference error).
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
@@ -4157,9 +4307,6 @@ void Reference::GetValue(TypeofState typeof_state) {
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
-
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
VirtualFrame* frame = cgen_->frame();
@@ -4495,7 +4642,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
@@ -4568,6 +4715,22 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cc != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but (undefined <= undefined)
+ // == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(r2));
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
+ }
+ __ mov(pc, Operand(lr)); // Return.
+ }
}
}
__ bind(&return_equal);
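A sketch of the result convention used above, assuming V8's LESS/EQUAL/GREATER are the small integers -1/0/1: returning a wrong-signed value on purpose makes the relational comparison come out false.

    int ResultForIdenticalUndefined(Condition cc) {
      if (cc == le) return GREATER;  // undefined <= undefined must be false
      if (cc == ge) return LESS;     // undefined >= undefined must be false
      return EQUAL;                  // equality on identical values holds
    }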
@@ -4645,9 +4808,17 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a number.
__ push(lr);
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ } else {
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
+
// r3 and r2 are rhs as double.
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -4675,9 +4846,16 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ push(lr);
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ } else {
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
__ pop(lr);
// Fall through to both_loaded_as_doubles.
}
@@ -4880,9 +5058,23 @@ void CompareStub::Generate(MacroAssembler* masm) {
// fall through if neither is a NaN. Also binds rhs_not_nan.
EmitNanCheck(masm, &rhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ __ fcmp(d6, d7);
+ __ vmrs(pc);
+ __ mov(r0, Operand(0), LeaveCC, eq);
+ __ mov(r0, Operand(1), LeaveCC, lt);
+ __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+ } else {
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -4935,7 +5127,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@@ -4982,24 +5173,74 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ jmp(&do_the_call); // Tail call. No return.
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
+
+  // Push the arguments onto the stack.
__ push(r1);
__ push(r0);
- __ mov(r0, Operand(1)); // Set number of arguments.
+
+ if (Token::ADD == operation) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[1] : first argument
+
+ Label not_strings, not_string1, string1;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &not_string1);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_string1);
+
+  // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
+
+ // First and second argument are strings.
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &not_strings);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_strings);
+
+  // Only second argument is a string.
+  __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+ __ bind(&not_strings);
+ }
+
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
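The Token::ADD dispatch above, in pseudocode (helper names illustrative; the STRING_ADD_RIGHT call must be reachable as the fall-through of the second string test):

    void DispatchAdd(Object* left, Object* right) {
      if (IsString(left)) {
        if (IsString(right)) {
          TailCallRuntime(Runtime::kStringAdd);      // both are strings
        } else {
          InvokeBuiltin(Builtins::STRING_ADD_LEFT);  // only left is a string
        }
      } else if (IsString(right)) {
        InvokeBuiltin(Builtins::STRING_ADD_RIGHT);   // only right is a string
      } else {
        InvokeBuiltin(generic_builtin);              // fall back to generic ADD
      }
    }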
@@ -5027,12 +5268,20 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
@@ -5052,12 +5301,19 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
@@ -5066,6 +5322,38 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// r5: Address of heap number for result.
+
+ if (CpuFeatures::IsSupported(VFP3) &&
+ ((Token::MUL == operation) ||
+ (Token::DIV == operation) ||
+ (Token::ADD == operation) ||
+ (Token::SUB == operation))) {
+ CpuFeatures::Scope scope(VFP3);
+    // ARMv7 VFP3 instructions to implement double precision add, subtract,
+    // multiply and divide.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ if (Token::MUL == operation) {
+ __ fmuld(d5, d6, d7);
+ } else if (Token::DIV == operation) {
+ __ fdivd(d5, d6, d7);
+ } else if (Token::ADD == operation) {
+ __ faddd(d5, d6, d7);
+ } else if (Token::SUB == operation) {
+ __ fsubd(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+
+ __ fmrrd(r0, r1, d5);
+
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
+ __ mov(pc, lr);
+ return;
+ }
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -5134,38 +5422,49 @@ static void GetInt32(MacroAssembler* masm,
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
-
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ __ rsb(dest, dest, Operand(30));
+ }
__ bind(&right_exponent);
- // Get the top bits of the mantissa.
- __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to take.
- // We just orred in the implicit bit so that took care of one and we want to
- // leave the sign bit 0 so we subtract 2 bits from the shift distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't actually
- // need this because the bits get shifted out again, but it's probably slower
- // to test than just to do it.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- __ mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions implementing double precision to integer
+ // conversion using round to zero.
+ __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ __ fmdrr(d7, scratch2, scratch);
+ __ ftosid(s15, d7);
+ __ fmrs(dest, s15);
+ } else {
+ // Get the top bits of the mantissa.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the last 10 bits.
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ }
__ bind(&done);
}
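The non-VFP path recovers the integer part of the double from its raw bits; a standalone sketch for the exponent range the fast path accepts (illustrative, ignoring the cases the stub sends to the slow path):

    #include <stdint.h>
    int32_t TruncateSmallDoubleToInt32(uint64_t bits) {
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;  // |value| < 1 truncates to zero
      // Restore the implicit 1 above the 52 stored mantissa bits.
      uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
      // Shift out the fraction bits; the fast path only sees exponents
      // 0..30, so the shift count stays in range.
      int32_t result = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (bits >> 63) ? -result : result;
    }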
-
// For bitwise ops where the inputs are not both Smis, we try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32-bit signed value. We truncate towards zero as required
@@ -5182,7 +5481,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r1, r3, r4, r5, &slow);
+ GetInt32(masm, r1, r3, r5, r4, &slow);
__ jmp(&done_checking_r1);
__ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1));
@@ -5192,7 +5491,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r0, r2, r4, r5, &slow);
+ GetInt32(masm, r0, r2, r5, r4, &slow);
__ jmp(&done_checking_r0);
__ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1));
@@ -5277,7 +5576,6 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5659,7 +5957,6 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
// Enter runtime system.
__ bind(&slow);
__ push(r0);
- __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(&not_smi);
@@ -5797,7 +6094,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -5857,7 +6154,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
// check if we should retry or throw exception
Label retry;
@@ -5903,12 +6200,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// this by performing a garbage collection and retrying the
// builtin once.
- StackFrame::Type frame_type = is_debug_break
- ? StackFrame::EXIT_DEBUG
- : StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -5923,7 +6220,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -5932,7 +6229,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -5943,7 +6240,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -6135,7 +6432,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Slow-case. Tail call builtin.
__ bind(&slow);
- __ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
@@ -6178,7 +6474,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ b(eq, &adaptor);
// Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
+ // through register r0. Use unsigned comparison to get negative
// check for free.
__ cmp(r1, r0);
__ b(cs, &slow);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index e0799508..ba7f9362 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED {
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
- inline void GetValueAndSpill(TypeofState typeof_state);
+ inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -112,10 +112,8 @@ class CodeGenState BASE_EMBEDDED {
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state has its own typeof state and pair of branch
- // labels.
+ // state. The new state has its own pair of branch labels.
CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target);
@@ -123,13 +121,11 @@ class CodeGenState BASE_EMBEDDED {
// previous state.
~CodeGenState();
- TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
CodeGenerator* owner_;
- TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
CodeGenState* previous_;
@@ -169,8 +165,8 @@ class CodeGenerator: public AstVisitor {
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -191,10 +187,6 @@ class CodeGenerator: public AstVisitor {
static const int kUnknownIntValue = -1;
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 4;
-
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -210,7 +202,6 @@ class CodeGenerator: public AstVisitor {
// State
bool has_cc() const { return cc_reg_ != al; }
- TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
@@ -259,25 +250,22 @@ class CodeGenerator: public AstVisitor {
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ inline void LoadAndSpill(Expression* expression);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
inline void LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control);
@@ -331,7 +319,6 @@ class CodeGenerator: public AstVisitor {
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -347,6 +334,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -377,6 +366,9 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -391,6 +383,7 @@ class CodeGenerator: public AstVisitor {
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -433,6 +426,27 @@ class CodeGenerator: public AstVisitor {
};
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) {}
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#if defined(DEBUG)
+ void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
+#endif // defined(DEBUG)
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 964bfe14..89ff7c08 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -67,6 +67,26 @@ const char* Registers::Name(int reg) {
}
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* VFPRegisters::names_[kNumVFPRegisters] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+};
+
+
+const char* VFPRegisters::Name(int reg) {
+ ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg];
+}
+
+
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
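Given the table layout (32 single-precision names followed by 16 double-precision names, kNumVFPRegisters == 48), a double register is presumably looked up at an offset of 32; an illustrative usage sketch:

    const char* DoubleRegisterName(int d) {
      // Assumes callers index d0..d15 after the 32 single registers.
      return assembler::arm::VFPRegisters::Name(32 + d);
    }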
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 6bd0d008..94322073 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -43,24 +43,27 @@
# define USE_THUMB_INTERWORK 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_7A__) || \
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7__)
+#if defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || \
+ defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6ZK__) || \
+ defined(__ARM_ARCH_6T2__) || \
+ defined(CAN_USE_ARMV7_INSTRUCTIONS)
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(CAN_USE_ARMV6_INSTRUCTIONS)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
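Reviewer note: the reordering above makes the capability macros form an implication chain (ARMv7 implies ARMv6, which implies ARMv5/Thumb) instead of three independent architecture lists. A purely illustrative compile-time check of that invariant, not part of the patch, could read:

#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && !defined(CAN_USE_ARMV6_INSTRUCTIONS)
# error "ARMv7 support should imply ARMv6 support"
#endif
#if defined(CAN_USE_ARMV6_INSTRUCTIONS) && !defined(CAN_USE_ARMV5_INSTRUCTIONS)
# error "ARMv6 support should imply ARMv5 support"
#endif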
// Simulator should support ARM5 instructions.
@@ -75,6 +78,9 @@ namespace arm {
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
+// VFP support.
+static const int kNumVFPRegisters = 48;
+
// PC is register 15.
static const int kPCRegister = 15;
static const int kNoRegister = -1;
@@ -231,6 +237,16 @@ class Instr {
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
+ inline int VnField() const { return Bits(19, 16); }
+ inline int VmField() const { return Bits(3, 0); }
+ inline int VdField() const { return Bits(15, 12); }
+ inline int NField() const { return Bit(7); }
+ inline int MField() const { return Bit(5); }
+ inline int DField() const { return Bit(22); }
+ inline int RtField() const { return Bits(15, 12); }
+
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
return static_cast<Opcode>(Bits(24, 21));
@@ -307,7 +323,7 @@ class Registers {
struct RegisterAlias {
int reg;
- const char *name;
+ const char* name;
};
private:
@@ -315,6 +331,15 @@ class Registers {
static const RegisterAlias aliases_[];
};
+// Helper functions for converting between VFP register numbers and names.
+class VFPRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ private:
+ static const char* names_[kNumVFPRegisters];
+};
} } // namespace assembler::arm
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index cafefce4..a5a358b3 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -33,12 +33,13 @@
#include "v8.h"
#include "cpu.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
void CPU::Setup() {
- // Nothing to do.
+ CpuFeatures::Probe();
}
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index ef336539..fc9808d5 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -61,7 +61,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- CodeGenerator::kJSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 64314837..2f9e78f5 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -97,6 +97,10 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
+ void PrintSRegister(int reg);
+ void PrintDRegister(int reg);
+ int FormatVFPRegister(Instr* instr, const char* format);
+ int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
@@ -121,6 +125,10 @@ class Decoder {
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
+ // For VFP support.
+ void DecodeTypeVFP(Instr* instr);
+ void DecodeType6CoprocessorIns(Instr* instr);
+
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
@@ -171,6 +179,16 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
+// Print the VFP S register name according to the active name converter.
+void Decoder::PrintSRegister(int reg) {
+ Print(assembler::arm::VFPRegisters::Name(reg));
+}
+
+// Print the VFP D register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) {
+ Print(assembler::arm::VFPRegisters::Name(reg + 32));
+}
+
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
@@ -290,6 +308,10 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
int reg = instr->RmField();
PrintRegister(reg);
return 2;
+ } else if (format[1] == 't') { // 'rt: Rt register
+ int reg = instr->RtField();
+ PrintRegister(reg);
+ return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
@@ -315,6 +337,39 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
}
+// Handle all VFP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
+ ASSERT((format[0] == 'S') || (format[0] == 'D'));
+
+ if (format[1] == 'n') {
+ int reg = instr->VnField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == 'm') {
+ int reg = instr->VmField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') {
+ int reg = instr->VdField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ }
+
+ UNREACHABLE();
+ return -1;
+}
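Reviewer note: a VFP S register number is five bits wide, so the 4-bit Vn/Vm/Vd fields carry the upper bits and the N/M/D bits supply the lowest bit; that is what the (reg << 1) | bit expressions above compute. A minimal sketch under those assumptions (helper name invented here):

// Assemble a 5-bit S register number (s0..s31) from its encoded parts.
static inline int SRegisterNumber(int four_bit_field, int low_bit) {
  return (four_bit_field << 1) | low_bit;  // e.g. Vd == 2, D == 1 -> s5
}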
+
+
+int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+ Print(format);
+ return 0;
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -459,6 +514,13 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
return 1;
}
+ case 'v': {
+ return FormatVFPinstruction(instr, format);
+ }
+ case 'S':
+ case 'D': {
+ return FormatVFPRegister(instr, format);
+ }
case 'w': { // 'w: W field of load and store instructions
if (instr->HasW()) {
Print("!");
@@ -761,8 +823,7 @@ void Decoder::DecodeType5(Instr* instr) {
void Decoder::DecodeType6(Instr* instr) {
- // Coprocessor instructions currently not supported.
- Unknown(instr);
+ DecodeType6CoprocessorIns(instr);
}
@@ -770,12 +831,10 @@ void Decoder::DecodeType7(Instr* instr) {
if (instr->Bit(24) == 1) {
Format(instr, "swi'cond 'swi");
} else {
- // Coprocessor instructions currently not supported.
- Unknown(instr);
+ DecodeTypeVFP(instr);
}
}
-
void Decoder::DecodeUnconditional(Instr* instr) {
if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
Format(instr, "'memop'h'pu 'rd, ");
@@ -837,6 +896,136 @@ void Decoder::DecodeUnconditional(Instr* instr) {
}
+// void Decoder::DecodeTypeVFP(Instr* instr)
+// Implements the following VFP instructions:
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
+// vcmp(Dd, Dm)
+// VMRS
+void Decoder::DecodeTypeVFP(Instr* instr) {
+  ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
+
+ if (instr->Bit(23) == 1) {
+ if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x5) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+ } else if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(7) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+ } else if ((instr->Bit(21) == 0x0) &&
+ (instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bits(21, 20) == 0x3) &&
+ (instr->Bits(19, 16) == 0x4) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0x1) &&
+ (instr->Bit(4) == 0x0)) {
+ Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ } else if ((instr->Bits(23, 20) == 0xF) &&
+ (instr->Bits(19, 16) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(7, 5) == 0x0) &&
+ (instr->Bit(4) == 0x1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ if (instr->Bits(15, 12) == 0xF)
+ Format(instr, "vmrs'cond APSR, FPSCR");
+ else
+ Unknown(instr); // Not used by V8.
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ Format(instr, "vmov'cond 'Sn, 'rt");
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ Format(instr, "vmov'cond 'rt, 'Sn");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ }
+}
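Reviewer note: every branch in DecodeTypeVFP above matches a fixed bit pattern. As a hedged illustration of one such pattern (helper invented, mirroring the vadd.f64 conditions exactly):

// True when instr matches the vadd.f64 pattern decoded above.
static bool IsVaddF64(Instr* instr) {
  return (instr->Bit(23) == 0) &&
         (instr->Bit(21) == 1) &&
         (instr->Bit(20) == 1) &&
         (instr->Bits(11, 9) == 0x5) &&
         (instr->Bit(8) == 1) &&
         (instr->Bit(6) == 0) &&
         (instr->Bit(4) == 0);
}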
+
+
+// Decode Type 6 coprocessor instructions.
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
+void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
+ ASSERT((instr->TypeField() == 6));
+
+ if (instr->Bit(23) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ } else if (instr->Bit(20) == 1) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 97feae5d..45cab55d 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -28,6 +28,8 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
#include "fast-codegen.h"
#include "parser.h"
@@ -52,38 +54,94 @@ namespace internal {
// frames-arm.h for its layout.
void FastCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
- // ARM does NOT call SetFunctionPosition.
+ SetFunctionPosition(fun);
+ int locals_count = fun->scope()->num_stack_slots();
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
// Adjust fp to point to caller's fp.
__ add(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- if (FLAG_check_stack) {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
for (int i = 0; i < locals_count; i++) {
__ push(ip);
}
}
- if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The kInstrSize is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- Comment cmnt(masm_, "[ Stack check");
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context
+ __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+  // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
+ }
+
+ // Check the stack for overflow or break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is
+ // added to the implicit 8 byte offset that always applies to operations
+ // with pc and gives a return address 12 bytes down.
+ { Comment cmnt(masm_, "[ Stack check");
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -95,14 +153,26 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- SetReturnPosition(fun);
+ }
+ EmitReturnSequence(function_->end_position());
+}
+
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
@@ -110,12 +180,332 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int num_parameters = function_->scope()->num_parameters();
+ int32_t sp_delta = (num_parameters + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- int num_parameters = function_->scope()->num_parameters();
- __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+ __ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+ // can be encoded in the instruction and which immediate values requires
+ // use of an additional instruction for moving the immediate to a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+ }
+}
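Reviewer note: the ASSERT_EQ above checks that the emitted return sequence has exactly the length the debugger expects. A hedged restatement of that computation (sketch only, not part of the patch):

// Expected number of instructions in the JS return sequence.
static int ExpectedReturnSequenceLength(MacroAssembler* masm,
                                        int num_parameters) {
  int32_t sp_delta = (num_parameters + 1) * kPointerSize;
  int length = Assembler::kJSReturnSequenceLength;
  // One extra mov is needed when sp_delta cannot be encoded as an
  // addressing mode 1 immediate.
  if (!masm->ImmediateFitsAddrMode1Instruction(sp_delta)) length++;
  return length;
}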
+
+
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ }
+ }
+}
+
+
+template <>
+MemOperand FastCodeGenerator::CreateSlotOperand<MemOperand>(
+ Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return MemOperand(r0, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ // Use dst as scratch.
+ MemOperand location = CreateSlotOperand<MemOperand>(source, dst);
+ __ ldr(dst, location);
+}
+
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ mov(ip, Operand(expr->handle()));
+ Move(context, ip);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ str(src, MemOperand(fp, SlotOffset(dst)));
+ break;
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ int index = Context::SlotOffset(dst->index());
+ __ mov(scratch2, Operand(index));
+ __ str(src, MemOperand(scratch1, index));
+ __ RecordWrite(scratch1, scratch2, src);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int drop_count) {
+ ASSERT(drop_count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ add(sp, sp, Operand(drop_count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(sp));
+ __ add(sp, sp, Operand(drop_count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(source);
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, true_label);
+ __ jmp(false_label);
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(ip);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ ldr(r1,
+ CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ cmp(r1, cp);
+ __ Check(eq, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(r0);
+ __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ RecordWrite(cp, r2, r0);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ Visit(decl->fun()); // Initial value for function decl.
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(r0);
+ } else {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Value in r0 is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ }
}
}
@@ -131,50 +521,18 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(r0);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ // Complete the statement based on the type of the subexpression.
+ if (expr->AsLiteral() != NULL) {
__ mov(r0, Operand(expr->AsLiteral()->handle()));
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(r0);
}
-
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- __ RecordJSReturn();
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int num_parameters = function_->scope()->num_parameters();
- __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
- __ Jump(lr);
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -182,7 +540,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -191,12 +550,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
__ mov(r0, Operand(boilerplate));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(r0);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), r0);
}
@@ -204,6 +558,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
@@ -212,30 +567,69 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
__ mov(r2, Operand(expr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(expr->location().is_nowhere());
- __ pop();
- }
-
- } else {
- Comment cmnt(masm_, "Stack slot");
+ DropAndMove(expr->context(), r0);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
- __ push(ip);
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, r0);
+ } else {
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Currently the only parameter expressions that can occur are
+    // of the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(r2, object_slot);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ mov(r1, Operand(key_literal->handle()));
+
+ // Push both as arguments to ic.
+ __ stm(db_w, sp, r2.bit() | r1.bit());
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), r0, 2);
}
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// r4 = JS function, literals array
@@ -257,10 +651,132 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(r0);
+ Move(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // r2 = literal array (0).
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r2, literal_offset));
+ // Check whether we need to materialize the object literal boilerplate.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(ip));
+ __ b(ne, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // r1 = literal index (1).
+ __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+ // r0 = constant properties (2).
+ __ mov(r0, Operand(expr->constant_properties()));
+ __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // r0 contains boilerplate.
+ // Clone boilerplate.
+ __ push(r0);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: The result is saved on top of the
+ // stack and in r0.
+  // If result_saved == false: The result is not on the stack, just in r0.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(r0);
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0.
+ break;
+ }
+ // Fall through.
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(r0);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0.
+ break;
+
+ case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::SETTER:
+ __ push(r0);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(r1);
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0
+ break;
+ }
+ }
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ pop();
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(r0);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
}
}
@@ -314,7 +830,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(r0); // Subexpression value.
@@ -329,211 +845,864 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ RecordWrite(r1, r2, r0);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ pop();
- } else if (destination.is_temporary() && !result_saved) {
- __ push(r0);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ pop();
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(r0);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in r0, variable name in r2, and the global object on
- // the stack.
- if (source.is_temporary()) {
- __ pop(r0);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ mov(r0, Operand(rhs->AsLiteral()->handle()));
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in r0, variable name in
+ // r2, and the global object on the stack.
+ __ pop(r0);
__ mov(r2, Operand(var->name()));
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(destination.is_nowhere());
- __ pop();
- }
-
- } else {
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
- // temporary on the stack.
- __ ldr(ip, MemOperand(sp));
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(ip);
+ DropAndMove(expr->context(), r0);
+
+ } else if (var->slot()) {
+ Slot* slot = var->slot();
+ ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ mov(ip, Operand(rhs->AsLiteral()->handle()));
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(ip);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ ldr(r0,
+ CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ mov(r0, cp);
+ }
+ // The context may be an intermediate context, not a function context.
+ __ ldr(r0, CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX));
+ __ pop(r1);
+ __ str(r1, CodeGenerator::ContextOperand(r0, slot->index()));
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(r1);
+ } else if (expr->context() != Expression::kEffect) {
+ __ mov(r3, r1);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+
+ // Update the write barrier for the array store with r0 as the scratch
+ // register. Skip the write barrier if the value written (r1) is a smi.
+      // The smi test is part of RecordWrite on other platforms, not on ARM.
+ Label exit;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r0, r2, r1);
+ __ bind(&exit);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), r3);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
-void FastCodeGenerator::VisitCall(Call* expr) {
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
- __ mov(r1, Operand(var->name()));
- // Push global object as receiver.
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ stm(db_w, sp, r1.bit() | r0.bit());
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(r0);
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ DropAndMove(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+    // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(r0);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+    // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ // Receiver and key are still on stack.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ Move(expr->context(), r0);
+}
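Reviewer note: the MemOperand(sp, 2 * kPointerSize) loads above assume the keyed-store calling convention, sketched here for reference (assumptions taken from the surrounding code, not stated elsewhere in the patch):

// Stack shape on entry to EmitKeyedPropertyAssignment:
//   sp[0]                : value (popped into r0 before the IC call)
//   sp[kPointerSize]     : key
//   sp[2 * kPointerSize] : receiver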
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in r2 and the receiver on the stack.
+ __ mov(r2, Operand(key->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Drop key and receiver left on the stack by IC.
+ __ pop();
+ }
+ DropAndMove(expr->context(), r0);
+}
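Reviewer note: the property load above uses the named LoadIC only when the key is a symbol literal that is not an array index, and falls back to the keyed IC otherwise. A hedged restatement (helper invented for illustration):

// True when VisitProperty can take the named-load fast path above.
static bool CanUseNamedLoad(Expression* key) {
  uint32_t dummy;
  Literal* literal = key->AsLiteral();
  return literal != NULL &&
         literal->handle()->IsSymbol() &&
         !String::cast(*literal->handle())->AsArrayIndex(&dummy);
}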
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
- __ push(r0);
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- // Record source position for debugger
+ // Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, reloc_info);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- if (expr->location().is_temporary()) {
- __ str(r0, MemOperand(sp));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ mov(r1, Operand(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ mov(r0, Operand(key->handle()));
+ __ push(r0);
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Load receiver object into r1.
+ if (prop->is_synthetic()) {
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ } else {
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ }
+ // Overwrite (object, key) with (function, receiver).
+ __ str(r0, MemOperand(sp, kPointerSize));
+ __ str(r1, MemOperand(sp));
+ EmitCallWithStub(expr);
+ }
} else {
- ASSERT(expr->location().is_nowhere());
- __ pop();
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ push(r1);
+ // Emit function call.
+ EmitCallWithStub(expr);
}
}
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ // Push global object (receiver).
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+    // In value context the argument is already on the stack,
+    // so there is nothing to do here.
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into r1 and r0.
+ __ mov(r0, Operand(arg_count));
+  // Function is in sp[arg_count + 1].
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in r0, or pop it.
+ DropAndMove(expr->context(), r0);
+}
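Reviewer note: after the pushes above the construct call sees the arguments on top, then the receiver, then the function, which is why the function is reloaded from sp[arg_count + 1]. A sketch of the assumed layout (word offsets):

// Stack at the JSConstructCall builtin:
//   sp[0] .. sp[arg_count - 1] : arguments (last argument on top)
//   sp[arg_count]              : global object (receiver)
//   sp[arg_count + 1]          : function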
+
+
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ mov(r1, Operand(expr->name()));
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
- __ push(r0);
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), r0);
+ }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
+ break;
+ case Expression::kTestValue:
+        // The value (undefined) is falsy, and kTestValue needs it on the
+        // false path.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ str(r0, MemOperand(sp));
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ mov(r0, Operand(proxy->name()));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), r0);
+ break;
}
+
+ default:
+ UNREACHABLE();
}
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(r0);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(r0);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(r0);
+ __ mov(ip, Operand(Smi::FromInt(1)));
+ __ push(ip);
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ mov(r2, Operand(proxy->AsVariable()->name()));
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+  // Restore the stack after the store IC call.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+      // Do nothing. The result is either on the stack for a value context
+      // or already discarded for an effect context.
+ break;
+ case Expression::kTest:
+ __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ b(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ b(true_label_);
+ break;
+ }
}
}
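
The kValueTest and kTestValue arms above keep the value on the stack only along the edge that needs it. The following self-contained C++ sketch models that discard discipline with a std::vector standing in for the ARM expression stack; all names are illustrative and nothing here is generated code:

#include <cassert>
#include <vector>

enum Outcome { kTrueEdge, kFalseEdge };

// kValueTest: the value survives only on the true edge.
Outcome ValueTest(std::vector<int>* stack) {
  int value = stack->back();         // __ ldr(r0, MemOperand(sp));
  if (value != 0) return kTrueEdge;  // TestAndBranch(r0, true_label_, &discard);
  stack->pop_back();                 // __ add(sp, sp, Operand(kPointerSize));
  return kFalseEdge;                 // __ b(false_label_);
}

// kTestValue: symmetric, the value survives only on the false edge.
Outcome TestValue(std::vector<int>* stack) {
  int value = stack->back();
  if (value == 0) return kFalseEdge;  // Value stays put for the false edge.
  stack->pop_back();                  // bind(&discard); drop the duplicate.
  return kTrueEdge;
}

int main() {
  std::vector<int> stack = {42};
  assert(ValueTest(&stack) == kTrueEdge && stack.size() == 1);
  stack = {0};
  assert(ValueTest(&stack) == kFalseEdge && stack.empty());
  stack = {42};
  assert(TestValue(&stack) == kTrueEdge && stack.empty());
  stack = {0};
  assert(TestValue(&stack) == kFalseEdge && stack.size() == 1);
  return 0;
}
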
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+
+ Visit(expr->left());
+ Visit(expr->right());
+ __ pop(r0);
+ __ pop(r1);
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE);
+ __ CallStub(&stub);
+ Move(expr->context(), r0);
+
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
Label done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
-
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
-
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
-
- Visit(left);
- // Call the runtime to find the boolean value of the left-hand
- // subexpression. Duplicate the value if it may be needed as the final
- // result.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
+ }
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_JS);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(eq, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = eq;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = eq;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = lt;
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(cc, true_label_);
+ __ jmp(false_label_);
}
- } else {
- ASSERT(left->AsLiteral() != NULL);
- __ mov(r0, Operand(left->AsLiteral()->handle()));
- __ push(r0);
- if (destination.is_temporary()) __ push(r0);
}
- // The left-hand value is in on top of the stack. It is duplicated on the
- // stack iff the destination location is temporary.
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &done);
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) __ pop();
- Visit(right);
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ mov(ip, Operand(right->AsLiteral()->handle()));
- __ push(ip);
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ pop();
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_false);
+ break;
}
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
+}
- __ bind(&done);
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), r0);
}
+
+#undef __
+
+
} } // namespace v8::internal
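
One detail worth calling out from the Token::NOT case above: in a pure test context, negation costs no instructions at all, because the generator simply swaps the branch targets before visiting the operand. A minimal sketch of that label-swapping idea, using a hypothetical mini-AST with booleans standing in for jump labels:

#include <cassert>

// Minimal hypothetical AST: a boolean literal or a NOT of a subexpression.
struct Expr {
  bool is_not;
  bool literal;         // Valid when is_not is false.
  const Expr* operand;  // Valid when is_not is true.
};

// Mimics visiting an expression in Expression::kTest context: control
// "jumps" to one of two labels, modeled here as booleans. NOT swaps the
// labels and recurses, exactly like the kTest case in the code above.
void CompileTest(const Expr* e, bool* true_label, bool* false_label) {
  if (e->is_not) {
    CompileTest(e->operand, false_label, true_label);  // Swap the targets.
  } else {
    *(e->literal ? true_label : false_label) = true;
  }
}

int main() {
  Expr lit = {false, true, nullptr};   // true
  Expr neg = {true, false, &lit};      // !true
  bool taken_true = false, taken_false = false;
  CompileTest(&neg, &taken_true, &taken_false);
  assert(!taken_true && taken_false);  // !true lands on the false edge.
  return 0;
}
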
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 6fde4b73..b0fa13a5 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -54,23 +54,24 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
Address sp = fp + ExitFrameConstants::kSPDisplacement;
- Type type;
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- type = EXIT_DEBUG;
+ const int offset = ExitFrameConstants::kCodeOffset;
+ Object* code = Memory::Object_at(fp + offset);
+ bool is_debug_exit = code->IsSmi();
+ if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
- } else {
- type = EXIT;
}
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- return type;
+ return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Do nothing
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
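
The exit-frame change above replaces the explicit debug marker with a code slot: a debug exit frame stores Smi zero, a normal one stores the code object, and the two are told apart by the Smi tag bit. A minimal sketch, assuming V8's tagging convention (low bit clear for Smis, set for heap pointers):

#include <cassert>
#include <cstdint>

const uintptr_t kSmiTagMask = 1;  // Low bit: 0 for a Smi, 1 for a heap pointer.

bool IsDebugExitFrame(uintptr_t code_slot) {
  return (code_slot & kSmiTagMask) == 0;  // code->IsSmi()
}

int main() {
  uintptr_t smi_zero = 0;          // Smi::FromInt(0): payload shifted in, tag 0.
  uintptr_t code_object = 0x1001;  // Some tagged heap pointer (low bit set).
  assert(IsDebugExitFrame(smi_zero));
  assert(!IsDebugExitFrame(code_object));
  return 0;
}
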
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 0874c092..4924c1ae 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -100,7 +100,7 @@ class ExitFrameConstants : public AllStatic {
static const int kSPDisplacement = -1 * kPointerSize;
// The debug marker is just above the frame pointer.
- static const int kDebugMarkOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index ba836454..c56f414a 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -107,12 +107,17 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
- __ mov(t1, Operand(t1, LSR, String::kHashShift));
+ __ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
if (i > 0) {
- __ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i)));
+      // Add the probe offset (i + i * i), shifted left, to avoid right-shifting
+      // the hash in a separate instruction. The value hash + i + i * i is then
+      // right-shifted by the 'and' instruction that follows.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ add(t1, t1, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(t1, t1, Operand(r3));
+ __ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
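
The probing rewrite above folds the hash right-shift and the capacity mask into a single ARM 'and' by pre-shifting the quadratic probe offset. A small standalone check that the folded form matches the straightforward (hash + i + i*i) & mask; the kHashShift value here is illustrative, the real one lives in objects.h:

#include <cassert>
#include <cstdint>

const int kHashShift = 2;  // Illustrative only.

uint32_t ProbeIndex(uint32_t hash_field, int i, uint32_t mask) {
  uint32_t probe = static_cast<uint32_t>(i + i * i);
  // One shifted add, then the fused and_(t1, r3, Operand(t1, LSR, shift)).
  return ((hash_field + (probe << kHashShift)) >> kHashShift) & mask;
}

int main() {
  uint32_t hash = 0x1234;
  // Low bits of the hash field hold flags; the final shift discards them.
  uint32_t hash_field = (hash << kHashShift) | 0x1;
  uint32_t mask = 0xFF;
  for (int i = 0; i < 4; i++) {
    assert(ProbeIndex(hash_field, i, mask) == ((hash + i + i * i) & mask));
  }
  return 0;
}
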
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 45c6540e..aa6570ce 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -155,6 +155,15 @@ void MacroAssembler::Ret(Condition cond) {
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ cmp(sp, Operand(ip));
+ b(lo, on_stack_overflow);
+}
+
+
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@@ -274,9 +283,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Compute the argv pointer and keep it in a callee-saved register.
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -298,8 +305,11 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // setup new frame pointer
- // Push debug marker.
- mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ mov(ip, Operand(Smi::FromInt(0)));
+ } else {
+ mov(ip, Operand(CodeObject()));
+ }
push(ip);
// Save the frame pointer and the context in top.
@@ -316,7 +326,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// Use sp as base to push.
CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
}
@@ -348,14 +358,14 @@ void MacroAssembler::AlignStack(int offset) {
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// This code intentionally clobbers r2 and r3.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
add(r3, fp, Operand(kOffset));
CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
}
@@ -784,15 +794,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
mov(scratch1, Operand(new_space_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
ldr(result, MemOperand(scratch1));
- } else {
-#ifdef DEBUG
+ } else if (FLAG_debug_code) {
// Assert that result actually contains top on entry. scratch2 is used
// immediately below so this use of scratch2 does not cause difference with
// respect to register content between debug and release mode.
ldr(scratch2, MemOperand(scratch1));
cmp(result, scratch2);
Check(eq, "Unexpected allocation top");
-#endif
}
// Calculate new top and bail out if new space is exhausted. Use result
@@ -805,7 +813,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
cmp(result, Operand(scratch2));
b(hi, gc_required);
- // Update allocation top. result temporarily holds the new top,
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ tst(result, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space");
+ }
str(result, MemOperand(scratch1));
// Tag and adjust back to start of new object.
@@ -834,15 +846,13 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
mov(scratch1, Operand(new_space_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
ldr(result, MemOperand(scratch1));
- } else {
-#ifdef DEBUG
+ } else if (FLAG_debug_code) {
// Assert that result actually contains top on entry. scratch2 is used
// immediately below so this use of scratch2 does not cause difference with
// respect to register content between debug and release mode.
ldr(scratch2, MemOperand(scratch1));
cmp(result, scratch2);
Check(eq, "Unexpected allocation top");
-#endif
}
// Calculate new top and bail out if new space is exhausted. Use result
@@ -856,7 +866,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
cmp(result, Operand(scratch2));
b(hi, gc_required);
- // Update allocation top. result temporarily holds the new top,
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ tst(result, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space");
+ }
str(result, MemOperand(scratch1));
// Adjust back to start of new object.
@@ -975,6 +989,17 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg) {
+ // ARMv7 VFP3 instructions to implement integer to double conversion.
+ mov(r7, Operand(inReg, ASR, kSmiTagSize));
+ fmsr(s15, r7);
+ fsitod(d7, s15);
+ fmrrd(outLowReg, outHighReg, d7);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@@ -1141,6 +1166,9 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
@@ -1150,6 +1178,26 @@ void MacroAssembler::Abort(const char* msg) {
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // Slot is in the current function context.
+ // The context may be an intermediate context, not a function context.
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
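
LoadContext above walks the static scope chain: each hop loads the closure stored in the current context and then that closure's defining context, and the final load fetches the function context in case an intermediate (with or catch) context was reached. A pointer-chasing sketch with plain structs standing in for tagged heap objects:

#include <cassert>

struct Context;
struct JSFunction { Context* context; };
struct Context {
  JSFunction* closure;  // Slot Context::CLOSURE_INDEX.
  Context* fcontext;    // Slot Context::FCONTEXT_INDEX; self for function contexts.
};

Context* LoadContext(Context* cp, int chain_length) {
  Context* dst = cp;
  for (int i = 0; i < chain_length; i++) {
    dst = dst->closure->context;  // Closure, then its defining context.
  }
  return dst->fcontext;  // Collapse a possible intermediate context.
}

int main() {
  Context outer, inner;
  JSFunction fn;
  outer.closure = nullptr;
  outer.fcontext = &outer;
  fn.context = &outer;
  inner.closure = &fn;
  inner.fcontext = &inner;
  assert(LoadContext(&inner, 0) == &inner);
  assert(LoadContext(&inner, 1) == &outer);
  return 0;
}
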
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index e37bb5e1..09743290 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -79,6 +79,11 @@ class MacroAssembler: public Assembler {
void RecordWrite(Register object, Register offset, Register scratch);
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -87,18 +92,20 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register r0 and
+ // Enter specific kind of exit frame; either normal or debug mode.
+ // Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
  // r4, argv in r6, and the builtin function to call in r5.
- void EnterExitFrame(StackFrame::Type type);
+ void EnterExitFrame(ExitFrame::Mode mode);
// Leave the current exit frame. Expects the return value in r0.
- void LeaveExitFrame(StackFrame::Type type);
+ void LeaveExitFrame(ExitFrame::Mode mode);
// Align the stack by optionally pushing a Smi zero.
void AlignStack(int offset);
+ void LoadContext(Register dst, int context_chain_length);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -240,6 +247,11 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Uses VFP instructions to Convert a Smi to a double.
+ void IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg);
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 2e75a61a..24b6a9c8 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -29,6 +29,7 @@
#include "unicode.h"
#include "log.h"
#include "ast.h"
+#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
@@ -587,9 +588,9 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r0, Operand(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
// Handle it if the stack pointer is already below the stack limit.
@@ -1089,9 +1090,9 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r0, Operand(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
SafeCall(&check_preempt_label_, ls);
@@ -1099,14 +1100,12 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
void RegExpMacroAssemblerARM::CheckStackLimit() {
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
- }
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ mov(r0, Operand(stack_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ cmp(backtrack_stackpointer(), Operand(r0));
+ SafeCall(&stack_overflow_label_, ls);
}
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 0711ac19..f70bc055 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -260,6 +260,21 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
};
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
#endif // V8_NATIVE_REGEXP
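
RegExpCEntryStub follows the usual CodeStub contract: MajorKey selects the stub family, MinorKey encodes its parameters, and together they key the compiled-code cache. A sketch of that keying idea; the enum values and bit width here are hypothetical stand-ins, not V8's actual constants:

#include <cassert>
#include <cstdint>

enum MajorKey { kCEntry = 1, kRegExpCEntry = 2 };  // Hypothetical values.
const int kMajorBits = 6;

uint32_t StubKey(MajorKey major, uint32_t minor) {
  return (minor << kMajorBits) | static_cast<uint32_t>(major);
}

int main() {
  // RegExpCEntryStub's MinorKey() is always 0, so every instance maps to
  // the same key and therefore shares one cached code object.
  assert(StubKey(kRegExpCEntry, 0) == StubKey(kRegExpCEntry, 0));
  assert(StubKey(kRegExpCEntry, 0) != StubKey(kCEntry, 0));
  return 0;
}
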
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 22bec822..9dc417bb 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -342,6 +342,11 @@ void Debugger::Debug() {
PrintF("Z flag: %d; ", sim_->z_flag_);
PrintF("C flag: %d; ", sim_->c_flag_);
PrintF("V flag: %d\n", sim_->v_flag_);
+ PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
+ PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
+ PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
+ PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
+ PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "unstop") == 0) {
intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
@@ -429,6 +434,24 @@ Simulator::Simulator() {
c_flag_ = false;
v_flag_ = false;
+  // Initialize VFP registers.
+  // All registers are initialized to zero to start with, even though
+  // s_registers_ and d_registers_ share the same physical registers in
+  // the target.
+ for (int i = 0; i < num_s_registers; i++) {
+ vfp_register[i] = 0;
+ }
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+
+ inv_op_vfp_flag_ = false;
+ div_zero_vfp_flag_ = false;
+ overflow_vfp_flag_ = false;
+ underflow_vfp_flag_ = false;
+ inexact_vfp_flag_ = false;
+
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
@@ -545,6 +568,99 @@ int32_t Simulator::get_pc() const {
}
+// Getting from and setting into VFP registers.
+void Simulator::set_s_register(int sreg, unsigned int value) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ vfp_register[sreg] = value;
+}
+
+
+unsigned int Simulator::get_s_register(int sreg) const {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ return vfp_register[sreg];
+}
+
+
+void Simulator::set_s_register_from_float(int sreg, const float flt) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ // Read the bits from the single precision floating point value
+ // into the unsigned integer element of vfp_register[] given by index=sreg.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &flt, sizeof(vfp_register[0]));
+ memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+}
+
+
+void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ // Read the bits from the integer value into the unsigned integer element of
+ // vfp_register[] given by index=sreg.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &sint, sizeof(vfp_register[0]));
+ memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+}
+
+
+void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
+ ASSERT((dreg >= 0) && (dreg < num_d_registers));
+ // Read the bits from the double precision floating point value into the two
+  // consecutive unsigned integer elements of vfp_register[] given by index
+  // 2*dreg and 2*dreg+1.
+ char buffer[2 * sizeof(vfp_register[0])];
+ memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
+#else
+ memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
+ memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
+#endif
+}
+
+
+float Simulator::get_float_from_s_register(int sreg) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+
+ float sm_val = 0.0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the single precision floating point value and return it.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
+ memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
+ return(sm_val);
+}
+
+
+int Simulator::get_sinteger_from_s_register(int sreg) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+
+ int sm_val = 0;
+ // Read the bits from the unsigned integer vfp_register[] array
+  // into the integer value and return it.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
+ memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
+ return(sm_val);
+}
+
+
+double Simulator::get_double_from_d_register(int dreg) {
+ ASSERT((dreg >= 0) && (dreg < num_d_registers));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(vfp_register[0])];
+#ifdef BIG_ENDIAN_FLOATING_POINT
+ memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
+ memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
+#else
+ memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
+#endif
+ memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
+ return(dm_val);
+}
+
+
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
@@ -772,6 +888,37 @@ bool Simulator::OverflowFrom(int32_t alu_out,
}
+// Support for VFP comparisons.
+void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
+ // All non-NaN cases.
+ if (val1 == val2) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = true;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ } else if (val1 < val2) {
+ n_flag_FPSCR_ = true;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ } else {
+ // Case when (val1 > val2).
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ }
+}
+
+
+void Simulator::Copy_FPSCR_to_APSR() {
+ n_flag_ = n_flag_FPSCR_;
+ z_flag_ = z_flag_FPSCR_;
+ c_flag_ = c_flag_FPSCR_;
+ v_flag_ = v_flag_FPSCR_;
+}
+
+
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
@@ -1154,7 +1301,7 @@ void Simulator::DecodeType01(Instr* instr) {
}
}
} else {
- UNIMPLEMENTED(); // not used by V8
+ UNIMPLEMENTED(); // Not used by V8.
}
} else {
// extra load/store instructions
@@ -1664,16 +1811,15 @@ void Simulator::DecodeType5(Instr* instr) {
void Simulator::DecodeType6(Instr* instr) {
- UNIMPLEMENTED();
+ DecodeType6CoprocessorIns(instr);
}
void Simulator::DecodeType7(Instr* instr) {
if (instr->Bit(24) == 1) {
- // Format(instr, "swi 'swi");
SoftwareInterrupt(instr);
} else {
- UNIMPLEMENTED();
+ DecodeTypeVFP(instr);
}
}
@@ -1745,6 +1891,177 @@ void Simulator::DecodeUnconditional(Instr* instr) {
}
+// void Simulator::DecodeTypeVFP(Instr* instr)
+// The following ARMv7 VFPv3 instructions are currently supported.
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
+// vcmp(Dd, Dm)
+// vmrs
+void Simulator::DecodeTypeVFP(Instr* instr) {
+  ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
+
+ int rt = instr->RtField();
+ int vm = instr->VmField();
+ int vn = instr->VnField();
+ int vd = instr->VdField();
+
+ if (instr->Bit(23) == 1) {
+ if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x5) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ double dm_val = get_double_from_d_register(vm);
+ int32_t int_value = static_cast<int32_t>(dm_val);
+ set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value);
+ } else if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(7) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ int32_t int_value = get_sinteger_from_s_register(((vm<<1) |
+ instr->MField()));
+ double dbl_value = static_cast<double>(int_value);
+ set_d_register_from_double(vd, dbl_value);
+ } else if ((instr->Bit(21) == 0x0) &&
+ (instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value / dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bits(21, 20) == 0x3) &&
+ (instr->Bits(19, 16) == 0x4) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0x1) &&
+ (instr->Bit(4) == 0x0)) {
+ double dd_value = get_double_from_d_register(vd);
+ double dm_value = get_double_from_d_register(vm);
+ Compute_FPSCR_Flags(dd_value, dm_value);
+ } else if ((instr->Bits(23, 20) == 0xF) &&
+ (instr->Bits(19, 16) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(7, 5) == 0x0) &&
+ (instr->Bit(4) == 0x1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ if (instr->Bits(15, 12) == 0xF)
+ Copy_FPSCR_to_APSR();
+ else
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value + dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value - dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value * dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ } else {
+ if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ int32_t rs_val = get_register(rt);
+ set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val);
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ int32_t int_value = get_sinteger_from_s_register(((vn<<1) |
+ instr->NField()));
+ set_register(rt, int_value);
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ }
+}
+
+
+// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
+// Decode Type 6 coprocessor instructions.
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
+void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
+ ASSERT((instr->TypeField() == 6));
+
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
+
+ if (instr->Bit(23) == 1) {
+ UNIMPLEMENTED();
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
+
+ set_s_register_from_sinteger(2*vm, rs_val);
+ set_s_register_from_sinteger((2*vm+1), rn_val);
+
+ } else if (instr->Bit(20) == 1) {
+ int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+ int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
+
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (instr->Bit(21) == 1) {
+ UNIMPLEMENTED();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
pc_modified_ = false;
@@ -1802,7 +2119,6 @@ void Simulator::InstructionDecode(Instr* instr) {
}
-//
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
@@ -1924,6 +2240,25 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
return result;
}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ int current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+
} } // namespace assembler::arm
#endif // !defined(__arm__)
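
All of the VFP register accessors added above move bits with memcpy rather than casts, so float and double values round-trip through the integer vfp_register[] array bit-exactly and without strict-aliasing violations. A minimal round-trip check of the same pattern:

#include <cassert>
#include <cstdint>
#include <cstring>

uint32_t BitsFromFloat(float f) {
  uint32_t bits;
  memcpy(&bits, &f, sizeof(bits));  // As in set_s_register_from_float.
  return bits;
}

float FloatFromBits(uint32_t bits) {
  float f;
  memcpy(&f, &bits, sizeof(f));     // As in get_float_from_s_register.
  return f;
}

int main() {
  float value = 1.5f;
  assert(FloatFromBits(BitsFromFloat(value)) == value);  // Bit-exact round trip.
  assert(BitsFromFloat(1.0f) == 0x3F800000u);  // IEEE-754 single for 1.0.
  return 0;
}
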
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index ff6bbf43..3a4bb311 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -52,6 +52,12 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
@@ -60,6 +66,10 @@ class SimulatorStack : public v8::internal::AllStatic {
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+
#else // defined(__arm__)
// When running with the simulator transition into simulated execution at this
@@ -73,6 +83,11 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
#include "constants-arm.h"
@@ -82,7 +97,6 @@ namespace arm {
class Simulator {
public:
friend class Debugger;
-
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@@ -90,7 +104,15 @@ class Simulator {
num_registers,
sp = 13,
lr = 14,
- pc = 15
+ pc = 15,
+ s0 = 0, s1, s2, s3, s4, s5, s6, s7,
+ s8, s9, s10, s11, s12, s13, s14, s15,
+ s16, s17, s18, s19, s20, s21, s22, s23,
+ s24, s25, s26, s27, s28, s29, s30, s31,
+ num_s_registers = 32,
+ d0 = 0, d1, d2, d3, d4, d5, d6, d7,
+ d8, d9, d10, d11, d12, d13, d14, d15,
+ num_d_registers = 16
};
Simulator();
@@ -106,6 +128,16 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
+ // Support for VFP.
+ void set_s_register(int reg, unsigned int value);
+ unsigned int get_s_register(int reg) const;
+ void set_d_register_from_double(int dreg, const double& dbl);
+ double get_double_from_d_register(int dreg);
+ void set_s_register_from_float(int sreg, const float dbl);
+ float get_float_from_s_register(int sreg);
+ void set_s_register_from_sinteger(int reg, const int value);
+ int get_sinteger_from_s_register(int reg);
+
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
@@ -124,6 +156,12 @@ class Simulator {
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -154,6 +192,10 @@ class Simulator {
int32_t right,
bool addition);
+ // Support for VFP.
+ void Compute_FPSCR_Flags(double val1, double val2);
+ void Copy_FPSCR_to_APSR();
+
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out);
@@ -185,6 +227,10 @@ class Simulator {
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
+ // Support for VFP.
+ void DecodeTypeVFP(Instr* instr);
+ void DecodeType6CoprocessorIns(Instr* instr);
+
// Executes one instruction.
void InstructionDecode(Instr* instr);
@@ -198,20 +244,34 @@ class Simulator {
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
- // architecture state
+ // Architecture state.
int32_t registers_[16];
bool n_flag_;
bool z_flag_;
bool c_flag_;
bool v_flag_;
- // simulator support
+ // VFP architecture state.
+ unsigned int vfp_register[num_s_registers];
+ bool n_flag_FPSCR_;
+ bool z_flag_FPSCR_;
+ bool c_flag_FPSCR_;
+ bool v_flag_FPSCR_;
+
+ // VFP FP exception flags architecture state.
+ bool inv_op_vfp_flag_;
+ bool div_zero_vfp_flag_;
+ bool overflow_vfp_flag_;
+ bool underflow_vfp_flag_;
+ bool inexact_vfp_flag_;
+
+ // Simulator support.
char* stack_;
bool pc_modified_;
int icount_;
static bool initialized_;
- // registered breakpoints
+ // Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;
};
@@ -229,6 +289,15 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit();
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ assembler::arm::Simulator::current()->PopAddress();
+ }
};
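
The simulator macros above add one level of indirection for try-catch addresses: RegisterCTryCatch pushes the native TryCatch pointer onto the simulated JS stack and returns the address of that slot, so the simulator flavor of TRY_CATCH_FROM_ADDRESS must dereference once to recover it. A sketch with ordinary memory standing in for the simulated stack:

#include <cassert>
#include <cstdint>

struct TryCatch { int dummy; };

int main() {
  TryCatch native;  // Lives on the real C++ stack.
  // RegisterCTryCatch: push the native address onto the simulated JS
  // stack and hand back the address of that slot.
  uintptr_t slot = reinterpret_cast<uintptr_t>(&native);
  uintptr_t js_address = reinterpret_cast<uintptr_t>(&slot);
  // Simulator build of TRY_CATCH_FROM_ADDRESS: one dereference undoes it.
  TryCatch* recovered = *reinterpret_cast<TryCatch**>(js_address);
  assert(recovered == &native);
  return 0;
}
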
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 8282655f..efccaf49 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -105,7 +105,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ b(eq, &miss);
// Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kLengthOffset));
+ __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
__ eor(scratch, scratch, Operand(flags));
@@ -229,10 +229,7 @@ void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
miss, &check_wrapper);
// Load length directly from the string.
- __ and_(scratch1, scratch1, Operand(kStringSizeMask));
- __ add(scratch1, scratch1, Operand(String::kHashShift));
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ mov(r0, Operand(r0, LSR, scratch1));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 97d164ee..132c8aeb 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -146,29 +146,27 @@ void VirtualFrame::AllocateStackSlots() {
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
}
- if (FLAG_check_stack) {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
for (int i = 0; i < count; i++) {
__ push(ip);
}
- if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The kInstrSize is added
- // to the implicit 8 byte offset that always applies to operations with pc
- // and gives a return address 12 bytes down.
- masm()->add(lr, pc, Operand(Assembler::kInstrSize));
- masm()->cmp(sp, Operand(r2));
- StackCheckStub stub;
- // Call the stub if lower.
- masm()->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ // Check the stack for overflow or a break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is added
+ // to the implicit 8 byte offset that always applies to operations with pc
+ // and gives a return address 12 bytes down.
+ masm()->add(lr, pc, Operand(Assembler::kInstrSize));
+ masm()->cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm()->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
+
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED();
}
@@ -245,11 +243,8 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- Result* arg_count_register,
int arg_count) {
- ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
- arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
}
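
The now-unconditional stack check relies on ARM's pc semantics: reading pc yields the current instruction's address plus 8, so add lr, pc, #kInstrSize leaves a return address 12 bytes past the add, which is just after the conditional mov that calls the stub. The arithmetic, checked:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kInstrSize = 4;
  uint32_t add_addr = 0x1000;                 // add lr, pc, #kInstrSize
  uint32_t cmp_addr = add_addr + kInstrSize;  // cmp sp, r2
  uint32_t mov_addr = cmp_addr + kInstrSize;  // mov pc, <stub>, lo
  uint32_t pc_read = add_addr + 8;            // pc reads 8 bytes ahead.
  uint32_t lr = pc_read + kInstrSize;         // 12 bytes past the add.
  assert(lr == mov_addr + kInstrSize);        // Return lands after the call.
  return 0;
}
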
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 457478da..d5230007 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -305,7 +305,6 @@ class VirtualFrame : public ZoneObject {
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
- Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes
diff --git a/src/array.js b/src/array.js
index 94d74a50..20d884ee 100644
--- a/src/array.js
+++ b/src/array.js
@@ -77,7 +77,8 @@ function SparseJoin(array, len, convert) {
var key = keys[i];
if (key != last_key) {
var e = array[key];
- builder.add(convert(e));
+ if (typeof(e) !== 'string') e = convert(e);
+ builder.add(e);
last_key = key;
}
}
@@ -114,17 +115,36 @@ function Join(array, length, separator, convert) {
if (length == 1) {
var e = array[0];
if (!IS_UNDEFINED(e) || (0 in array)) {
+ if (typeof(e) === 'string') return e;
return convert(e);
}
}
var builder = new StringBuilder();
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (i != 0) builder.add(separator);
- if (!IS_UNDEFINED(e) || (i in array)) {
- builder.add(convert(e));
+ // We pull the empty separator check outside the loop for speed!
+ if (separator.length == 0) {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (!IS_UNDEFINED(e) || (i in array)) {
+ if (typeof(e) !== 'string') e = convert(e);
+ if (e.length > 0) {
+ var elements = builder.elements;
+ elements[elements.length] = e;
+ }
+ }
+ }
+ } else {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (i != 0) builder.add(separator);
+ if (!IS_UNDEFINED(e) || (i in array)) {
+ if (typeof(e) !== 'string') e = convert(e);
+ if (e.length > 0) {
+ var elements = builder.elements;
+ elements[elements.length] = e;
+ }
+ }
}
}
return builder.generate();
@@ -136,12 +156,14 @@ function Join(array, length, separator, convert) {
function ConvertToString(e) {
+ if (typeof(e) === 'string') return e;
if (e == null) return '';
else return ToString(e);
}
function ConvertToLocaleString(e) {
+ if (typeof(e) === 'string') return e;
if (e == null) return '';
else {
// e_obj's toLocaleString might be overwritten, check if it is a function.
@@ -149,7 +171,7 @@ function ConvertToLocaleString(e) {
// See issue 877615.
var e_obj = ToObject(e);
if (IS_FUNCTION(e_obj.toLocaleString))
- return e_obj.toLocaleString();
+ return ToString(e_obj.toLocaleString());
else
return ToString(e);
}
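
The Join rewrite above hoists the empty-separator test out of the loop so the hot path never branches on the separator per element. The same shape in C++, purely illustrative; the real code also skips conversion for values that are already strings:

#include <cassert>
#include <string>
#include <vector>

std::string Join(const std::vector<std::string>& parts,
                 const std::string& separator) {
  std::string result;
  if (separator.empty()) {
    // Fast path: no per-element separator branch at all.
    for (size_t i = 0; i < parts.size(); i++) result += parts[i];
  } else {
    for (size_t i = 0; i < parts.size(); i++) {
      if (i != 0) result += separator;
      result += parts[i];
    }
  }
  return result;
}

int main() {
  assert(Join({"a", "b", "c"}, "") == "abc");
  assert(Join({"a", "b", "c"}, ",") == "a,b,c");
  return 0;
}
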
diff --git a/src/assembler.cc b/src/assembler.cc
index 34595f83..9c9ddcdd 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -174,14 +174,14 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = data_delta << kPositionTypeTagBits | tag;
+ *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
}
void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
- *--pos_ = top_tag << (kTagBits + kExtraTagBits) |
- extra_tag << kTagBits |
- kDefaultTag;
+ *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
+ extra_tag << kTagBits |
+ kDefaultTag);
}
@@ -196,7 +196,7 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
- *--pos_ = data_delta;
+ *--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
@@ -211,7 +211,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
// Use unsigned delta-encoding for pc.
- uint32_t pc_delta = rinfo->pc() - last_pc_;
+ uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
// The two most common modes are given small tags, and usually fit in a byte.
@@ -522,6 +522,10 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id)
: address_(Redirect(Builtins::c_function_address(id))) {}
+ExternalReference::ExternalReference(ApiFunction* fun)
+ : address_(Redirect(fun->address())) {}
+
+
ExternalReference::ExternalReference(Builtins::Name name)
: address_(Builtins::builtin_address(name)) {}
@@ -579,11 +583,16 @@ ExternalReference ExternalReference::roots_address() {
}
-ExternalReference ExternalReference::address_of_stack_guard_limit() {
+ExternalReference ExternalReference::address_of_stack_limit() {
return ExternalReference(StackGuard::address_of_jslimit());
}
+ExternalReference ExternalReference::address_of_real_stack_limit() {
+ return ExternalReference(StackGuard::address_of_real_jslimit());
+}
+
+
ExternalReference ExternalReference::address_of_regexp_stack_limit() {
return ExternalReference(RegExpStack::limit_address());
}
@@ -608,6 +617,27 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
}
+
+ExternalReference ExternalReference::handle_scope_extensions_address() {
+ return ExternalReference(HandleScope::current_extensions_address());
+}
+
+
+ExternalReference ExternalReference::handle_scope_next_address() {
+ return ExternalReference(HandleScope::current_next_address());
+}
+
+
+ExternalReference ExternalReference::handle_scope_limit_address() {
+ return ExternalReference(HandleScope::current_limit_address());
+}
+
+
+ExternalReference ExternalReference::scheduled_exception_address() {
+ return ExternalReference(Top::scheduled_exception_address());
+}
+
+
#ifdef V8_NATIVE_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {
diff --git a/src/assembler.h b/src/assembler.h
index 21a66dd5..aecd4cd6 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -373,6 +373,8 @@ class ExternalReference BASE_EMBEDDED {
public:
explicit ExternalReference(Builtins::CFunctionId id);
+ explicit ExternalReference(ApiFunction* ptr);
+
explicit ExternalReference(Builtins::Name name);
explicit ExternalReference(Runtime::FunctionId id);
@@ -406,7 +408,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference roots_address();
// Static variable StackGuard::address_of_jslimit()
- static ExternalReference address_of_stack_guard_limit();
+ static ExternalReference address_of_stack_limit();
+
+ // Static variable StackGuard::address_of_real_jslimit()
+ static ExternalReference address_of_real_stack_limit();
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
@@ -422,6 +427,12 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
+ static ExternalReference handle_scope_extensions_address();
+ static ExternalReference handle_scope_next_address();
+ static ExternalReference handle_scope_limit_address();
+
+ static ExternalReference scheduled_exception_address();
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -460,12 +471,16 @@ class ExternalReference BASE_EMBEDDED {
static void* Redirect(void* address, bool fp_return = false) {
if (redirector_ == NULL) return address;
- return (*redirector_)(address, fp_return);
+ void* answer = (*redirector_)(address, fp_return);
+ return answer;
}
static void* Redirect(Address address_arg, bool fp_return = false) {
void* address = reinterpret_cast<void*>(address_arg);
- return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
+ void* answer = (redirector_ == NULL) ?
+ address :
+ (*redirector_)(address, fp_return);
+ return answer;
}
void* address_;
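
Redirect above is the hook that lets the ARM simulator interpose on calls out to C: when a redirector is registered, every external address is swapped for a simulator-owned stand-in. A sketch of that mechanism with a function-pointer redirector; the names here are hypothetical:

#include <cassert>

typedef void* (*Redirector)(void* address, bool fp_return);

static void* redirector_target = nullptr;

// A toy redirector: remember the real callee and hand out a stand-in.
void* TestRedirector(void* address, bool fp_return) {
  (void)fp_return;  // Unused in this sketch.
  redirector_target = address;
  static int trampoline = 0;
  return &trampoline;
}

void* Redirect(Redirector redirector, void* address) {
  if (redirector == nullptr) return address;  // Native build: identity.
  return redirector(address, false);
}

int main() {
  int dummy = 0;
  void* real_addr = &dummy;
  assert(Redirect(nullptr, real_addr) == real_addr);
  void* redirected = Redirect(TestRedirector, real_addr);
  assert(redirected != real_addr);
  assert(redirector_target == real_addr);
  return 0;
}
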
diff --git a/src/ast.cc b/src/ast.cc
index f6864b82..90b5ed68 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "ast.h"
+#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
@@ -138,6 +139,13 @@ ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
}
+bool ObjectLiteral::Property::IsCompileTimeValue() {
+ return kind_ == CONSTANT ||
+ (kind_ == MATERIALIZED_LITERAL &&
+ CompileTimeValue::IsCompileTimeValue(value_));
+}
+
+
bool ObjectLiteral::IsValidJSON() {
int length = properties()->length();
for (int i = 0; i < length; i++) {
diff --git a/src/ast.h b/src/ast.h
index 42154f61..c27d558a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -28,7 +28,6 @@
#ifndef V8_AST_H_
#define V8_AST_H_
-#include "location.h"
#include "execution.h"
#include "factory.h"
#include "jsregexp.h"
@@ -162,7 +161,25 @@ class Statement: public AstNode {
class Expression: public AstNode {
public:
- Expression() : location_(Location::Temporary()) {}
+ enum Context {
+ // Not assigned a context yet, or else will not be visited during
+ // code generation.
+ kUninitialized,
+ // Evaluated for its side effects.
+ kEffect,
+ // Evaluated for its value (and side effects).
+ kValue,
+ // Evaluated for control flow (and side effects).
+ kTest,
+ // Evaluated for control flow and side effects. Value is also
+ // needed if true.
+ kValueTest,
+ // Evaluated for control flow and side effects. Value is also
+ // needed if false.
+ kTestValue
+ };
+
+ Expression() : context_(kUninitialized) {}
virtual Expression* AsExpression() { return this; }
@@ -177,12 +194,12 @@ class Expression: public AstNode {
// Static type information for this expression.
SmiAnalysis* type() { return &type_; }
- Location location() { return location_; }
- void set_location(Location loc) { location_ = loc; }
+ Context context() { return context_; }
+ void set_context(Context context) { context_ = context; }
private:
SmiAnalysis type_;
- Location location_;
+ Context context_;
};
@@ -305,7 +322,7 @@ class IterationStatement: public BreakableStatement {
class DoWhileStatement: public IterationStatement {
public:
explicit DoWhileStatement(ZoneStringList* labels)
- : IterationStatement(labels), cond_(NULL) {
+ : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}
void Initialize(Expression* cond, Statement* body) {
@@ -317,8 +334,14 @@ class DoWhileStatement: public IterationStatement {
Expression* cond() const { return cond_; }
+ // Position where condition expression starts. We need it to make
+ // the loop's condition a breakable location.
+ int condition_position() { return condition_position_; }
+ void set_condition_position(int pos) { condition_position_ = pos; }
+
private:
Expression* cond_;
+ int condition_position_;
};
@@ -747,6 +770,8 @@ class ObjectLiteral: public MaterializedLiteral {
Expression* value() { return value_; }
Kind kind() { return kind_; }
+ bool IsCompileTimeValue();
+
private:
Literal* key_;
Expression* value_;
@@ -933,11 +958,7 @@ class Slot: public Expression {
// variable name in the context object on the heap,
// with lookup starting at the current context. index()
// is invalid.
- LOOKUP,
-
- // A property in the global object. var()->name() is
- // the property name.
- GLOBAL
+ LOOKUP
};
Slot(Variable* var, Type type, int index)
@@ -1059,6 +1080,7 @@ class CallRuntime: public Expression {
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
+ bool is_jsruntime() const { return function_ == NULL; }
private:
Handle<String> name_;
@@ -1261,7 +1283,6 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body,
int materialized_literal_count,
int expected_property_count,
- bool has_only_this_property_assignments,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
int num_parameters,
@@ -1273,7 +1294,6 @@ class FunctionLiteral: public Expression {
body_(body),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
- has_only_this_property_assignments_(has_only_this_property_assignments),
has_only_simple_this_property_assignments_(
has_only_simple_this_property_assignments),
this_property_assignments_(this_property_assignments),
@@ -1283,7 +1303,8 @@ class FunctionLiteral: public Expression {
is_expression_(is_expression),
loop_nesting_(0),
function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(Heap::empty_string()) {
+ inferred_name_(Heap::empty_string()),
+ try_fast_codegen_(false) {
#ifdef DEBUG
already_compiled_ = false;
#endif
@@ -1305,9 +1326,6 @@ class FunctionLiteral: public Expression {
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
- bool has_only_this_property_assignments() {
- return has_only_this_property_assignments_;
- }
bool has_only_simple_this_property_assignments() {
return has_only_simple_this_property_assignments_;
}
@@ -1326,6 +1344,9 @@ class FunctionLiteral: public Expression {
inferred_name_ = inferred_name;
}
+ bool try_fast_codegen() { return try_fast_codegen_; }
+ void set_try_fast_codegen(bool flag) { try_fast_codegen_ = flag; }
+
#ifdef DEBUG
void mark_as_compiled() {
ASSERT(!already_compiled_);
@@ -1339,7 +1360,6 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body_;
int materialized_literal_count_;
int expected_property_count_;
- bool has_only_this_property_assignments_;
bool has_only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
int num_parameters_;
@@ -1349,6 +1369,7 @@ class FunctionLiteral: public Expression {
int loop_nesting_;
int function_token_position_;
Handle<String> inferred_name_;
+ bool try_fast_codegen_;
#ifdef DEBUG
bool already_compiled_;
#endif
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 43aa1a3b..deda96f3 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -36,6 +36,7 @@
#include "global-handles.h"
#include "macro-assembler.h"
#include "natives.h"
+#include "snapshot.h"
namespace v8 {
namespace internal {
@@ -92,14 +93,39 @@ class SourceCodeCache BASE_EMBEDDED {
static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
+// This is for delete, not delete[].
+static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
+
+
+NativesExternalStringResource::NativesExternalStringResource(const char* source)
+ : data_(source), length_(StrLength(source)) {
+ if (delete_these_non_arrays_on_tear_down == NULL) {
+ delete_these_non_arrays_on_tear_down = new List<char*>(2);
+ }
+ // The resources are small objects and we only make a fixed number of
+ // them, but let's clean them up on exit for neatness.
+ delete_these_non_arrays_on_tear_down->
+ Add(reinterpret_cast<char*>(this));
+}
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
- Handle<String> source_code =
- Factory::NewStringFromAscii(Natives::GetScriptSource(index));
- Heap::natives_source_cache()->set(index, *source_code);
+ if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
+ // We can use external strings for the natives.
+ NativesExternalStringResource* resource =
+ new NativesExternalStringResource(
+ Natives::GetScriptSource(index).start());
+ Handle<String> source_code =
+ Factory::NewExternalStringFromAscii(resource);
+ Heap::natives_source_cache()->set(index, *source_code);
+ } else {
+ // Old snapshot code can't cope with external strings at all.
+ Handle<String> source_code =
+ Factory::NewStringFromAscii(Natives::GetScriptSource(index));
+ Heap::natives_source_cache()->set(index, *source_code);
+ }
}
Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
return Handle<String>::cast(cached_source);
@@ -125,6 +151,16 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
void Bootstrapper::TearDown() {
+ if (delete_these_non_arrays_on_tear_down != NULL) {
+ int len = delete_these_non_arrays_on_tear_down->length();
+ ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
+ for (int i = 0; i < len; i++) {
+ delete delete_these_non_arrays_on_tear_down->at(i);
+ }
+ delete delete_these_non_arrays_on_tear_down;
+ delete_these_non_arrays_on_tear_down = NULL;
+ }
+
natives_cache.Initialize(false); // Yes, symmetrical
extensions_cache.Initialize(false);
}
@@ -316,8 +352,11 @@ Genesis* Genesis::current_ = NULL;
void Bootstrapper::Iterate(ObjectVisitor* v) {
natives_cache.Iterate(v);
+ v->Synchronize("NativesCache");
extensions_cache.Iterate(v);
+ v->Synchronize("Extensions");
PendingFixups::Iterate(v);
+ v->Synchronize("PendingFixups");
}
@@ -1072,21 +1111,29 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("context_data"),
proxy_context_data,
common_attributes);
- Handle<Proxy> proxy_eval_from_function =
- Factory::NewProxy(&Accessors::ScriptEvalFromFunction);
+ Handle<Proxy> proxy_eval_from_script =
+ Factory::NewProxy(&Accessors::ScriptEvalFromScript);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_function"),
- proxy_eval_from_function,
+ Factory::LookupAsciiSymbol("eval_from_script"),
+ proxy_eval_from_script,
common_attributes);
- Handle<Proxy> proxy_eval_from_position =
- Factory::NewProxy(&Accessors::ScriptEvalFromPosition);
+ Handle<Proxy> proxy_eval_from_script_position =
+ Factory::NewProxy(&Accessors::ScriptEvalFromScriptPosition);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_position"),
- proxy_eval_from_position,
+ Factory::LookupAsciiSymbol("eval_from_script_position"),
+ proxy_eval_from_script_position,
+ common_attributes);
+ Handle<Proxy> proxy_eval_from_function_name =
+ Factory::NewProxy(&Accessors::ScriptEvalFromFunctionName);
+ script_descriptors =
+ Factory::CopyAppendProxyDescriptor(
+ script_descriptors,
+ Factory::LookupAsciiSymbol("eval_from_function_name"),
+ proxy_eval_from_function_name,
common_attributes);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -1299,8 +1346,6 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
ASSERT(Top::has_pending_exception() != result);
if (!result) {
Top::clear_pending_exception();
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Error installing extension");
}
current->set_state(v8::INSTALLED);
return result;
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 15fc88dc..07d2747b 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -76,6 +76,24 @@ class Bootstrapper : public AllStatic {
static void FreeThreadResources();
};
+
+class NativesExternalStringResource
+ : public v8::String::ExternalAsciiStringResource {
+ public:
+ explicit NativesExternalStringResource(const char* source);
+
+ const char* data() const {
+ return data_;
+ }
+
+ size_t length() const {
+ return length_;
+ }
+ private:
+ const char* data_;
+ size_t length_;
+};
+
}} // namespace v8::internal
#endif // V8_BOOTSTRAPPER_H_
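
The resource class above follows V8's external-string pattern: the embedder keeps ownership of a byte buffer that must outlive the string, and V8 references it instead of copying the bytes onto the heap. A minimal sketch of that pattern through the public API of this era (treat the exact signatures as assumptions; StaticAsciiResource and WrapStatic are hypothetical names):

#include <cstring>
#include <v8.h>

// The resource owns nothing; it only exposes a buffer with static lifetime.
class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
 public:
  explicit StaticAsciiResource(const char* data)
      : data_(data), length_(strlen(data)) {}
  const char* data() const { return data_; }
  size_t length() const { return length_; }
 private:
  const char* data_;  // not owned; must outlive the V8 string
  size_t length_;
};

// V8 keeps a pointer to the resource instead of copying the bytes.
v8::Local<v8::String> WrapStatic(const char* text) {
  return v8::String::NewExternal(new StaticAsciiResource(text));
}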
diff --git a/src/builtins.cc b/src/builtins.cc
index fa1b34e6..b66635c5 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -380,6 +380,9 @@ BUILTIN(HandleApiCall) {
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(v8::ToCData<Address>(callback_obj));
+#endif
value = callback(new_args);
}
if (value.IsEmpty()) {
@@ -446,6 +449,9 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(v8::ToCData<Address>(callback_obj));
+#endif
value = callback(new_args);
}
if (value.IsEmpty()) {
diff --git a/src/checks.cc b/src/checks.cc
index f8a2f24f..b5df316d 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -36,6 +36,8 @@ static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+ fflush(stdout);
+ fflush(stderr);
fatal_error_handler_nesting_depth++;
// First time we try to print an error message
if (fatal_error_handler_nesting_depth < 2) {
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 586c9480..dbc39ff3 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -36,10 +36,27 @@ namespace v8 {
namespace internal {
Handle<Code> CodeStub::GetCode() {
- uint32_t key = GetKey();
- int index = Heap::code_stubs()->FindEntry(key);
- if (index == NumberDictionary::kNotFound) {
- HandleScope scope;
+ bool custom_cache = has_custom_cache();
+
+ int index = 0;
+ uint32_t key = 0;
+ if (custom_cache) {
+ Code* cached;
+ if (GetCustomCache(&cached)) {
+ return Handle<Code>(cached);
+ } else {
+ index = NumberDictionary::kNotFound;
+ }
+ } else {
+ key = GetKey();
+ index = Heap::code_stubs()->FindEntry(key);
+ if (index != NumberDictionary::kNotFound)
+ return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+ }
+
+ Code* result;
+ {
+ v8::HandleScope scope;
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
@@ -79,63 +96,29 @@ Handle<Code> CodeStub::GetCode() {
}
#endif
- // Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- Factory::DictionaryAtNumberPut(
- Handle<NumberDictionary>(Heap::code_stubs()),
- key,
- code);
- Heap::public_set_code_stubs(*dict);
- index = Heap::code_stubs()->FindEntry(key);
+ if (custom_cache) {
+ SetCustomCache(*code);
+ } else {
+ // Update the dictionary and the root in Heap.
+ Handle<NumberDictionary> dict =
+ Factory::DictionaryAtNumberPut(
+ Handle<NumberDictionary>(Heap::code_stubs()),
+ key,
+ code);
+ Heap::public_set_code_stubs(*dict);
+ }
+ result = *code;
}
- ASSERT(index != NumberDictionary::kNotFound);
- return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+ return Handle<Code>(result);
}
const char* CodeStub::MajorName(CodeStub::Major major_key) {
switch (major_key) {
- case CallFunction:
- return "CallFunction";
- case GenericBinaryOp:
- return "GenericBinaryOp";
- case SmiOp:
- return "SmiOp";
- case Compare:
- return "Compare";
- case RecordWrite:
- return "RecordWrite";
- case StackCheck:
- return "StackCheck";
- case UnarySub:
- return "UnarySub";
- case RevertToNumber:
- return "RevertToNumber";
- case ToBoolean:
- return "ToBoolean";
- case Instanceof:
- return "Instanceof";
- case CounterOp:
- return "CounterOp";
- case ArgumentsAccess:
- return "ArgumentsAccess";
- case Runtime:
- return "Runtime";
- case CEntry:
- return "CEntry";
- case JSEntry:
- return "JSEntry";
- case GetProperty:
- return "GetProperty";
- case SetProperty:
- return "SetProperty";
- case InvokeBuiltin:
- return "InvokeBuiltin";
- case ConvertToDouble:
- return "ConvertToDouble";
- case WriteInt32ToHeapNumber:
- return "WriteInt32ToHeapNumber";
+#define DEF_CASE(name) case name: return #name;
+ CODE_STUB_LIST(DEF_CASE)
+#undef DEF_CASE
default:
UNREACHABLE();
return NULL;
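
The hand-written switch above is replaced by an X-macro expansion of CODE_STUB_LIST (defined in code-stubs.h below), so the Major enum and the name table are generated from a single list and cannot drift apart. The idiom in isolation, with illustrative names:

#define COLOR_LIST(V) V(Red) V(Green) V(Blue)

enum Color {
#define DEF_ENUM(name) name,
  COLOR_LIST(DEF_ENUM)
#undef DEF_ENUM
  kColorCount
};

const char* ColorName(Color c) {
  switch (c) {
#define DEF_CASE(name) case name: return #name;
    COLOR_LIST(DEF_CASE)
#undef DEF_CASE
    default: return "unknown";
  }
}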
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 91d951f2..25a2d0f5 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -31,32 +31,52 @@
namespace v8 {
namespace internal {
+// List of code stubs used on all platforms. The order in this list is important
+// as only the stubs up to and including RecordWrite allow nested stub calls.
+#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ V(CallFunction) \
+ V(GenericBinaryOp) \
+ V(StringAdd) \
+ V(SmiOp) \
+ V(Compare) \
+ V(RecordWrite) \
+ V(ConvertToDouble) \
+ V(WriteInt32ToHeapNumber) \
+ V(StackCheck) \
+ V(UnarySub) \
+ V(RevertToNumber) \
+ V(ToBoolean) \
+ V(Instanceof) \
+ V(CounterOp) \
+ V(ArgumentsAccess) \
+ V(Runtime) \
+ V(CEntry) \
+ V(JSEntry)
+
+// List of code stubs only used on ARM platforms.
+#ifdef V8_TARGET_ARCH_ARM
+#define CODE_STUB_LIST_ARM(V) \
+ V(GetProperty) \
+ V(SetProperty) \
+ V(InvokeBuiltin) \
+ V(RegExpCEntry)
+#else
+#define CODE_STUB_LIST_ARM(V)
+#endif
+
+// Combined list of code stubs.
+#define CODE_STUB_LIST(V) \
+ CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ CODE_STUB_LIST_ARM(V)
// CodeStub is the base class of all stubs.
class CodeStub BASE_EMBEDDED {
public:
enum Major {
- CallFunction,
- GenericBinaryOp,
- SmiOp,
- Compare,
- RecordWrite, // Last stub that allows stub calls inside.
- ConvertToDouble,
- WriteInt32ToHeapNumber,
- StackCheck,
- UnarySub,
- RevertToNumber,
- ToBoolean,
- Instanceof,
- CounterOp,
- ArgumentsAccess,
- Runtime,
- CEntry,
- JSEntry,
- GetProperty, // ARM only
- SetProperty, // ARM only
- InvokeBuiltin, // ARM only
- RegExpCEntry, // ARM only
+#define DEF_ENUM(name) name,
+ CODE_STUB_LIST(DEF_ENUM)
+#undef DEF_ENUM
+ NoCache, // marker for stubs that do custom caching
NUMBER_OF_IDS
};
@@ -73,6 +93,12 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
+ // Override these methods to provide a custom caching mechanism for
+ // an individual type of code stub.
+ virtual bool GetCustomCache(Code** code_out) { return false; }
+ virtual void SetCustomCache(Code* value) { }
+ virtual bool has_custom_cache() { return false; }
+
protected:
static const int kMajorBits = 5;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
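
A stub opts out of the NumberDictionary cache by returning true from has_custom_cache() and using NoCache as its major key; GetCode() then consults GetCustomCache() before generating code and hands fresh code to SetCustomCache() afterwards. A hypothetical stub wiring up the hooks (illustrative only; a real stub must keep the cached Code* somewhere the GC can see, as ApiGetterEntryStub does via its AccessorInfo):

class MyCachedStub : public CodeStub {
 public:
  virtual bool has_custom_cache() { return true; }
  virtual bool GetCustomCache(Code** code_out) {
    if (cache_ == NULL) return false;  // miss: GetCode() will generate
    *code_out = cache_;
    return true;
  }
  virtual void SetCustomCache(Code* value) { cache_ = value; }

 private:
  Major MajorKey() { return NoCache; }
  int MinorKey() { return 0; }
  void Generate(MacroAssembler* masm);  // emits the stub's code
  Code* cache_;  // simplified; a raw pointer like this is not GC-safe
};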
diff --git a/src/codegen.cc b/src/codegen.cc
index 28c0ba5f..26e8d7de 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
@@ -250,98 +251,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
#endif
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- fun->shared()->set_length(lit->num_parameters());
- fun->shared()->set_formal_parameter_count(lit->num_parameters());
- fun->shared()->set_script(*script);
- fun->shared()->set_function_token_position(lit->function_token_position());
- fun->shared()->set_start_position(lit->start_position());
- fun->shared()->set_end_position(lit->end_position());
- fun->shared()->set_is_expression(lit->is_expression());
- fun->shared()->set_is_toplevel(is_toplevel);
- fun->shared()->set_inferred_name(*lit->inferred_name());
- fun->shared()->SetThisPropertyAssignmentsInfo(
- lit->has_only_this_property_assignments(),
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-}
-
-
-Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
-}
-
-
-Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
-#ifdef DEBUG
- // We should not try to compile the same function literal more than
- // once.
- node->mark_as_compiled();
-#endif
-
- // Determine if the function can be lazily compiled. This is
- // necessary to allow some of our builtin JS files to be lazily
- // compiled. These builtins cannot be handled lazily by the parser,
- // since we have to know if a function uses the special natives
- // syntax, which is something the parser records.
- bool allow_lazy = node->AllowsLazyCompilation();
-
- // Generate code
- Handle<Code> code;
- if (FLAG_lazy && allow_lazy) {
- code = ComputeLazyCompile(node->num_parameters());
- } else {
- // The bodies of function literals have not yet been visited by
- // the AST optimizer/analyzer.
- if (!Rewriter::Optimize(node)) {
- return Handle<JSFunction>::null();
- }
-
- code = MakeCode(node, script_, false);
-
- // Check for stack-overflow exception.
- if (code.is_null()) {
- SetStackOverflow();
- return Handle<JSFunction>::null();
- }
-
- // Function compilation complete.
- LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
-
-#ifdef ENABLE_OPROFILE_AGENT
- OProfileAgent::CreateNativeCodeRegion(*node->name(),
- code->instruction_start(),
- code->instruction_size());
-#endif
- }
-
- // Create a boilerplate function.
- Handle<JSFunction> function =
- Factory::NewFunctionBoilerplate(node->name(),
- node->materialized_literal_count(),
- code);
- CodeGenerator::SetFunctionInfo(function, node, false, script_);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger that a new function has been added.
- Debugger::OnNewFunction(function);
-#endif
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(function,
- node->expected_property_count());
- return function;
-}
-
-
Handle<Code> CodeGenerator::ComputeCallInitialize(
int argc,
InLoopFlag in_loop) {
@@ -398,7 +307,8 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
array->set_undefined(j++);
}
} else {
- Handle<JSFunction> function = BuildBoilerplate(node->fun());
+ Handle<JSFunction> function =
+ Compiler::BuildBoilerplate(node->fun(), script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
array->set(j++, *function);
@@ -433,7 +343,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateMathSin, "_Math_sin"},
- {&CodeGenerator::GenerateMathCos, "_Math_cos"}
+ {&CodeGenerator::GenerateMathCos, "_Math_cos"},
+ {&CodeGenerator::GenerateIsObject, "_IsObject"},
+ {&CodeGenerator::GenerateIsFunction, "_IsFunction"},
+ {&CodeGenerator::GenerateStringAdd, "_StringAdd"},
};
@@ -521,6 +434,9 @@ void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos());
}
+void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
+ if (FLAG_debug_info) RecordPositions(masm(), stmt->condition_position());
+}
void CodeGenerator::CodeForSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
@@ -551,4 +467,20 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
}
+bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
+ Object* cache = info()->load_stub_cache();
+ if (cache->IsUndefined()) {
+ return false;
+ } else {
+ *code_out = Code::cast(cache);
+ return true;
+ }
+}
+
+
+void ApiGetterEntryStub::SetCustomCache(Code* value) {
+ info()->set_load_stub_cache(value);
+}
+
+
} } // namespace v8::internal
diff --git a/src/codegen.h b/src/codegen.h
index 8c1b7336..85a08d59 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -38,9 +38,9 @@
// MakeCode
// MakeCodePrologue
// MakeCodeEpilogue
-// SetFunctionInfo
// masm
// frame
+// script
// has_valid_frame
// SetFrame
// DeleteFrame
@@ -69,6 +69,7 @@
// CodeForFunctionPosition
// CodeForReturnPosition
// CodeForStatementPosition
+// CodeForDoWhileConditionPosition
// CodeForSourcePosition
@@ -301,7 +302,7 @@ class CEntryStub : public CodeStub {
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
@@ -320,6 +321,32 @@ class CEntryStub : public CodeStub {
};
+class ApiGetterEntryStub : public CodeStub {
+ public:
+ ApiGetterEntryStub(Handle<AccessorInfo> info,
+ ApiFunction* fun)
+ : info_(info),
+ fun_(fun) { }
+ void Generate(MacroAssembler* masm);
+ virtual bool has_custom_cache() { return true; }
+ virtual bool GetCustomCache(Code** code_out);
+ virtual void SetCustomCache(Code* value);
+
+ static const int kStackSpace = 6;
+ static const int kArgc = 4;
+ private:
+ Handle<AccessorInfo> info() { return info_; }
+ ApiFunction* fun() { return fun_; }
+ Major MajorKey() { return NoCache; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "ApiEntryStub"; }
+ // The accessor info associated with the function.
+ Handle<AccessorInfo> info_;
+ // The function to be called.
+ ApiFunction* fun_;
+};
+
+
class CEntryDebugBreakStub : public CEntryStub {
public:
CEntryDebugBreakStub() : CEntryStub(1) { }
diff --git a/src/compiler.cc b/src/compiler.cc
index e422bf79..48da63df 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -46,11 +46,23 @@ class CodeGenSelector: public AstVisitor {
public:
enum CodeGenTag { NORMAL, FAST };
- CodeGenSelector() : has_supported_syntax_(true) {}
+ CodeGenSelector()
+ : has_supported_syntax_(true),
+ context_(Expression::kUninitialized) {
+ }
CodeGenTag Select(FunctionLiteral* fun);
private:
+ // Visit an expression in a given expression context.
+ void ProcessExpression(Expression* expr, Expression::Context context) {
+ Expression::Context saved = context_;
+ context_ = context;
+ Visit(expr);
+ expr->set_context(context);
+ context_ = saved;
+ }
+
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
@@ -61,6 +73,9 @@ class CodeGenSelector: public AstVisitor {
bool has_supported_syntax_;
+ // The desired expression context of the currently visited expression.
+ Expression::Context context_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
};
@@ -68,7 +83,8 @@ class CodeGenSelector: public AstVisitor {
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
- bool is_eval) {
+ bool is_eval,
+ Handle<SharedFunctionInfo> shared) {
ASSERT(literal != NULL);
// Rewrite the AST by introducing .result assignments where needed.
@@ -105,12 +121,24 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
// Generate code and return it.
if (FLAG_fast_compiler) {
- CodeGenSelector selector;
- CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
- if (code_gen == CodeGenSelector::FAST) {
- return FastCodeGenerator::MakeCode(literal, script, is_eval);
+ // If there is no shared function info, try the fast code
+ // generator for code in the global scope. Otherwise obey the
+ // explicit hint in the shared function info.
+ // If always_fast_compiler is true, always try the fast compiler.
+ if (shared.is_null() && !literal->scope()->is_global_scope() &&
+ !FLAG_always_fast_compiler) {
+ if (FLAG_trace_bailout) PrintF("Non-global scope\n");
+ } else if (!shared.is_null() && !shared->try_fast_codegen() &&
+ !FLAG_always_fast_compiler) {
+ if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
+ } else {
+ CodeGenSelector selector;
+ CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+ if (code_gen == CodeGenSelector::FAST) {
+ return FastCodeGenerator::MakeCode(literal, script, is_eval);
+ }
+ ASSERT(code_gen == CodeGenSelector::NORMAL);
}
- ASSERT(code_gen == CodeGenSelector::NORMAL);
}
return CodeGenerator::MakeCode(literal, script, is_eval);
}
@@ -151,8 +179,10 @@ static Handle<JSFunction> MakeFunction(bool is_global,
// called.
if (is_eval) {
JavaScriptFrameIterator it;
- script->set_eval_from_function(it.frame()->function());
- int offset = it.frame()->pc() - it.frame()->code()->instruction_start();
+ script->set_eval_from_shared(
+ JSFunction::cast(it.frame()->function())->shared());
+ int offset = static_cast<int>(
+ it.frame()->pc() - it.frame()->code()->instruction_start());
script->set_eval_from_instructions_offset(Smi::FromInt(offset));
}
}
@@ -195,7 +225,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
HistogramTimerScope timer(rate);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, context, is_eval);
+ Handle<Code> code = MakeCode(lit, script, context, is_eval,
+ Handle<SharedFunctionInfo>::null());
// Check for stack-overflow exceptions.
if (code.is_null()) {
@@ -232,7 +263,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
code);
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- CodeGenerator::SetFunctionInfo(fun, lit, true, script);
+ Compiler::SetFunctionInfo(fun, lit, true, script);
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -396,7 +427,8 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
HistogramTimerScope timer(&Counters::compile_lazy);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false);
+ Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false,
+ shared);
// Check for stack-overflow exception.
if (code.is_null()) {
@@ -438,7 +470,6 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
  // Set the optimization hints after performing lazy compilation, as these are
// not set when the function is set up as a lazily compiled function.
shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_this_property_assignments(),
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
@@ -448,18 +479,132 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
}
+Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
+ Handle<Script> script,
+ AstVisitor* caller) {
+#ifdef DEBUG
+ // We should not try to compile the same function literal more than
+ // once.
+ literal->mark_as_compiled();
+#endif
+
+ // Determine if the function can be lazily compiled. This is
+ // necessary to allow some of our builtin JS files to be lazily
+ // compiled. These builtins cannot be handled lazily by the parser,
+ // since we have to know if a function uses the special natives
+ // syntax, which is something the parser records.
+ bool allow_lazy = literal->AllowsLazyCompilation();
+
+ // Generate code
+ Handle<Code> code;
+ if (FLAG_lazy && allow_lazy) {
+ code = ComputeLazyCompile(literal->num_parameters());
+ } else {
+ // The bodies of function literals have not yet been visited by
+ // the AST optimizer/analyzer.
+ if (!Rewriter::Optimize(literal)) {
+ return Handle<JSFunction>::null();
+ }
+
+ // Generate code and return it.
+ bool is_compiled = false;
+ if (FLAG_fast_compiler && literal->try_fast_codegen()) {
+ CodeGenSelector selector;
+ CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+ if (code_gen == CodeGenSelector::FAST) {
+ code = FastCodeGenerator::MakeCode(literal,
+ script,
+ false); // Not eval.
+ is_compiled = true;
+ }
+ }
+
+ if (!is_compiled) {
+ // We didn't try the fast compiler, or we failed to select it.
+ code = CodeGenerator::MakeCode(literal,
+ script,
+ false); // Not eval.
+ }
+
+ // Check for stack-overflow exception.
+ if (code.is_null()) {
+ caller->SetStackOverflow();
+ return Handle<JSFunction>::null();
+ }
+
+ // Function compilation complete.
+ LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
+
+#ifdef ENABLE_OPROFILE_AGENT
+    OProfileAgent::CreateNativeCodeRegion(*literal->name(),
+ code->instruction_start(),
+ code->instruction_size());
+#endif
+ }
+
+ // Create a boilerplate function.
+ Handle<JSFunction> function =
+ Factory::NewFunctionBoilerplate(literal->name(),
+ literal->materialized_literal_count(),
+ code);
+ SetFunctionInfo(function, literal, false, script);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Notify debugger that a new function has been added.
+ Debugger::OnNewFunction(function);
+#endif
+
+ // Set the expected number of properties for instances and return
+ // the resulting function.
+ SetExpectedNofPropertiesFromEstimate(function,
+ literal->expected_property_count());
+ return function;
+}
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source,
+// the first character is number 0 (not 1).
+void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script) {
+ fun->shared()->set_length(lit->num_parameters());
+ fun->shared()->set_formal_parameter_count(lit->num_parameters());
+ fun->shared()->set_script(*script);
+ fun->shared()->set_function_token_position(lit->function_token_position());
+ fun->shared()->set_start_position(lit->start_position());
+ fun->shared()->set_end_position(lit->end_position());
+ fun->shared()->set_is_expression(lit->is_expression());
+ fun->shared()->set_is_toplevel(is_toplevel);
+ fun->shared()->set_inferred_name(*lit->inferred_name());
+ fun->shared()->SetThisPropertyAssignmentsInfo(
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+ fun->shared()->set_try_fast_codegen(lit->try_fast_codegen());
+}
+
+
CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
Scope* scope = fun->scope();
- if (!scope->is_global_scope()) {
- if (FLAG_trace_bailout) PrintF("Non-global scope\n");
- return NORMAL;
+ if (scope->num_heap_slots() > 0) {
+ // We support functions with a local context if they do not have
+ // parameters that need to be copied into the context.
+ for (int i = 0, len = scope->num_parameters(); i < len; i++) {
+ Slot* slot = scope->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ if (FLAG_trace_bailout) {
+ PrintF("Function has context-allocated parameters.\n");
+ }
+ return NORMAL;
+ }
+ }
}
- ASSERT(scope->num_heap_slots() == 0);
- ASSERT(scope->arguments() == NULL);
has_supported_syntax_ = true;
- VisitDeclarations(fun->scope()->declarations());
+ VisitDeclarations(scope->declarations());
if (!has_supported_syntax_) return NORMAL;
VisitStatements(fun->body());
@@ -500,9 +645,20 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
- Variable* var = decl->proxy()->var();
- if (!var->is_global() || var->mode() == Variable::CONST) {
- BAILOUT("Non-global declaration");
+ Property* prop = decl->proxy()->AsProperty();
+ if (prop != NULL) {
+    // Property rewrites are shared; make sure we are not changing their
+    // expression context state.
+ ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+ prop->obj()->context() == Expression::kValue);
+ ASSERT(prop->key()->context() == Expression::kUninitialized ||
+ prop->key()->context() == Expression::kValue);
+ ProcessExpression(prop->obj(), Expression::kValue);
+ ProcessExpression(prop->key(), Expression::kValue);
+ }
+
+ if (decl->fun() != NULL) {
+ ProcessExpression(decl->fun(), Expression::kValue);
}
}
@@ -513,10 +669,7 @@ void CodeGenSelector::VisitBlock(Block* stmt) {
void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
- Expression* expr = stmt->expression();
- Visit(expr);
- CHECK_BAILOUT;
- expr->set_location(Location::Nowhere());
+ ProcessExpression(stmt->expression(), Expression::kEffect);
}
@@ -526,7 +679,11 @@ void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
+ ProcessExpression(stmt->condition(), Expression::kTest);
+ CHECK_BAILOUT;
+ Visit(stmt->then_statement());
+ CHECK_BAILOUT;
+ Visit(stmt->else_statement());
}
@@ -541,7 +698,7 @@ void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
+ ProcessExpression(stmt->expression(), Expression::kValue);
}
@@ -561,17 +718,39 @@ void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
- BAILOUT("DoWhileStatement");
+  // We do not handle loops with break or continue statements in their
+  // body. We bail out when we hit those statements in the body.
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ Visit(stmt->body());
}
void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
- BAILOUT("WhileStatement");
+  // We do not handle loops with break or continue statements in their
+  // body. We bail out when we hit those statements in the body.
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ Visit(stmt->body());
}
void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
+  // We do not handle loops with break or continue statements in their
+  // body. We bail out when we hit those statements in the body.
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ CHECK_BAILOUT;
+ }
+ if (stmt->cond() != NULL) {
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ }
+ Visit(stmt->body());
+ if (stmt->next() != NULL) {
+ CHECK_BAILOUT;
+ Visit(stmt->next());
+ }
}
@@ -591,14 +770,12 @@ void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
+ // Debugger statement is supported.
}
void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
- if (!expr->AllowsLazyCompilation()) {
- BAILOUT("FunctionLiteral does not allow lazy compilation");
- }
+ // Function literal is supported.
}
@@ -609,37 +786,92 @@ void CodeGenSelector::VisitFunctionBoilerplateLiteral(
void CodeGenSelector::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
+ ProcessExpression(expr->condition(), Expression::kTest);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->then_expression(), context_);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->else_expression(), context_);
}
void CodeGenSelector::VisitSlot(Slot* expr) {
- Slot::Type type = expr->type();
- if (type != Slot::PARAMETER && type != Slot::LOCAL) {
- BAILOUT("non-parameter/non-local slot reference");
- }
+ UNREACHABLE();
}
void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
Expression* rewrite = expr->var()->rewrite();
- if (rewrite != NULL) Visit(rewrite);
+ // A rewrite of NULL indicates a global variable.
+ if (rewrite != NULL) {
+ // Non-global.
+ Slot* slot = rewrite->AsSlot();
+ if (slot != NULL) {
+ Slot::Type type = slot->type();
+ // When LOOKUP slots are enabled, some currently dead code
+ // implementing unary typeof will become live.
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
+ } else {
+#ifdef DEBUG
+ // Only remaining possibility is a property where the object is
+ // a slotted variable and the key is a smi.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+ Variable* object = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object);
+ ASSERT_NOT_NULL(object->slot());
+ ASSERT_NOT_NULL(property->key()->AsLiteral());
+ ASSERT(property->key()->AsLiteral()->handle()->IsSmi());
+#endif
+ }
+ }
}
void CodeGenSelector::VisitLiteral(Literal* expr) {
- // All literals are supported.
- expr->set_location(Location::Constant());
+ /* Nothing to do. */
}
void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
- // RegexpLiterals are supported.
+ /* Nothing to do. */
}
void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+ for (int i = 0, len = properties->length(); i < len; i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+
+ // For (non-compile-time) materialized literals and computed
+ // properties with symbolic keys we will use an IC and therefore not
+ // generate code for the key.
+ case ObjectLiteral::Property::COMPUTED: // Fall through.
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ if (property->key()->handle()->IsSymbol()) {
+ break;
+ }
+ // Fall through.
+
+ // In all other cases we need the key's value on the stack
+ // for a runtime call. (Relies on TEMP meaning STACK.)
+ case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::SETTER: // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ ProcessExpression(property->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ break;
+ }
+ ProcessExpression(property->value(), Expression::kValue);
+ CHECK_BAILOUT;
+ }
}
@@ -649,7 +881,7 @@ void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
if (subexpr->AsLiteral() != NULL) continue;
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- Visit(subexpr);
+ ProcessExpression(subexpr, Expression::kValue);
CHECK_BAILOUT;
}
}
@@ -661,10 +893,8 @@ void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
void CodeGenSelector::VisitAssignment(Assignment* expr) {
- // We support plain non-compound assignments to parameters and
- // non-context (stack-allocated) locals.
- if (expr->starts_initialization_block()) BAILOUT("initialization block");
-
+ // We support plain non-compound assignments to properties, parameters and
+ // non-context (stack-allocated) locals, and global variables.
Token::Value op = expr->op();
if (op == Token::INIT_CONST) BAILOUT("initialize constant");
if (op != Token::ASSIGN && op != Token::INIT_VAR) {
@@ -672,17 +902,41 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
}
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- if (var == NULL) BAILOUT("non-variable assignment");
-
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type != Slot::PARAMETER && type != Slot::LOCAL) {
- BAILOUT("non-parameter/non-local slot assignment");
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
+ }
+ } else if (prop != NULL) {
+ ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+ prop->obj()->context() == Expression::kValue);
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ // We will only visit the key during code generation for keyed property
+ // stores. Leave its expression context uninitialized for named
+ // property stores.
+ Literal* lit = prop->key()->AsLiteral();
+ uint32_t ignored;
+ if (lit == NULL ||
+ !lit->handle()->IsSymbol() ||
+ String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
+ ASSERT(prop->key()->context() == Expression::kUninitialized ||
+ prop->key()->context() == Expression::kValue);
+ ProcessExpression(prop->key(), Expression::kValue);
+ CHECK_BAILOUT;
}
+ } else {
+ // This is a throw reference error.
+ BAILOUT("non-variable/non-property assignment");
}
- Visit(expr->value());
+ ProcessExpression(expr->value(), Expression::kValue);
}
@@ -692,7 +946,9 @@ void CodeGenSelector::VisitThrow(Throw* expr) {
void CodeGenSelector::VisitProperty(Property* expr) {
- BAILOUT("Property");
+ ProcessExpression(expr->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->key(), Expression::kValue);
}
@@ -703,58 +959,154 @@ void CodeGenSelector::VisitCall(Call* expr) {
// Check for supported calls
if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("Call to a function named 'eval'");
+ BAILOUT("call to the identifier 'eval'");
} else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
+ // Calls to global variables are supported.
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ BAILOUT("call to a lookup slot");
+ } else if (fun->AsProperty() != NULL) {
+ Property* prop = fun->AsProperty();
+ Literal* literal_key = prop->key()->AsLiteral();
+ if (literal_key != NULL && literal_key->handle()->IsSymbol()) {
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ } else {
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(prop->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ }
} else {
- BAILOUT("Call to a non-global function");
+ // Otherwise the call is supported if the function expression is.
+ ProcessExpression(fun, Expression::kValue);
}
- // Check all arguments to the call
+ // Check all arguments to the call.
for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
+ ProcessExpression(args->at(i), Expression::kValue);
CHECK_BAILOUT;
}
}
void CodeGenSelector::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
+ ProcessExpression(expr->expression(), Expression::kValue);
+ CHECK_BAILOUT;
+ ZoneList<Expression*>* args = expr->arguments();
+ // Check all arguments to the call
+ for (int i = 0; i < args->length(); i++) {
+ ProcessExpression(args->at(i), Expression::kValue);
+ CHECK_BAILOUT;
+ }
}
void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
- // In case of JS runtime function bail out.
- if (expr->function() == NULL) BAILOUT("CallRuntime");
// Check for inline runtime call
if (expr->name()->Get(0) == '_' &&
CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("InlineRuntimeCall");
+ BAILOUT("inlined runtime call");
}
+ // Check all arguments to the call. (Relies on TEMP meaning STACK.)
for (int i = 0; i < expr->arguments()->length(); i++) {
- Visit(expr->arguments()->at(i));
+ ProcessExpression(expr->arguments()->at(i), Expression::kValue);
CHECK_BAILOUT;
}
}
void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
+ switch (expr->op()) {
+ case Token::VOID:
+ ProcessExpression(expr->expression(), Expression::kEffect);
+ break;
+ case Token::NOT:
+ ProcessExpression(expr->expression(), Expression::kTest);
+ break;
+ case Token::TYPEOF:
+ ProcessExpression(expr->expression(), Expression::kValue);
+ break;
+ default:
+ BAILOUT("UnaryOperation");
+ }
}
void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
+ // We support postfix count operations on global variables.
+ if (expr->is_prefix()) BAILOUT("Prefix CountOperation");
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement");
+ ProcessExpression(expr->expression(), Expression::kValue);
}
void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
+ case Token::COMMA:
+ ProcessExpression(expr->left(), Expression::kEffect);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), context_);
+ break;
+
case Token::OR:
- Visit(expr->left());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kTestValue:
+ // The left subexpression's value is not needed, it is in a pure
+ // test context.
+ ProcessExpression(expr->left(), Expression::kTest);
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kValueTest:
+ // The left subexpression's value is needed, it is in a hybrid
+ // value/test context.
+ ProcessExpression(expr->left(), Expression::kValueTest);
+ break;
+ }
CHECK_BAILOUT;
- Visit(expr->right());
+ ProcessExpression(expr->right(), context_);
+ break;
+
+ case Token::AND:
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ // The left subexpression's value is not needed, it is in a pure
+ // test context.
+ ProcessExpression(expr->left(), Expression::kTest);
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTestValue:
+ // The left subexpression's value is needed, it is in a hybrid
+ // test/value context.
+ ProcessExpression(expr->left(), Expression::kTestValue);
+ break;
+ }
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), context_);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ ProcessExpression(expr->left(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), Expression::kValue);
break;
default:
@@ -764,12 +1116,14 @@ void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
+ ProcessExpression(expr->left(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), Expression::kValue);
}
void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
+ // ThisFunction is supported.
}
#undef BAILOUT
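
Throughout the selector, ProcessExpression saves the current context, visits the child, records the chosen context on the node, and restores on the way out; the '||' and '&&' cases then map the parent context onto the left operand. Concretely, in 'var x = a || b' the whole expression is in kValue, so 'a' is visited in kValueTest (its value becomes the result if it tests true, otherwise it merely gates 'b'), while 'b' inherits kValue. The mapping for '||' written out as a standalone function (it mirrors the switch above):

Expression::Context LeftContextForOr(Expression::Context parent) {
  switch (parent) {
    case Expression::kEffect:
    case Expression::kTest:
    case Expression::kTestValue:
      return Expression::kTest;       // the left value never escapes
    case Expression::kValue:
    case Expression::kValueTest:
      return Expression::kValueTest;  // the left value may be the result
    default:
      UNREACHABLE();
      return Expression::kUninitialized;
  }
}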
diff --git a/src/compiler.h b/src/compiler.h
index 579970b3..546e446b 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -71,6 +71,19 @@ class Compiler : public AllStatic {
// true on success and false if the compilation resulted in a stack
// overflow.
static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
+
+ // Compile a function boilerplate object (the function is possibly
+ // lazily compiled). Called recursively from a backend code
+ // generator 'caller' to build the boilerplate.
+ static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node,
+ Handle<Script> script,
+ AstVisitor* caller);
+
+ // Set the function info for a newly compiled function.
+ static void SetFunctionInfo(Handle<JSFunction> fun,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script);
};
diff --git a/src/conversions.cc b/src/conversions.cc
index 3e66d286..fd6d38d8 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -50,7 +50,7 @@ int HexValue(uc32 c) {
// Provide a common interface to getting a character at a certain
// index from a char* or a String object.
static inline int GetChar(const char* str, int index) {
- ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
+ ASSERT(index >= 0 && index < StrLength(str));
return str[index];
}
@@ -61,7 +61,7 @@ static inline int GetChar(String* str, int index) {
static inline int GetLength(const char* str) {
- return strlen(str);
+ return StrLength(str);
}
@@ -101,7 +101,7 @@ static inline void ReleaseCString(String* original, const char* str) {
static inline bool IsSpace(const char* str, int index) {
- ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
+ ASSERT(index >= 0 && index < StrLength(str));
return Scanner::kIsWhiteSpace.get(str[index]);
}
@@ -121,13 +121,13 @@ static inline bool SubStringEquals(const char* str,
static inline bool SubStringEquals(String* str, int index, const char* other) {
HandleScope scope;
int str_length = str->length();
- int other_length = strlen(other);
+ int other_length = StrLength(other);
int end = index + other_length < str_length ?
index + other_length :
str_length;
- Handle<String> slice =
- Factory::NewStringSlice(Handle<String>(str), index, end);
- return slice->IsEqualTo(Vector<const char>(other, other_length));
+ Handle<String> substring =
+ Factory::NewSubString(Handle<String>(str), index, end);
+ return substring->IsEqualTo(Vector<const char>(other, other_length));
}
@@ -319,7 +319,7 @@ static double InternalStringToDouble(S* str,
ReleaseCString(str, cstr);
if (result != 0.0 || end != cstr) {
// It appears that strtod worked
- index += end - cstr;
+ index += static_cast<int>(end - cstr);
} else {
// Check for {+,-,}Infinity
bool is_negative = (GetChar(str, index) == '-');
@@ -383,7 +383,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
int sign;
char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
- int length = strlen(decimal_rep);
+ int length = StrLength(decimal_rep);
if (sign) builder.AddCharacter('-');
@@ -465,7 +465,7 @@ char* DoubleToFixedCString(double value, int f) {
int decimal_point;
int sign;
char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL);
- int decimal_rep_length = strlen(decimal_rep);
+ int decimal_rep_length = StrLength(decimal_rep);
// Create a representation that is padded with zeros if needed.
int zero_prefix_length = 0;
@@ -526,7 +526,8 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
if (significant_digits != 1) {
builder.AddCharacter('.');
builder.AddString(decimal_rep + 1);
- builder.AddPadding('0', significant_digits - strlen(decimal_rep));
+ int rep_length = StrLength(decimal_rep);
+ builder.AddPadding('0', significant_digits - rep_length);
}
builder.AddCharacter('e');
@@ -553,11 +554,11 @@ char* DoubleToExponentialCString(double value, int f) {
char* decimal_rep = NULL;
if (f == -1) {
decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
- f = strlen(decimal_rep) - 1;
+ f = StrLength(decimal_rep) - 1;
} else {
decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
}
- int decimal_rep_length = strlen(decimal_rep);
+ int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length > 0);
ASSERT(decimal_rep_length <= f + 1);
USE(decimal_rep_length);
@@ -585,7 +586,7 @@ char* DoubleToPrecisionCString(double value, int p) {
int decimal_point;
int sign;
char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
- int decimal_rep_length = strlen(decimal_rep);
+ int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length <= p);
int exponent = decimal_point - 1;
@@ -619,7 +620,7 @@ char* DoubleToPrecisionCString(double value, int p) {
builder.AddCharacter('.');
const int extra = negative ? 2 : 1;
if (decimal_rep_length > decimal_point) {
- const int len = strlen(decimal_rep + decimal_point);
+ const int len = StrLength(decimal_rep + decimal_point);
const int n = Min(len, p - (builder.position() - extra));
builder.AddSubstring(decimal_rep + decimal_point, n);
}
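
Every strlen call in this file becomes StrLength because strlen returns size_t while these call sites do int arithmetic; the helper centralizes the checked narrowing in one place. A sketch of what such a helper amounts to (the real definition lives in src/utils.h; this is illustrative):

#include <cstring>

inline int StrLength(const char* string) {
  size_t length = strlen(string);
  // The strings handled here are small, so the narrowing must be lossless.
  ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
  return static_cast<int>(length);
}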
diff --git a/src/d8.cc b/src/d8.cc
index e4658b1c..dedbd55b 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -159,7 +159,11 @@ Handle<Value> Shell::Write(const Arguments& args) {
printf(" ");
}
v8::String::Utf8Value str(args[i]);
- fwrite(*str, sizeof(**str), str.length(), stdout);
+ int n = fwrite(*str, sizeof(**str), str.length(), stdout);
+ if (n != str.length()) {
+ printf("Error in fwrite\n");
+ exit(1);
+ }
}
return Undefined();
}
@@ -203,7 +207,7 @@ Handle<Value> Shell::Load(const Arguments& args) {
return ThrowException(String::New("Error loading file"));
}
if (!ExecuteString(source, String::New(*file), false, false)) {
- return ThrowException(String::New("Error executing file"));
+ return ThrowException(String::New("Error executing file"));
}
}
return Undefined();
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 9d5cace0..07013825 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -105,7 +105,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
if (session_ != NULL) {
static const char* message = "Remote debugging session already active\r\n";
- client->Send(message, strlen(message));
+ client->Send(message, StrLength(message));
delete client;
return;
}
@@ -172,14 +172,15 @@ void DebuggerAgentSession::Run() {
}
// Convert UTF-8 to UTF-16.
- unibrow::Utf8InputBuffer<> buf(*message, strlen(*message));
+ unibrow::Utf8InputBuffer<> buf(*message,
+ StrLength(*message));
int len = 0;
while (buf.has_more()) {
buf.GetNext();
len++;
}
int16_t* temp = NewArray<int16_t>(len + 1);
- buf.Reset(*message, strlen(*message));
+ buf.Reset(*message, StrLength(*message));
for (int i = 0; i < len; i++) {
temp[i] = buf.GetNext();
}
@@ -203,7 +204,8 @@ void DebuggerAgentSession::Shutdown() {
const char* DebuggerAgentUtil::kContentLength = "Content-Length";
-int DebuggerAgentUtil::kContentLengthSize = strlen(kContentLength);
+int DebuggerAgentUtil::kContentLengthSize =
+ StrLength(kContentLength);
SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 35f7fcd7..04fde1f9 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1245,6 +1245,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.suspendRequest_(request, response);
} else if (request.command == 'version') {
this.versionRequest_(request, response);
+ } else if (request.command == 'profile') {
+ this.profileRequest_(request, response);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@@ -1924,6 +1926,25 @@ DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
};
+DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
+ if (!request.arguments) {
+ return response.failed('Missing arguments');
+ }
+ var modules = parseInt(request.arguments.modules);
+ if (isNaN(modules)) {
+ return response.failed('Modules is not an integer');
+ }
+ if (request.arguments.command == 'resume') {
+ %ProfilerResume(modules);
+ } else if (request.arguments.command == 'pause') {
+ %ProfilerPause(modules);
+ } else {
+ return response.failed('Unknown command');
+ }
+ response.body = {};
+};
+
+
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
diff --git a/src/debug.cc b/src/debug.cc
index d3a6b5b1..2c4552ef 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -108,12 +108,13 @@ void BreakLocationIterator::Next() {
// current value of these.
if (RelocInfo::IsPosition(rmode())) {
if (RelocInfo::IsStatementPosition(rmode())) {
- statement_position_ =
- rinfo()->data() - debug_info_->shared()->start_position();
+ statement_position_ = static_cast<int>(
+ rinfo()->data() - debug_info_->shared()->start_position());
}
// Always update the position as we don't want that to be before the
// statement position.
- position_ = rinfo()->data() - debug_info_->shared()->start_position();
+ position_ = static_cast<int>(
+ rinfo()->data() - debug_info_->shared()->start_position());
ASSERT(position_ >= 0);
ASSERT(statement_position_ >= 0);
}
@@ -182,7 +183,7 @@ void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Check if this break point is closer that what was previously found.
if (this->pc() < pc && pc - this->pc() < distance) {
closest_break_point = break_point();
- distance = pc - this->pc();
+ distance = static_cast<int>(pc - this->pc());
// Check whether we can't get any closer.
if (distance == 0) break;
}
@@ -1758,6 +1759,8 @@ bool Debugger::never_unload_debugger_ = false;
v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
bool Debugger::debugger_unload_pending_ = false;
v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
+v8::Debug::DebugMessageDispatchHandler
+ Debugger::debug_message_dispatch_handler_ = NULL;
int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
@@ -2398,6 +2401,12 @@ void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
}
+void Debugger::SetDebugMessageDispatchHandler(
+ v8::Debug::DebugMessageDispatchHandler handler) {
+ debug_message_dispatch_handler_ = handler;
+}
+
+
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
@@ -2428,6 +2437,10 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
if (!Debug::InDebugger()) {
StackGuard::DebugCommand();
}
+
+ if (Debugger::debug_message_dispatch_handler_ != NULL) {
+ Debugger::debug_message_dispatch_handler_();
+ }
}
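
Unlike the message handler, the new dispatch handler takes no arguments and is invoked directly from ProcessCommand on the thread that queued the command, so an embedder can wake its own event loop instead of polling for debug messages. A hedged usage sketch (the handler signature is assumed from the call site above to be a plain function taking no arguments):

static void OnDebugMessageQueued() {
  // Runs as soon as a command is enqueued; keep it cheap and just signal
  // the embedder's event loop, which then pumps the debugger on its side.
}

void InstallDispatchHandler() {
  v8::Debug::SetDebugMessageDispatchHandler(OnDebugMessageQueued);
}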
diff --git a/src/debug.h b/src/debug.h
index 29c2bc20..24f0db41 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -102,7 +102,9 @@ class BreakLocationIterator {
void ClearAllDebugBreak();
- inline int code_position() { return pc() - debug_info_->code()->entry(); }
+ inline int code_position() {
+ return static_cast<int>(pc() - debug_info_->code()->entry());
+ }
inline int break_point() { return break_point_; }
inline int position() { return position_; }
inline int statement_position() { return statement_position_; }
@@ -368,15 +370,6 @@ class Debug {
// Garbage collection notifications.
static void AfterGarbageCollection();
- // Code generation assumptions.
- static const int kIa32CallInstructionLength = 5;
- static const int kIa32JSReturnSequenceLength = 6;
-
- // The x64 JS return sequence is padded with int3 to make it large
- // enough to hold a call instruction when the debugger patches it.
- static const int kX64CallInstructionLength = 13;
- static const int kX64JSReturnSequenceLength = 13;
-
// Code generator routines.
static void GenerateLoadICDebugBreak(MacroAssembler* masm);
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
@@ -625,6 +618,8 @@ class Debugger {
static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period);
+ static void SetDebugMessageDispatchHandler(
+ v8::Debug::DebugMessageDispatchHandler handler);
// Invoke the message handler function.
static void InvokeMessageHandler(MessageImpl message);
@@ -685,6 +680,7 @@ class Debugger {
static v8::Debug::MessageHandler2 message_handler_;
static bool debugger_unload_pending_; // Was message handler cleared?
static v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ static v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
static int host_dispatch_micros_;
static DebuggerAgent* agent_;
diff --git a/src/disassembler.cc b/src/disassembler.cc
index e2f908d7..524dbe67 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -74,7 +74,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
}
if (code_ != NULL) {
- int offs = pc - code_->instruction_start();
+ int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
OS::SNPrintF(buffer, "%d (%p)", offs, pc);
@@ -289,7 +289,7 @@ static int DecodeIt(FILE* f,
}
delete it;
- return pc - begin;
+ return static_cast<int>(pc - begin);
}
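
The casts added in this file (and in debug.cc above) exist because a pointer difference has type ptrdiff_t, which is 64 bits wide on x64, so assigning it to int is a narrowing conversion the compiler flags. The offsets involved are known to fit, and the explicit cast documents that. In miniature (kMaxInt is assumed to be the usual INT_MAX-style constant from globals.h):

int CodeOffset(byte* pc, byte* begin) {
  ptrdiff_t diff = pc - begin;           // 64-bit on 64-bit targets
  ASSERT(diff >= 0 && diff <= kMaxInt);  // must fit in an int
  return static_cast<int>(diff);
}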
diff --git a/src/dtoa-config.c b/src/dtoa-config.c
index bc0a58a1..a1acd2dd 100644
--- a/src/dtoa-config.c
+++ b/src/dtoa-config.c
@@ -38,7 +38,7 @@
*/
#if !(defined(__APPLE__) && defined(__MACH__)) && \
- !defined(WIN32) && !defined(__FreeBSD__)
+ !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
#include <endian.h>
#endif
#include <math.h>
@@ -47,14 +47,16 @@
/* The floating point word order on ARM is big endian when floating point
* emulation is used, even if the byte order is little endian */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
- !defined(__FreeBSD__) && __FLOAT_WORD_ORDER == __BIG_ENDIAN
+ !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
+ __FLOAT_WORD_ORDER == __BIG_ENDIAN
#define IEEE_MC68k
#else
#define IEEE_8087
#endif
#define __MATH_H__
-#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__)
+#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__)
/* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
* name of strtod. If it's included after strtod is redefined as
* gay_strtod, it will mangle the name of gay_strtod, which is
diff --git a/src/execution.cc b/src/execution.cc
index 229b8df9..2f646a56 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -31,18 +31,8 @@
#include "api.h"
#include "codegen-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#else
-#error Unsupported target architecture.
-#endif
-
#include "debug.h"
+#include "simulator.h"
#include "v8threads.h"
namespace v8 {
@@ -237,15 +227,14 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
- if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
+ if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
thread_local_.jslimit_ = jslimit;
- Heap::SetStackLimit(jslimit);
}
- if (thread_local_.climit_ == thread_local_.initial_climit_) {
+ if (thread_local_.climit_ == thread_local_.real_climit_) {
thread_local_.climit_ = limit;
}
- thread_local_.initial_climit_ = limit;
- thread_local_.initial_jslimit_ = jslimit;
+ thread_local_.real_climit_ = limit;
+ thread_local_.real_jslimit_ = jslimit;
}
@@ -354,7 +343,7 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access;
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- Heap::SetStackLimit(thread_local_.jslimit_);
+ Heap::SetStackLimits();
return from + sizeof(ThreadLocal);
}
@@ -366,33 +355,33 @@ static internal::Thread::LocalStorageKey stack_limit_key =
void StackGuard::FreeThreadResources() {
Thread::SetThreadLocal(
stack_limit_key,
- reinterpret_cast<void*>(thread_local_.initial_climit_));
+ reinterpret_cast<void*>(thread_local_.real_climit_));
}
void StackGuard::ThreadLocal::Clear() {
- initial_jslimit_ = kIllegalLimit;
+ real_jslimit_ = kIllegalLimit;
jslimit_ = kIllegalLimit;
- initial_climit_ = kIllegalLimit;
+ real_climit_ = kIllegalLimit;
climit_ = kIllegalLimit;
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
- Heap::SetStackLimit(kIllegalLimit);
+ Heap::SetStackLimits();
}
void StackGuard::ThreadLocal::Initialize() {
- if (initial_climit_ == kIllegalLimit) {
+ if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
- initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
- initial_climit_ = limit;
+ real_climit_ = limit;
climit_ = limit;
- Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+ Heap::SetStackLimits();
}
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
diff --git a/src/execution.h b/src/execution.h
index ac00aa46..52198c42 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -150,10 +150,6 @@ class StackGuard : public AllStatic {
// is assumed to grow downwards.
static void SetStackLimit(uintptr_t limit);
- static Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
-
// Threading support.
static char* ArchiveStackGuard(char* to);
static char* RestoreStackGuard(char* from);
@@ -181,16 +177,24 @@ class StackGuard : public AllStatic {
#endif
static void Continue(InterruptFlag after_what);
- // This provides an asynchronous read of the stack limit for the current
+ // This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
static uintptr_t climit() {
return thread_local_.climit_;
}
-
static uintptr_t jslimit() {
return thread_local_.jslimit_;
}
+ static uintptr_t real_jslimit() {
+ return thread_local_.real_jslimit_;
+ }
+ static Address address_of_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.jslimit_);
+ }
+ static Address address_of_real_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
+ }
private:
// You should hold the ExecutionAccess lock when calling this method.
@@ -198,17 +202,17 @@ class StackGuard : public AllStatic {
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
- Heap::SetStackLimit(value);
thread_local_.jslimit_ = value;
thread_local_.climit_ = value;
+ Heap::SetStackLimits();
}
- // Reset limits to initial values. For example after handling interrupt.
+  // Reset limits to actual values. For example, after handling an interrupt.
// You should hold the ExecutionAccess lock when calling this method.
static void reset_limits(const ExecutionAccess& lock) {
- thread_local_.jslimit_ = thread_local_.initial_jslimit_;
- Heap::SetStackLimit(thread_local_.jslimit_);
- thread_local_.climit_ = thread_local_.initial_climit_;
+ thread_local_.jslimit_ = thread_local_.real_jslimit_;
+ thread_local_.climit_ = thread_local_.real_climit_;
+ Heap::SetStackLimits();
}
// Enable or disable interrupts.
@@ -232,10 +236,21 @@ class StackGuard : public AllStatic {
// Clear.
void Initialize();
void Clear();
- uintptr_t initial_jslimit_;
+
+ // The stack limit is split into a JavaScript and a C++ stack limit. These
+ // two are the same except when running on a simulator where the C++ and
+  // JavaScript stacks are separate. Each of the two stack limits has two
+  // values. The one with the real_ prefix is the actual stack limit
+ // set for the VM. The one without the real_ prefix has the same value as
+ // the actual stack limit except when there is an interruption (e.g. debug
+ // break or preemption) in which case it is lowered to make stack checks
+ // fail. Both the generated code and the runtime system check against the
+ // one without the real_ prefix.
+ uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
uintptr_t jslimit_;
- uintptr_t initial_climit_;
+ uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
uintptr_t climit_;
+
int nesting_;
int postpone_interrupts_nesting_;
int interrupt_flags_;
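
As the comment above spells out, stack checks consult only jslimit_ and climit_; the real_ values exist so the checked limits can be restored after an interruption. A standalone sketch of the mechanism (all names here are stand-ins, not the V8 declarations):

#include <cstdint>

struct Limits {
  uintptr_t real_jslimit_;  // limit actually configured for the VM
  uintptr_t jslimit_;       // limit that stack checks compare against
};

// The stack grows downwards, so a check fails when the stack pointer
// drops below the checked limit.
inline bool StackCheckFails(const Limits& l, uintptr_t sp) {
  return sp < l.jslimit_;
}

// An interrupt request raises jslimit_ so the very next check fails
// even though plenty of real stack remains...
inline void RequestInterrupt(Limits* l) {
  l->jslimit_ = ~static_cast<uintptr_t>(0);
}

// ...and handling the interrupt restores the real limit.
inline void ResetAfterInterrupt(Limits* l) {
  l->jslimit_ = l->real_jslimit_;
}
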
diff --git a/src/factory.cc b/src/factory.cc
index 32b69db3..83775ef6 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -106,10 +106,10 @@ Handle<String> Factory::NewConsString(Handle<String> first,
}
-Handle<String> Factory::NewStringSlice(Handle<String> str,
- int begin,
- int end) {
- CALL_HEAP_FUNCTION(str->Slice(begin, end), String);
+Handle<String> Factory::NewSubString(Handle<String> str,
+ int begin,
+ int end) {
+ CALL_HEAP_FUNCTION(str->SubString(begin, end), String);
}
@@ -189,7 +189,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_wrapper(*wrapper);
script->set_line_ends(Heap::undefined_value());
- script->set_eval_from_function(Heap::undefined_value());
+ script->set_eval_from_shared(Heap::undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));
return script;
diff --git a/src/factory.h b/src/factory.h
index cb438e95..951c0439 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -106,11 +106,10 @@ class Factory : public AllStatic {
static Handle<String> NewConsString(Handle<String> first,
Handle<String> second);
- // Create a new sliced string object which represents a substring of a
- // backing string.
- static Handle<String> NewStringSlice(Handle<String> str,
- int begin,
- int end);
+ // Create a new string object which holds a substring of a string.
+ static Handle<String> NewSubString(Handle<String> str,
+ int begin,
+ int end);
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index d0c264a7..1bdc3671 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "fast-codegen.h"
#include "stub-cache.h"
#include "debug.h"
@@ -35,6 +36,8 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval) {
@@ -53,6 +56,7 @@ Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
int FastCodeGenerator::SlotOffset(Slot* slot) {
+ ASSERT(slot != NULL);
// Offset is negative because higher indexes are at lower addresses.
int offset = -slot->index() * kPointerSize;
// Adjust by a (parameter or local) base offset.
@@ -75,86 +79,52 @@ void FastCodeGenerator::VisitDeclarations(
int length = declarations->length();
int globals = 0;
for (int i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
Slot* slot = var->slot();
// If it was not possible to allocate the variable at compile
// time, we need to "declare" it at runtime to make sure it
// actually exists in the local context.
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- UNREACHABLE();
+ VisitDeclaration(decl);
} else {
// Count global variables and functions for later processing
globals++;
}
}
- // Return in case of no declared global functions or variables.
- if (globals == 0) return;
-
// Compute array of global variable and function declarations.
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
- Slot* slot = var->slot();
-
- if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
- array->set(j++, *(var->name()));
- if (node->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
+ // Do nothing in case of no declared global functions or variables.
+ if (globals > 0) {
+ Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ for (int j = 0, i = 0; i < length; i++) {
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
+ Slot* slot = var->slot();
+
+ if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+ array->set(j++, *(var->name()));
+ if (decl->fun() == NULL) {
+ if (var->mode() == Variable::CONST) {
+            // In case this is a const property, use the hole.
+ array->set_the_hole(j++);
+ } else {
+ array->set_undefined(j++);
+ }
} else {
- array->set_undefined(j++);
+ Handle<JSFunction> function =
+ Compiler::BuildBoilerplate(decl->fun(), script_, this);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ array->set(j++, *function);
}
- } else {
- Handle<JSFunction> function = BuildBoilerplate(node->fun());
- // Check for stack-overflow exception.
- if (HasStackOverflow()) return;
- array->set(j++, *function);
}
}
+ // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+ DeclareGlobals(array);
}
-
- // Invoke the platform-dependent code generator to do the actual
- // declaration the global variables and functions.
- DeclareGlobals(array);
-}
-
-Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) {
-#ifdef DEBUG
- // We should not try to compile the same function literal more than
- // once.
- fun->mark_as_compiled();
-#endif
-
- // Generate code
- Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters());
- // Check for stack-overflow exception.
- if (code.is_null()) {
- SetStackOverflow();
- return Handle<JSFunction>::null();
- }
-
- // Create a boilerplate function.
- Handle<JSFunction> function =
- Factory::NewFunctionBoilerplate(fun->name(),
- fun->materialized_literal_count(),
- code);
- CodeGenerator::SetFunctionInfo(function, fun, false, script_);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger that a new function has been added.
- Debugger::OnNewFunction(function);
-#endif
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(function,
- fun->expected_property_count());
- return function;
}
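
The array built above is sized 2 * globals because each declaration occupies a flat [name, value] pair: undefined for a plain variable, the hole for a const, and the compiled boilerplate for a function. A rough model of that packing (the types here are hypothetical):

#include <string>
#include <utility>
#include <vector>

enum GlobalValue { kUndefined, kTheHole, kFunctionBoilerplate };

struct Decl {
  std::string name;
  bool is_const;
  bool has_function;
};

// Pack declarations as [name, value] pairs, mirroring the FixedArray
// layout handed to DeclareGlobals.
std::vector<std::pair<std::string, GlobalValue> > PackGlobals(
    const std::vector<Decl>& decls) {
  std::vector<std::pair<std::string, GlobalValue> > pairs;
  for (size_t i = 0; i < decls.size(); i++) {
    GlobalValue value = decls[i].has_function
        ? kFunctionBoilerplate
        : (decls[i].is_const ? kTheHole : kUndefined);
    pairs.push_back(std::make_pair(decls[i].name, value));
  }
  return pairs;
}
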
@@ -186,8 +156,91 @@ void FastCodeGenerator::SetSourcePosition(int pos) {
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
+void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
+#ifdef DEBUG
+ Expression::Context expected = Expression::kUninitialized;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through.
+ case Expression::kTest:
+ // The value of the left subexpression is not needed.
+ expected = Expression::kTest;
+ break;
+ case Expression::kValue:
+ // The value of the left subexpression is needed and its specific
+ // context depends on the operator.
+ expected = (expr->op() == Token::OR)
+ ? Expression::kValueTest
+ : Expression::kTestValue;
+ break;
+ case Expression::kValueTest:
+ // The value of the left subexpression is needed for OR.
+ expected = (expr->op() == Token::OR)
+ ? Expression::kValueTest
+ : Expression::kTest;
+ break;
+ case Expression::kTestValue:
+ // The value of the left subexpression is needed for AND.
+ expected = (expr->op() == Token::OR)
+ ? Expression::kTest
+ : Expression::kTestValue;
+ break;
+ }
+ ASSERT_EQ(expected, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+#endif
+
+ Label eval_right, done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+
+ // Set up the appropriate context for the left subexpression based on the
+ // operation and our own context.
+ if (expr->op() == Token::OR) {
+ // If there is no usable true label in the OR expression's context, use
+ // the end of this expression, otherwise inherit the same true label.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ true_label_ = &done;
+ }
+ // The false label is the label of the second subexpression.
+ false_label_ = &eval_right;
+ } else {
+ ASSERT_EQ(Token::AND, expr->op());
+ // The true label is the label of the second subexpression.
+ true_label_ = &eval_right;
+ // If there is no usable false label in the AND expression's context,
+ // use the end of the expression, otherwise inherit the same false
+ // label.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ false_label_ = &done;
+ }
+ }
+
+ Visit(expr->left());
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+
+ __ bind(&eval_right);
+ Visit(expr->right());
+
+ __ bind(&done);
+}
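
The DEBUG switch at the top of EmitLogicalOperation encodes how the left operand's expression context is derived from the operator and the outer context. The same table reads more directly as a standalone function (an illustrative rendering, not the V8 code):

// For `left OP right`, right inherits the full outer context; left only
// needs enough context to decide whether right is evaluated at all.
enum Context { kEffect, kValue, kTest, kValueTest, kTestValue };
enum Op { kOr, kAnd };

static Context LeftContext(Op op, Context outer) {
  switch (outer) {
    case kEffect:
    case kTest:      return kTest;  // left's value is never needed
    case kValue:     return (op == kOr) ? kValueTest : kTestValue;
    case kValueTest: return (op == kOr) ? kValueTest : kTest;
    case kTestValue: return (op == kOr) ? kTest : kTestValue;
  }
  return kTest;  // not reached
}
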
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+ Comment cmnt(masm_, "[ Block");
+ SetStatementPosition(stmt);
+ VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ SetStatementPosition(stmt);
+ Visit(stmt->expression());
}
@@ -198,7 +251,29 @@ void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ IfStatement");
+  // Expressions cannot recursively enter statements, so there are no
+  // labels in the state.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+ Label then_part, else_part, done;
+
+ // Do not worry about optimizing for empty then or else bodies.
+ true_label_ = &then_part;
+ false_label_ = &else_part;
+ ASSERT(stmt->condition()->context() == Expression::kTest);
+ Visit(stmt->condition());
+ true_label_ = NULL;
+ false_label_ = NULL;
+
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ __ jmp(&done);
+
+ __ bind(&else_part);
+ Visit(stmt->else_statement());
+
+ __ bind(&done);
}
@@ -228,17 +303,120 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ DoWhileStatement");
+ increment_loop_depth();
+ Label body, exit, stack_limit_hit, stack_check_success;
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+
+ decrement_loop_depth();
}
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ WhileStatement");
+ increment_loop_depth();
+ Label test, body, exit, stack_limit_hit, stack_check_success;
+
+ // Emit the test at the bottom of the loop.
+ __ jmp(&test);
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ __ bind(&test);
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+
+ decrement_loop_depth();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ ForStatement");
+ Label test, body, exit, stack_limit_hit, stack_check_success;
+ if (stmt->init() != NULL) Visit(stmt->init());
+
+ increment_loop_depth();
+ // Emit the test at the bottom of the loop (even if empty).
+ __ jmp(&test);
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ if (stmt->next() != NULL) Visit(stmt->next());
+
+ __ bind(&test);
+
+ if (stmt->cond() == NULL) {
+ // For an empty test jump to the top of the loop.
+ __ jmp(&body);
+ } else {
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+ }
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+ decrement_loop_depth();
}
@@ -258,7 +436,12 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNREACHABLE();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ SetStatementPosition(stmt);
+ __ CallRuntime(Runtime::kDebugBreak, 0);
+ // Ignore the return value.
+#endif
}
@@ -269,7 +452,37 @@ void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
void FastCodeGenerator::VisitConditional(Conditional* expr) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ Conditional");
+ ASSERT_EQ(Expression::kTest, expr->condition()->context());
+ ASSERT_EQ(expr->context(), expr->then_expression()->context());
+ ASSERT_EQ(expr->context(), expr->else_expression()->context());
+
+
+ Label true_case, false_case, done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+
+ true_label_ = &true_case;
+ false_label_ = &false_case;
+ Visit(expr->condition());
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+
+ __ bind(&true_case);
+ Visit(expr->then_expression());
+ // If control flow falls through Visit, jump to done.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ __ jmp(&done);
+ }
+
+ __ bind(&false_case);
+ Visit(expr->else_expression());
+ // If control flow falls through Visit, merge it with true case here.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ __ bind(&done);
+ }
}
@@ -280,53 +493,68 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
void FastCodeGenerator::VisitLiteral(Literal* expr) {
- // No code is emitted (here) for simple literals.
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ Literal");
+ Move(expr->context(), expr);
}
-void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- UNREACHABLE();
-}
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+ // Record source code position of the (possible) IC call.
+ SetSourcePosition(expr->position());
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type = (prop->key()->context() == Expression::kUninitialized)
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+ Expression* rhs = expr->value();
+ ASSERT_EQ(Expression::kValue, rhs->context());
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNREACHABLE();
+ switch (assign_type) {
+ case VARIABLE:
+ Visit(rhs);
+ EmitVariableAssignment(expr);
+ break;
+ case NAMED_PROPERTY:
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(rhs);
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->key());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(rhs);
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
}
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FastCodeGenerator::VisitThrow(Throw* expr) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
+#undef __
} } // namespace v8::internal
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index 42d6cde9..9b262a73 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -39,7 +39,13 @@ namespace internal {
class FastCodeGenerator: public AstVisitor {
public:
FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
- : masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) {
+ : masm_(masm),
+ function_(NULL),
+ script_(script),
+ is_eval_(is_eval),
+ loop_depth_(0),
+ true_label_(NULL),
+ false_label_(NULL) {
}
static Handle<Code> MakeCode(FunctionLiteral* fun,
@@ -50,25 +56,79 @@ class FastCodeGenerator: public AstVisitor {
private:
int SlotOffset(Slot* slot);
+ void Move(Expression::Context destination, Register source);
+ void Move(Expression::Context destination, Slot* source, Register scratch);
+ void Move(Expression::Context destination, Literal* source);
+ void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
+ void Move(Register dst, Slot* source);
+
+ // Templated to allow for Operand on intel and MemOperand on ARM.
+ template <typename MemoryLocation>
+ MemoryLocation CreateSlotOperand(Slot* slot, Register scratch);
+
+ // Drop the TOS, and store source to destination.
+ // If destination is TOS, just overwrite TOS with source.
+ void DropAndMove(Expression::Context destination,
+ Register source,
+ int drop_count = 1);
+
+  // Test the JavaScript value in source as if in a test context, compiling
+ // control flow to a pair of labels.
+ void TestAndBranch(Register source, Label* true_label, Label* false_label);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun);
void DeclareGlobals(Handle<FixedArray> pairs);
+ // Platform-specific return sequence
+ void EmitReturnSequence(int position);
+
+ // Platform-specific code sequences for calls
+ void EmitCallWithStub(Call* expr);
+ void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
+
+ // Platform-specific support for compiling assignments.
+
+ // Complete a variable assignment. The right-hand-side value is expected
+ // on top of the stack.
+ void EmitVariableAssignment(Assignment* expr);
+
+ // Complete a named property assignment. The receiver and right-hand-side
+ // value are expected on top of the stack.
+ void EmitNamedPropertyAssignment(Assignment* expr);
+
+  // Complete a keyed property assignment. The receiver, key, and
+ // right-hand-side value are expected on top of the stack.
+ void EmitKeyedPropertyAssignment(Assignment* expr);
+
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
void SetSourcePosition(int pos);
+ int loop_depth() { return loop_depth_; }
+ void increment_loop_depth() { loop_depth_++; }
+ void decrement_loop_depth() {
+ ASSERT(loop_depth_ > 0);
+ loop_depth_--;
+ }
+
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+  // Handles the short-circuited logical binary operations in
+  // VisitBinaryOperation.
+ void EmitLogicalOperation(BinaryOperation* expr);
+
MacroAssembler* masm_;
FunctionLiteral* function_;
Handle<Script> script_;
bool is_eval_;
+ Label return_label_;
+ int loop_depth_;
+
+ Label* true_label_;
+ Label* false_label_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 42c96b68..88fda123 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -114,6 +114,8 @@ DEFINE_bool(enable_rdtsc, true,
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
+DEFINE_bool(enable_vfp3, true,
+ "enable use of VFP3 instructions if available (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -132,8 +134,6 @@ DEFINE_bool(stack_trace_on_abort, true,
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace, false, "trace function calls")
DEFINE_bool(defer_negation, true, "defer negation operation")
-DEFINE_bool(check_stack, true,
- "check stack for overflow, interrupt, breakpoint")
// codegen.cc
DEFINE_bool(lazy, true, "use lazy compilation")
@@ -147,6 +147,8 @@ DEFINE_bool(fast_compiler, true,
"use the fast-mode compiler for some top-level code")
DEFINE_bool(trace_bailout, false,
"print reasons for failing to use fast compilation")
+DEFINE_bool(always_fast_compiler, false,
+ "always try using the fast compiler")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -154,9 +156,9 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
// debug.cc
DEFINE_bool(remote_debugging, false, "enable remote debugging")
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
-DEFINE_bool(debugger_auto_break, false,
+DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
- "in the queue (experimental)")
+ "in the queue")
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
@@ -198,6 +200,7 @@ DEFINE_bool(canonicalize_object_literal_maps, true,
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
+DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
diff --git a/src/flags.cc b/src/flags.cc
index 5df3afd5..d444c976 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -303,8 +303,8 @@ static void SplitArgument(const char* arg,
// get the value if any
if (*arg == '=') {
// make a copy so we can NUL-terminate flag name
- int n = arg - *name;
- CHECK(n < buffer_size); // buffer is too small
+ size_t n = arg - *name;
+ CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
memcpy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
diff --git a/src/frames.cc b/src/frames.cc
index 5cd83324..7c327dd3 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -393,8 +393,19 @@ Code* EntryConstructFrame::code() const {
}
+Object*& ExitFrame::code_slot() const {
+ const int offset = ExitFrameConstants::kCodeOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
Code* ExitFrame::code() const {
- return Heap::c_entry_code();
+ Object* code = code_slot();
+ if (code->IsSmi()) {
+ return Heap::c_entry_debug_break_code();
+ } else {
+ return Code::cast(code);
+ }
}
@@ -412,11 +423,6 @@ Address ExitFrame::GetCallerStackPointer() const {
}
-Code* ExitDebugFrame::code() const {
- return Heap::c_entry_debug_break_code();
-}
-
-
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kPointerSize;
@@ -430,7 +436,7 @@ int StandardFrame::ComputeExpressionsCount() const {
Address limit = sp();
ASSERT(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
- return (base - limit) / kPointerSize;
+ return static_cast<int>((base - limit) / kPointerSize);
}
@@ -460,7 +466,7 @@ Object* JavaScriptFrame::GetParameter(int index) const {
int JavaScriptFrame::ComputeParametersCount() const {
Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
- return (base - limit) / kPointerSize;
+ return static_cast<int>((base - limit) / kPointerSize);
}
diff --git a/src/frames.h b/src/frames.h
index 768196d3..024065ab 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -93,7 +93,6 @@ class StackHandler BASE_EMBEDDED {
V(ENTRY, EntryFrame) \
V(ENTRY_CONSTRUCT, EntryConstructFrame) \
V(EXIT, ExitFrame) \
- V(EXIT_DEBUG, ExitDebugFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
@@ -119,7 +118,6 @@ class StackFrame BASE_EMBEDDED {
bool is_entry() const { return type() == ENTRY; }
bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
bool is_exit() const { return type() == EXIT; }
- bool is_exit_debug() const { return type() == EXIT_DEBUG; }
bool is_java_script() const { return type() == JAVA_SCRIPT; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
@@ -260,10 +258,13 @@ class EntryConstructFrame: public EntryFrame {
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
+ enum Mode { MODE_NORMAL, MODE_DEBUG };
virtual Type type() const { return EXIT; }
virtual Code* code() const;
+ Object*& code_slot() const;
+
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -289,26 +290,6 @@ class ExitFrame: public StackFrame {
};
-class ExitDebugFrame: public ExitFrame {
- public:
- virtual Type type() const { return EXIT_DEBUG; }
-
- virtual Code* code() const;
-
- static ExitDebugFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_exit_debug());
- return static_cast<ExitDebugFrame*>(frame);
- }
-
- protected:
- explicit ExitDebugFrame(StackFrameIterator* iterator)
- : ExitFrame(iterator) { }
-
- private:
- friend class StackFrameIterator;
-};
-
-
class StandardFrame: public StackFrame {
public:
// Testers.
diff --git a/src/global-handles.cc b/src/global-handles.cc
index f4b69fcd..1a0c9829 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -44,6 +44,10 @@ class GlobalHandles::Node : public Malloced {
callback_ = NULL;
}
+ Node() {
+ state_ = DESTROYED;
+ }
+
explicit Node(Object* object) {
Initialize(object);
// Initialize link structure.
@@ -161,6 +165,9 @@ class GlobalHandles::Node : public Malloced {
// It's fine though to reuse nodes that were destroyed in weak callback
// as those cannot be deallocated until we are back from the callback.
set_first_free(NULL);
+ if (first_deallocated()) {
+ first_deallocated()->set_next(head());
+ }
// Leaving V8.
VMState state(EXTERNAL);
func(object, par);
@@ -200,20 +207,81 @@ class GlobalHandles::Node : public Malloced {
};
+class GlobalHandles::Pool BASE_EMBEDDED {
+ public:
+ Pool() {
+ current_ = new Chunk();
+ current_->previous = NULL;
+ next_ = current_->nodes;
+ limit_ = current_->nodes + kNodesPerChunk;
+ }
+
+ Node* Allocate() {
+ if (next_ < limit_) {
+ return next_++;
+ }
+ return SlowAllocate();
+ }
+
+ void Release() {
+ Chunk* current = current_;
+    ASSERT(current != NULL);  // At least a single block must be allocated.
+ do {
+ Chunk* previous = current->previous;
+ delete current;
+ current = previous;
+ } while (current != NULL);
+ current_ = NULL;
+ next_ = limit_ = NULL;
+ }
+
+ private:
+ static const int kNodesPerChunk = (1 << 12) - 1;
+ struct Chunk : public Malloced {
+ Chunk* previous;
+ Node nodes[kNodesPerChunk];
+ };
+
+ Node* SlowAllocate() {
+ Chunk* chunk = new Chunk();
+ chunk->previous = current_;
+ current_ = chunk;
+
+ Node* new_nodes = current_->nodes;
+ next_ = new_nodes + 1;
+ limit_ = new_nodes + kNodesPerChunk;
+ return new_nodes;
+ }
+
+ Chunk* current_;
+ Node* next_;
+ Node* limit_;
+};
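
Pool above is a bump-pointer chunk allocator: nodes come off the open chunk one increment at a time, and Release() frees whole chunks in a single pass, never individual nodes. A self-contained sketch of the same idea (the template is a stand-in, not the V8 class):

#include <cstddef>

template <typename NodeType>
class ChunkPool {
 public:
  ChunkPool()
      : current_(new Chunk(NULL)),
        next_(current_->nodes),
        limit_(current_->nodes + kNodesPerChunk) {}

  NodeType* Allocate() {
    if (next_ < limit_) return next_++;  // fast bump-pointer path
    Chunk* chunk = new Chunk(current_);  // chain a fresh chunk
    current_ = chunk;
    next_ = chunk->nodes + 1;
    limit_ = chunk->nodes + kNodesPerChunk;
    return chunk->nodes;
  }

  // Frees every chunk at once; individual nodes are never freed.
  void Release() {
    for (Chunk* current = current_; current != NULL; ) {
      Chunk* previous = current->previous;
      delete current;
      current = previous;
    }
    current_ = NULL;
    next_ = limit_ = NULL;
  }

 private:
  static const int kNodesPerChunk = (1 << 12) - 1;
  struct Chunk {
    explicit Chunk(Chunk* p) : previous(p) {}
    Chunk* previous;
    NodeType nodes[kNodesPerChunk];
  };

  Chunk* current_;
  NodeType* next_;
  NodeType* limit_;
};
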
+
+
+static GlobalHandles::Pool pool_;
+
+
Handle<Object> GlobalHandles::Create(Object* value) {
Counters::global_handles.Increment();
Node* result;
- if (first_free() == NULL) {
- // Allocate a new node.
- result = new Node(value);
- result->set_next(head());
- set_head(result);
- } else {
+ if (first_free()) {
// Take the first node in the free list.
result = first_free();
set_first_free(result->next_free());
- result->Initialize(value);
+ } else if (first_deallocated()) {
+    // Next, try the deallocated list.
+ result = first_deallocated();
+ set_first_deallocated(result->next_free());
+ ASSERT(result->next() == head());
+ set_head(result);
+ } else {
+ // Allocate a new node.
+ result = pool_.Allocate();
+ result->set_next(head());
+ set_head(result);
}
+ result->Initialize(value);
return result->handle();
}
@@ -292,7 +360,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
- // At the same time deallocate all DESTROYED nodes
+ // At the same time deallocate all DESTROYED nodes.
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count;
Node** p = &head_;
@@ -310,17 +378,24 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
// Delete the link.
Node* node = *p;
*p = node->next(); // Update the link.
- delete node;
+ if (first_deallocated()) {
+ first_deallocated()->set_next(node);
+ }
+ node->set_next_free(first_deallocated());
+ set_first_deallocated(node);
} else {
p = (*p)->next_addr();
}
}
set_first_free(NULL);
+ if (first_deallocated()) {
+ first_deallocated()->set_next(head());
+ }
}
-void GlobalHandles::IterateRoots(ObjectVisitor* v) {
- // Traversal of global handles marked as NORMAL or NEAR_DEATH.
+void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
+ // Traversal of global handles marked as NORMAL.
for (Node* current = head_; current != NULL; current = current->next()) {
if (current->state_ == Node::NORMAL) {
v->VisitPointer(&current->object_);
@@ -328,17 +403,22 @@ void GlobalHandles::IterateRoots(ObjectVisitor* v) {
}
}
-void GlobalHandles::TearDown() {
- // Delete all the nodes in the linked list.
- Node* current = head_;
- while (current != NULL) {
- Node* n = current;
- current = current->next();
- delete n;
+
+void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
+ for (Node* current = head_; current != NULL; current = current->next()) {
+ if (current->state_ != Node::DESTROYED) {
+ v->VisitPointer(&current->object_);
+ }
}
- // Reset the head and free_list.
+}
+
+
+void GlobalHandles::TearDown() {
+ // Reset all the lists.
set_head(NULL);
set_first_free(NULL);
+ set_first_deallocated(NULL);
+ pool_.Release();
}
@@ -347,6 +427,27 @@ int GlobalHandles::number_of_global_object_weak_handles_ = 0;
GlobalHandles::Node* GlobalHandles::head_ = NULL;
GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
+GlobalHandles::Node* GlobalHandles::first_deallocated_ = NULL;
+
+void GlobalHandles::RecordStats(HeapStats* stats) {
+ *stats->global_handle_count = 0;
+ *stats->weak_global_handle_count = 0;
+ *stats->pending_global_handle_count = 0;
+ *stats->near_death_global_handle_count = 0;
+ *stats->destroyed_global_handle_count = 0;
+ for (Node* current = head_; current != NULL; current = current->next()) {
+ *stats->global_handle_count += 1;
+ if (current->state_ == Node::WEAK) {
+ *stats->weak_global_handle_count += 1;
+ } else if (current->state_ == Node::PENDING) {
+ *stats->pending_global_handle_count += 1;
+ } else if (current->state_ == Node::NEAR_DEATH) {
+ *stats->near_death_global_handle_count += 1;
+ } else if (current->state_ == Node::DESTROYED) {
+ *stats->destroyed_global_handle_count += 1;
+ }
+ }
+}
#ifdef DEBUG
diff --git a/src/global-handles.h b/src/global-handles.h
index feb95bf2..659f86ec 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -48,7 +48,8 @@ namespace internal {
class ObjectGroup : public Malloced {
public:
ObjectGroup() : objects_(4) {}
- explicit ObjectGroup(size_t capacity) : objects_(capacity) {}
+ explicit ObjectGroup(size_t capacity)
+ : objects_(static_cast<int>(capacity)) { }
List<Object**> objects_;
};
@@ -77,6 +78,8 @@ class GlobalHandles : public AllStatic {
// Returns the current number of weak handles.
static int NumberOfWeakHandles() { return number_of_weak_handles_; }
+ static void RecordStats(HeapStats* stats);
+
// Returns the current number of weak handles to global objects.
// These handles are also included in NumberOfWeakHandles().
static int NumberOfGlobalObjectWeakHandles() {
@@ -95,8 +98,11 @@ class GlobalHandles : public AllStatic {
// Process pending weak handles.
static void PostGarbageCollectionProcessing();
+ // Iterates over all strong handles.
+ static void IterateStrongRoots(ObjectVisitor* v);
+
// Iterates over all handles.
- static void IterateRoots(ObjectVisitor* v);
+ static void IterateAllRoots(ObjectVisitor* v);
// Iterates over all weak roots in heap.
static void IterateWeakRoots(ObjectVisitor* v);
@@ -127,6 +133,7 @@ class GlobalHandles : public AllStatic {
static void PrintStats();
static void Print();
#endif
+ class Pool;
private:
// Internal node structure, one for each global handle.
class Node;
@@ -148,6 +155,23 @@ class GlobalHandles : public AllStatic {
static Node* first_free_;
static Node* first_free() { return first_free_; }
static void set_first_free(Node* value) { first_free_ = value; }
+
+ // List of deallocated nodes.
+ // Deallocated nodes form a prefix of all the nodes and
+  // |first_deallocated| points to the last deallocated node before
+ // |head|. Those deallocated nodes are additionally linked
+ // by |next_free|:
+ // 1st deallocated head
+ // | |
+ // V V
+ // node node ... node node
+ // .next -> .next -> .next ->
+ // <- .next_free <- .next_free <- .next_free
+ static Node* first_deallocated_;
+ static Node* first_deallocated() { return first_deallocated_; }
+ static void set_first_deallocated(Node* value) {
+ first_deallocated_ = value;
+ }
};
diff --git a/src/globals.h b/src/globals.h
index efe0127e..ad0539f4 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -103,6 +103,10 @@ typedef byte* Address;
#define V8PRIxPTR "lx"
#endif
+#if defined(__APPLE__) && defined(__MACH__)
+#define USING_MAC_ABI
+#endif
+
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
@@ -170,6 +174,15 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
#endif
+// Constants relevant to double precision floating point numbers.
+
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
+const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
+
+
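
A quiet NaN has all eleven exponent bits (52 through 62) set plus the quiet bit (51), which is exactly what kQuietNaNMask covers. A short sketch of how the constant can be applied (the IsQuietNaN helper is illustrative, not part of this header):

#include <string.h>

// Reinterpret the double's bits and require bits 51..62 to all be set.
static inline bool IsQuietNaN(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return (bits & kQuietNaNMask) == kQuietNaNMask;
}
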
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
@@ -263,7 +276,9 @@ enum AllocationSpace {
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
- LAST_SPACE = LO_SPACE
+ LAST_SPACE = LO_SPACE,
+ FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
+ LAST_PAGED_SPACE = CELL_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
@@ -279,6 +294,8 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
+
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
@@ -558,6 +575,17 @@ inline Dest bit_cast(const Source& source) {
}
+// Feature flags bit positions. They are mostly based on the CPUID spec.
+// (We assign CPUID itself to one of the currently reserved bits --
+// feel free to change this if needed.)
+enum CpuFeature { SSE3 = 32, // x86
+ SSE2 = 26, // x86
+ CMOV = 15, // x86
+ RDTSC = 4, // x86
+ CPUID = 10, // x86
+ VFP3 = 1, // ARM
+ SAHF = 0}; // x86
+
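
Since each CpuFeature value is a bit position, a word of supported features can be probed with a single shift and mask; the word must be 64 bits wide because SSE3 is assigned bit 32. A sketch of the probe (the helper and the features word are hypothetical):

// True when the given feature's bit is set in the features word.
static inline bool FeatureIsSupported(uint64_t features, CpuFeature f) {
  return (features & (static_cast<uint64_t>(1) << f)) != 0;
}
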
} } // namespace v8::internal
#endif // V8_GLOBALS_H_
diff --git a/src/handles.cc b/src/handles.cc
index b764334e..d551e21c 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -37,6 +37,7 @@
#include "global-handles.h"
#include "natives.h"
#include "runtime.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -49,8 +50,8 @@ v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
int HandleScope::NumberOfHandles() {
int n = HandleScopeImplementer::instance()->blocks()->length();
if (n == 0) return 0;
- return ((n - 1) * kHandleBlockSize) +
- (current_.next - HandleScopeImplementer::instance()->blocks()->last());
+ return ((n - 1) * kHandleBlockSize) + static_cast<int>(
+ (current_.next - HandleScopeImplementer::instance()->blocks()->last()));
}
@@ -105,6 +106,21 @@ void HandleScope::ZapRange(Object** start, Object** end) {
}
+Address HandleScope::current_extensions_address() {
+ return reinterpret_cast<Address>(&current_.extensions);
+}
+
+
+Address HandleScope::current_next_address() {
+ return reinterpret_cast<Address>(&current_.next);
+}
+
+
+Address HandleScope::current_limit_address() {
+ return reinterpret_cast<Address>(&current_.limit);
+}
+
+
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
Handle<JSArray> array) {
CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
@@ -285,7 +301,9 @@ Handle<Object> GetPrototype(Handle<Object> obj) {
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
- Handle<String> key = Factory::hidden_symbol();
+ Object* holder = obj->BypassGlobalProxy();
+ if (holder->IsUndefined()) return Factory::undefined_value();
+ obj = Handle<JSObject>(JSObject::cast(holder));
if (obj->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
@@ -294,7 +312,7 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = obj->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == *key) &&
+ (descriptors->GetKey(0) == Heap::hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
@@ -304,17 +322,17 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// Only attempt to find the hidden properties in the local object and not
// in the prototype chain. Note that HasLocalProperty() can cause a GC in
// the general case in the presence of interceptors.
- if (!obj->HasLocalProperty(*key)) {
+ if (!obj->HasHiddenPropertiesObject()) {
// Hidden properties object not found. Allocate a new hidden properties
// object if requested. Otherwise return the undefined value.
if (create_if_needed) {
Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
- return SetProperty(obj, key, hidden_obj, DONT_ENUM);
+ CALL_HEAP_FUNCTION(obj->SetHiddenPropertiesObject(*hidden_obj), Object);
} else {
return Factory::undefined_value();
}
}
- return GetProperty(obj, key);
+ return Handle<Object>(obj->GetHiddenPropertiesObject());
}
@@ -338,7 +356,7 @@ Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
Handle<String> SubString(Handle<String> str, int start, int end) {
- CALL_HEAP_FUNCTION(str->Slice(start, end), String);
+ CALL_HEAP_FUNCTION(str->SubString(start, end), String);
}
@@ -415,8 +433,8 @@ void InitScriptLineEnds(Handle<Script> script) {
if (!script->source()->IsString()) {
ASSERT(script->source()->IsUndefined());
- script->set_line_ends(*(Factory::NewJSArray(0)));
- ASSERT(script->line_ends()->IsJSArray());
+ script->set_line_ends(*(Factory::NewFixedArray(0)));
+ ASSERT(script->line_ends()->IsFixedArray());
return;
}
@@ -449,9 +467,8 @@ void InitScriptLineEnds(Handle<Script> script) {
}
ASSERT(array_index == line_count);
- Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
- script->set_line_ends(*object);
- ASSERT(script->line_ends()->IsJSArray());
+ script->set_line_ends(*array);
+ ASSERT(script->line_ends()->IsFixedArray());
}
@@ -459,17 +476,18 @@ void InitScriptLineEnds(Handle<Script> script) {
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
AssertNoAllocation no_allocation;
- JSArray* line_ends_array = JSArray::cast(script->line_ends());
- const int line_ends_len = (Smi::cast(line_ends_array->length()))->value();
+ FixedArray* line_ends_array =
+ FixedArray::cast(script->line_ends());
+ const int line_ends_len = line_ends_array->length();
int line = -1;
if (line_ends_len > 0 &&
- code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) {
+ code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) {
line = 0;
} else {
for (int i = 1; i < line_ends_len; ++i) {
- if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos &&
- code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) {
+ if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos &&
+ code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) {
line = i;
break;
}
@@ -530,6 +548,12 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type) {
Handle<FixedArray> content = Factory::empty_fixed_array();
+ Handle<JSObject> arguments_boilerplate =
+ Handle<JSObject>(
+ Top::context()->global_context()->arguments_boilerplate());
+ Handle<JSFunction> arguments_function =
+ Handle<JSFunction>(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()));
// Only collect keys if access is permitted.
for (Handle<Object> p = object;
@@ -559,8 +583,21 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
}
- // Compute the property keys.
- content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+ // We can cache the computed property keys if access checks are
+ // not needed and no interceptors are involved.
+ //
+ // We do not use the cache if the object has elements and
+ // therefore it does not make sense to cache the property names
+ // for arguments objects. Arguments objects will always have
+ // elements.
+ bool cache_enum_keys =
+ ((current->map()->constructor() != *arguments_function) &&
+ !current->IsAccessCheckNeeded() &&
+ !current->HasNamedInterceptor() &&
+ !current->HasIndexedInterceptor());
+ // Compute the property keys and cache them if possible.
+ content =
+ UnionOfKeys(content, GetEnumPropertyKeys(current, cache_enum_keys));
// Add the property keys from the interceptor.
if (current->HasNamedInterceptor()) {
@@ -587,7 +624,8 @@ Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
}
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_result) {
int index = 0;
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
@@ -610,10 +648,12 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
}
}
(*storage)->SortPairs(*sort_array, sort_array->length());
- Handle<FixedArray> bridge_storage =
- Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage, *storage);
+ if (cache_result) {
+ Handle<FixedArray> bridge_storage =
+ Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
+ DescriptorArray* desc = object->map()->instance_descriptors();
+ desc->SetEnumCache(*bridge_storage, *storage);
+ }
ASSERT(storage->length() == index);
return storage;
} else {
@@ -672,6 +712,11 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
}
+Handle<Code> ComputeLazyCompile(int argc) {
+ CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
+}
+
+
OptimizedObjectForAddingMultipleProperties::
~OptimizedObjectForAddingMultipleProperties() {
// Reoptimize the object to allow fast property access.
diff --git a/src/handles.h b/src/handles.h
index 5d574657..fe820d59 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -133,6 +133,13 @@ class HandleScope {
return result;
}
+ // Deallocates any extensions used by the current scope.
+ static void DeleteExtensions();
+
+ static Address current_extensions_address();
+ static Address current_next_address();
+ static Address current_limit_address();
+
private:
// Prevent heap allocation or illegal handle scopes.
HandleScope(const HandleScope&);
@@ -166,9 +173,6 @@ class HandleScope {
// Extend the handle scope making room for more handles.
static internal::Object** Extend();
- // Deallocates any extensions used by the current scope.
- static void DeleteExtensions();
-
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
@@ -273,7 +277,8 @@ enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type);
Handle<JSArray> GetKeysFor(Handle<JSObject> object);
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_result);
// Computes the union of keys and return the result.
// Used for implementing "for (n in object) { }"
@@ -304,8 +309,8 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype);
-// Do lazy compilation of the given function. Returns true on success
-// and false if the compilation resulted in a stack overflow.
+// Does lazy compilation of the given function. Returns true on success and
+// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
@@ -315,6 +320,9 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
+// Returns the lazy compilation stub for argc arguments.
+Handle<Code> ComputeLazyCompile(int argc);
+
// These deal with lazily loaded properties.
void SetupLazy(Handle<JSObject> obj,
int index,
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 0646878e..eccd5ee2 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -41,10 +41,10 @@ int Heap::MaxObjectSizeInPagedSpace() {
Object* Heap::AllocateSymbol(Vector<const char> str,
int chars,
- uint32_t length_field) {
+ uint32_t hash_field) {
unibrow::Utf8InputBuffer<> buffer(str.start(),
static_cast<unsigned>(str.length()));
- return AllocateInternalSymbol(&buffer, chars, length_field);
+ return AllocateInternalSymbol(&buffer, chars, hash_field);
}
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index bfd378dd..bd1cd2d9 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -78,6 +78,10 @@ JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
}
} else if (obj->IsString()) {
return JSObjectsCluster(Heap::String_symbol());
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
+ } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
+ return JSObjectsCluster(JSObjectsCluster::CODE);
}
return JSObjectsCluster();
}
@@ -112,6 +116,16 @@ int Clusterizer::CalculateNetworkSize(JSObject* obj) {
if (FixedArray::cast(obj->elements())->length() != 0) {
size += obj->elements()->Size();
}
+  // For functions, also account for non-empty context and literals sizes.
+ if (obj->IsJSFunction()) {
+ JSFunction* f = JSFunction::cast(obj);
+ if (f->unchecked_context()->IsContext()) {
+ size += f->context()->Size();
+ }
+ if (f->literals()->length() != 0) {
+ size += f->literals()->Size();
+ }
+ }
return size;
}
@@ -127,15 +141,15 @@ class ReferencesExtractor : public ObjectVisitor {
}
void VisitPointer(Object** o) {
- if ((*o)->IsJSObject() || (*o)->IsString()) {
- profile_->StoreReference(cluster_, HeapObject::cast(*o));
- } else if ((*o)->IsFixedArray() && !inside_array_) {
+ if ((*o)->IsFixedArray() && !inside_array_) {
// Traverse one level deep for data members that are fixed arrays.
// This covers the case of 'elements' and 'properties' of JSObject,
// and function contexts.
inside_array_ = true;
FixedArray::cast(*o)->Iterate(this);
inside_array_ = false;
+ } else if ((*o)->IsHeapObject()) {
+ profile_->StoreReference(cluster_, HeapObject::cast(*o));
}
}
@@ -340,6 +354,8 @@ void JSObjectsCluster::Print(StringStream* accumulator) const {
accumulator->Add("(roots)");
} else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
accumulator->Add("(global property)");
+ } else if (constructor_ == FromSpecialCase(CODE)) {
+ accumulator->Add("(code)");
} else if (constructor_ == FromSpecialCase(SELF)) {
accumulator->Add("(self)");
} else {
@@ -520,13 +536,14 @@ RetainerHeapProfile::RetainerHeapProfile()
: zscope_(DELETE_ON_EXIT) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
- Heap::IterateRoots(&extractor);
+ Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
}
void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
HeapObject* ref) {
JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+ if (ref_cluster.is_null()) return;
JSObjectsRetainerTree::Locator ref_loc;
if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
ref_loc.set_value(new JSObjectsClusterTree());
@@ -537,15 +554,10 @@ void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
void RetainerHeapProfile::CollectStats(HeapObject* obj) {
- if (obj->IsJSObject()) {
- const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
- ReferencesExtractor extractor(cluster, this);
- obj->Iterate(&extractor);
- } else if (obj->IsJSGlobalPropertyCell()) {
- JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
- ReferencesExtractor extractor(global_prop, this);
- obj->Iterate(&extractor);
- }
+ const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+ if (cluster.is_null()) return;
+ ReferencesExtractor extractor(cluster, this);
+ obj->Iterate(&extractor);
}
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index bd875df2..f8cb04da 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -54,7 +54,8 @@ class JSObjectsCluster BASE_EMBEDDED {
enum SpecialCase {
ROOTS = 1,
GLOBAL_PROPERTY = 2,
- SELF = 3 // This case is used in ClustersCoarser only.
+ CODE = 3,
+ SELF = 100 // This case is used in ClustersCoarser only.
};
JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
@@ -97,6 +98,7 @@ class JSObjectsCluster BASE_EMBEDDED {
switch (special) {
case ROOTS: return Heap::result_symbol();
case GLOBAL_PROPERTY: return Heap::code_symbol();
+ case CODE: return Heap::arguments_shadow_symbol();
case SELF: return Heap::catch_var_symbol();
default:
UNREACHABLE();
diff --git a/src/heap.cc b/src/heap.cc
index 50840583..4e4cd1c0 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -43,6 +43,7 @@
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#include "regexp-macro-assembler.h"
+#include "arm/regexp-macro-assembler-arm.h"
#endif
namespace v8 {
@@ -113,6 +114,7 @@ int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;
int Heap::always_allocate_scope_depth_ = 0;
+int Heap::linear_allocation_scope_depth_ = 0;
bool Heap::context_disposed_pending_ = false;
#ifdef DEBUG
@@ -731,10 +733,7 @@ void Heap::Scavenge() {
ScavengeVisitor scavenge_visitor;
// Copy roots.
- IterateRoots(&scavenge_visitor);
-
- // Copy objects reachable from weak pointers.
- GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+ IterateRoots(&scavenge_visitor, VISIT_ALL);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
@@ -1188,34 +1187,14 @@ bool Heap::CreateInitialMaps() {
roots_[entry.index] = Map::cast(obj);
}
- obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_short_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_medium_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
- set_undetectable_long_string_map(Map::cast(obj));
+ set_undetectable_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
- obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
+ obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
- set_undetectable_short_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_medium_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_long_ascii_string_map(Map::cast(obj));
+ set_undetectable_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
@@ -1728,6 +1707,7 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@@ -1762,14 +1742,63 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
}
+// Returns true for a character in a range. Both limits are inclusive.
+static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
+ // This makes use of the unsigned wraparound.
+ return character - from <= to - from;
+}
+
+
+static inline Object* MakeOrFindTwoCharacterString(uint32_t c1, uint32_t c2) {
+ String* symbol;
+ // Numeric strings have a different hash algorithm not known by
+ // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
+ if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
+ Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
+ return symbol;
+ // Now that we know the length is 2, we might as well make use of that fact
+ // when building the new string.
+ } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
+ Object* result = Heap::AllocateRawAsciiString(2);
+ if (result->IsFailure()) return result;
+ char* dest = SeqAsciiString::cast(result)->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return result;
+ } else {
+ Object* result = Heap::AllocateRawTwoByteString(2);
+ if (result->IsFailure()) return result;
+ uc16* dest = SeqTwoByteString::cast(result)->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return result;
+ }
+}
+
+
Object* Heap::AllocateConsString(String* first, String* second) {
int first_length = first->length();
- if (first_length == 0) return second;
+ if (first_length == 0) {
+ return second;
+ }
int second_length = second->length();
- if (second_length == 0) return first;
+ if (second_length == 0) {
+ return first;
+ }
int length = first_length + second_length;
+
+ // Optimization for two-character strings often used as keys in a
+ // decompression dictionary. Check whether we already have the string in
+ // the symbol table to prevent creation of many unnecessary strings.
+ if (length == 2) {
+ unsigned c1 = first->Get(0);
+ unsigned c2 = second->Get(0);
+ return MakeOrFindTwoCharacterString(c1, c2);
+ }
+
bool is_ascii = first->IsAsciiRepresentation()
&& second->IsAsciiRepresentation();
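The Between helper added above is the classic branch-free inclusive range test: in unsigned arithmetic, c - from <= to - from holds exactly when from <= c <= to, because any c below from wraps around to a very large value. A self-contained check of the idiom:

    #include <cassert>
    #include <cstdint>

    // Inclusive range test using unsigned wraparound: one compare, no branches.
    static inline bool Between(uint32_t c, uint32_t from, uint32_t to) {
      return c - from <= to - from;
    }

    int main() {
      assert(Between('5', '0', '9'));   // inside the range
      assert(!Between('a', '0', '9'));  // above: 'a' - '0' is greater than 9
      assert(!Between('/', '0', '9'));  // below: '/' - '0' wraps to UINT32_MAX
      return 0;
    }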
@@ -1790,10 +1819,19 @@ Object* Heap::AllocateConsString(String* first, String* second) {
// Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars();
// Copy first part.
- char* src = SeqAsciiString::cast(first)->GetChars();
+ const char* src;
+ if (first->IsExternalString()) {
+ src = ExternalAsciiString::cast(first)->resource()->data();
+ } else {
+ src = SeqAsciiString::cast(first)->GetChars();
+ }
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
- src = SeqAsciiString::cast(second)->GetChars();
+ if (second->IsExternalString()) {
+ src = ExternalAsciiString::cast(second)->resource()->data();
+ } else {
+ src = SeqAsciiString::cast(second)->GetChars();
+ }
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
@@ -1807,62 +1845,17 @@ Object* Heap::AllocateConsString(String* first, String* second) {
}
}
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = is_ascii ? short_cons_ascii_string_map()
- : short_cons_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = is_ascii ? medium_cons_ascii_string_map()
- : medium_cons_string_map();
- } else {
- map = is_ascii ? long_cons_ascii_string_map()
- : long_cons_string_map();
- }
+ Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
- Object* result = Allocate(map, NEW_SPACE);
+ Object* result = Allocate(map,
+ always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
- ASSERT(InNewSpace(result));
ConsString* cons_string = ConsString::cast(result);
- cons_string->set_first(first, SKIP_WRITE_BARRIER);
- cons_string->set_second(second, SKIP_WRITE_BARRIER);
+ WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
cons_string->set_length(length);
- return result;
-}
-
-
-Object* Heap::AllocateSlicedString(String* buffer,
- int start,
- int end) {
- int length = end - start;
-
- // If the resulting string is small make a sub string.
- if (length <= String::kMinNonFlatLength) {
- return Heap::AllocateSubString(buffer, start, end);
- }
-
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = buffer->IsAsciiRepresentation() ?
- short_sliced_ascii_string_map() :
- short_sliced_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = buffer->IsAsciiRepresentation() ?
- medium_sliced_ascii_string_map() :
- medium_sliced_string_map();
- } else {
- map = buffer->IsAsciiRepresentation() ?
- long_sliced_ascii_string_map() :
- long_sliced_string_map();
- }
-
- Object* result = Allocate(map, NEW_SPACE);
- if (result->IsFailure()) return result;
-
- SlicedString* sliced_string = SlicedString::cast(result);
- sliced_string->set_buffer(buffer);
- sliced_string->set_start(start);
- sliced_string->set_length(length);
-
+ cons_string->set_hash_field(String::kEmptyHashField);
+ cons_string->set_first(first, mode);
+ cons_string->set_second(second, mode);
return result;
}
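Note the write-barrier change: because the cons string can now land in old pointer space (under always_allocate()), the barrier mode is queried from the object instead of being hard-coded to SKIP_WRITE_BARRIER. A rough standalone illustration of the rule, with a hypothetical stand-in type rather than the real heap classes:

    // New-space objects are scanned in full on scavenge, so stores into them
    // need no remembered-set entry; old-space objects do.
    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    struct HeapObjectStub { bool in_new_space; };

    WriteBarrierMode GetWriteBarrierMode(const HeapObjectStub& o) {
      return o.in_new_space ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
    }

    int main() {
      HeapObjectStub young{true}, old_obj{false};
      return GetWriteBarrierMode(young) == SKIP_WRITE_BARRIER &&
             GetWriteBarrierMode(old_obj) == UPDATE_WRITE_BARRIER ? 0 : 1;
    }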
@@ -1875,6 +1868,13 @@ Object* Heap::AllocateSubString(String* buffer,
if (length == 1) {
return Heap::LookupSingleCharacterStringFromCode(
buffer->Get(start));
+ } else if (length == 2) {
+ // Optimization for two-character strings often used as keys in a
+ // decompression dictionary. Check whether we already have the string in
+ // the symbol table to prevent creation of many unnecessary strings.
+ unsigned c1 = buffer->Get(start);
+ unsigned c2 = buffer->Get(start + 1);
+ return MakeOrFindTwoCharacterString(c1, c2);
}
// Make an attempt to flatten the buffer to reduce access time.
@@ -1886,43 +1886,39 @@ Object* Heap::AllocateSubString(String* buffer,
? AllocateRawAsciiString(length)
: AllocateRawTwoByteString(length);
if (result->IsFailure()) return result;
+ String* string_result = String::cast(result);
// Copy the characters into the new object.
- String* string_result = String::cast(result);
- StringHasher hasher(length);
- int i = 0;
- for (; i < length && hasher.is_array_index(); i++) {
- uc32 c = buffer->Get(start + i);
- hasher.AddCharacter(c);
- string_result->Set(i, c);
- }
- for (; i < length; i++) {
- uc32 c = buffer->Get(start + i);
- hasher.AddCharacterNoIndex(c);
- string_result->Set(i, c);
+ if (buffer->IsAsciiRepresentation()) {
+ ASSERT(string_result->IsAsciiRepresentation());
+ char* dest = SeqAsciiString::cast(string_result)->GetChars();
+ String::WriteToFlat(buffer, dest, start, end);
+ } else {
+ ASSERT(string_result->IsTwoByteRepresentation());
+ uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
+ String::WriteToFlat(buffer, dest, start, end);
}
- string_result->set_length_field(hasher.GetHashField());
+
return result;
}
Object* Heap::AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
- Map* map;
- int length = resource->length();
- if (length <= String::kMaxShortStringSize) {
- map = short_external_ascii_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = medium_external_ascii_string_map();
- } else {
- map = long_external_ascii_string_map();
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
}
- Object* result = Allocate(map, NEW_SPACE);
+ Map* map = external_ascii_string_map();
+ Object* result = Allocate(map,
+ always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
- external_string->set_length(length);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
return result;
@@ -1931,14 +1927,20 @@ Object* Heap::AllocateExternalStringFromAscii(
Object* Heap::AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
- int length = resource->length();
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
- Map* map = ExternalTwoByteString::StringMap(length);
- Object* result = Allocate(map, NEW_SPACE);
+ Map* map = Heap::external_string_map();
+ Object* result = Allocate(map,
+ always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
- external_string->set_length(length);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
return result;
@@ -2256,9 +2258,8 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
// descriptors for these to the initial map as the object cannot be
// constructed without having these properties.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->has_only_this_property_assignments() &&
- fun->shared()->this_property_assignments_count() > 0 &&
- fun->shared()->has_only_simple_this_property_assignments()) {
+ if (fun->shared()->has_only_simple_this_property_assignments() &&
+ fun->shared()->this_property_assignments_count() > 0) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
count = in_object_properties;
@@ -2320,6 +2321,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+ if (always_allocate()) space = OLD_POINTER_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@@ -2579,62 +2581,12 @@ Map* Heap::SymbolMapForString(String* string) {
// Find the corresponding symbol map for strings.
Map* map = string->map();
-
- if (map == short_ascii_string_map()) return short_ascii_symbol_map();
- if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
- if (map == long_ascii_string_map()) return long_ascii_symbol_map();
-
- if (map == short_string_map()) return short_symbol_map();
- if (map == medium_string_map()) return medium_symbol_map();
- if (map == long_string_map()) return long_symbol_map();
-
- if (map == short_cons_string_map()) return short_cons_symbol_map();
- if (map == medium_cons_string_map()) return medium_cons_symbol_map();
- if (map == long_cons_string_map()) return long_cons_symbol_map();
-
- if (map == short_cons_ascii_string_map()) {
- return short_cons_ascii_symbol_map();
- }
- if (map == medium_cons_ascii_string_map()) {
- return medium_cons_ascii_symbol_map();
- }
- if (map == long_cons_ascii_string_map()) {
- return long_cons_ascii_symbol_map();
- }
-
- if (map == short_sliced_string_map()) return short_sliced_symbol_map();
- if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
- if (map == long_sliced_string_map()) return long_sliced_symbol_map();
-
- if (map == short_sliced_ascii_string_map()) {
- return short_sliced_ascii_symbol_map();
- }
- if (map == medium_sliced_ascii_string_map()) {
- return medium_sliced_ascii_symbol_map();
- }
- if (map == long_sliced_ascii_string_map()) {
- return long_sliced_ascii_symbol_map();
- }
-
- if (map == short_external_string_map()) {
- return short_external_symbol_map();
- }
- if (map == medium_external_string_map()) {
- return medium_external_symbol_map();
- }
- if (map == long_external_string_map()) {
- return long_external_symbol_map();
- }
-
- if (map == short_external_ascii_string_map()) {
- return short_external_ascii_symbol_map();
- }
- if (map == medium_external_ascii_string_map()) {
- return medium_external_ascii_symbol_map();
- }
- if (map == long_external_ascii_string_map()) {
- return long_external_ascii_symbol_map();
- }
+ if (map == ascii_string_map()) return ascii_symbol_map();
+ if (map == string_map()) return symbol_map();
+ if (map == cons_string_map()) return cons_symbol_map();
+ if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
+ if (map == external_string_map()) return external_symbol_map();
+ if (map == external_ascii_string_map()) return external_ascii_symbol_map();
// No match found.
return NULL;
@@ -2643,7 +2595,7 @@ Map* Heap::SymbolMapForString(String* string) {
Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
- uint32_t length_field) {
+ uint32_t hash_field) {
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
// Determine whether the string is ascii.
@@ -2658,22 +2610,10 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Map* map;
if (is_ascii) {
- if (chars <= String::kMaxShortStringSize) {
- map = short_ascii_symbol_map();
- } else if (chars <= String::kMaxMediumStringSize) {
- map = medium_ascii_symbol_map();
- } else {
- map = long_ascii_symbol_map();
- }
+ map = ascii_symbol_map();
size = SeqAsciiString::SizeFor(chars);
} else {
- if (chars <= String::kMaxShortStringSize) {
- map = short_symbol_map();
- } else if (chars <= String::kMaxMediumStringSize) {
- map = medium_symbol_map();
- } else {
- map = long_symbol_map();
- }
+ map = symbol_map();
size = SeqTwoByteString::SizeFor(chars);
}
@@ -2684,9 +2624,10 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(map);
- // The hash value contains the length of the string.
+ // Set length and hash fields of the allocated string.
String* answer = String::cast(result);
- answer->set_length_field(length_field);
+ answer->set_length(chars);
+ answer->set_hash_field(hash_field);
ASSERT_EQ(size, answer->Size());
@@ -2717,19 +2658,10 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
}
if (result->IsFailure()) return result;
- // Determine the map based on the string's length.
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = short_ascii_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = medium_ascii_string_map();
- } else {
- map = long_ascii_string_map();
- }
-
// Partially initialize the object.
- HeapObject::cast(result)->set_map(map);
+ HeapObject::cast(result)->set_map(ascii_string_map());
String::cast(result)->set_length(length);
+ String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
@@ -2754,19 +2686,10 @@ Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
}
if (result->IsFailure()) return result;
- // Determine the map based on the string's length.
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = short_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = medium_string_map();
- } else {
- map = long_string_map();
- }
-
// Partially initialize the object.
- HeapObject::cast(result)->set_map(map);
+ HeapObject::cast(result)->set_map(string_map());
String::cast(result)->set_length(length);
+ String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
@@ -2987,6 +2910,11 @@ bool Heap::IdleNotification() {
last_gc_count = gc_count_;
} else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
+ // Before doing the mark-sweep collections we clear the
+ // compilation cache to avoid hanging on to source code and
+ // generated code for cached functions.
+ CompilationCache::Clear();
+
CollectAllGarbage(false);
new_space_.Shrink();
last_gc_count = gc_count_;
@@ -3116,7 +3044,7 @@ void Heap::Verify() {
ASSERT(HasBeenSetup());
VerifyPointersVisitor visitor;
- IterateRoots(&visitor);
+ IterateRoots(&visitor, VISIT_ONLY_STRONG);
new_space_.Verify();
@@ -3243,60 +3171,57 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
}
-#ifdef DEBUG
-#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
-#else
-#define SYNCHRONIZE_TAG(tag)
-#endif
-
-void Heap::IterateRoots(ObjectVisitor* v) {
- IterateStrongRoots(v);
+void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
+ IterateStrongRoots(v, mode);
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- SYNCHRONIZE_TAG("symbol_table");
+ v->Synchronize("symbol_table");
}
-void Heap::IterateStrongRoots(ObjectVisitor* v) {
+void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
- SYNCHRONIZE_TAG("strong_root_list");
+ v->Synchronize("strong_root_list");
v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
- SYNCHRONIZE_TAG("symbol");
+ v->Synchronize("symbol");
Bootstrapper::Iterate(v);
- SYNCHRONIZE_TAG("bootstrapper");
+ v->Synchronize("bootstrapper");
Top::Iterate(v);
- SYNCHRONIZE_TAG("top");
+ v->Synchronize("top");
Relocatable::Iterate(v);
- SYNCHRONIZE_TAG("relocatable");
+ v->Synchronize("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::Iterate(v);
#endif
- SYNCHRONIZE_TAG("debug");
+ v->Synchronize("debug");
CompilationCache::Iterate(v);
- SYNCHRONIZE_TAG("compilationcache");
+ v->Synchronize("compilationcache");
// Iterate over local handles in handle scopes.
HandleScopeImplementer::Iterate(v);
- SYNCHRONIZE_TAG("handlescope");
+ v->Synchronize("handlescope");
// Iterate over the builtin code objects and code stubs in the heap. Note
// that it is not strictly necessary to iterate over code objects on
// scavenge collections. We still do it here because this same function
// is used by the mark-sweep collector and the deserializer.
Builtins::IterateBuiltins(v);
- SYNCHRONIZE_TAG("builtins");
+ v->Synchronize("builtins");
// Iterate over global handles.
- GlobalHandles::IterateRoots(v);
- SYNCHRONIZE_TAG("globalhandles");
+ if (mode == VISIT_ONLY_STRONG) {
+ GlobalHandles::IterateStrongRoots(v);
+ } else {
+ GlobalHandles::IterateAllRoots(v);
+ }
+ v->Synchronize("globalhandles");
// Iterate over pointers being held by inactive threads.
ThreadManager::Iterate(v);
- SYNCHRONIZE_TAG("threadmanager");
+ v->Synchronize("threadmanager");
}
-#undef SYNCHRONIZE_TAG
// Flag is set when the heap has been configured. The heap can be repeatedly
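Replacing the DEBUG-only SYNCHRONIZE_TAG macro with an unconditional Synchronize() call, and folding weak-handle iteration behind a VisitMode parameter, leaves a single entry point whose strong/all choice is explicit at every call site. A simplified sketch of the resulting shape (hypothetical visitor, not the real ObjectVisitor):

    #include <cstdio>

    enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };

    struct Visitor {
      virtual ~Visitor() {}
      virtual void VisitPointer(const char* what) = 0;
      // Synchronize() is now an ordinary virtual hook instead of a
      // DEBUG-only macro; release visitors simply leave it empty.
      virtual void Synchronize(const char* tag) {}
    };

    void IterateRoots(Visitor* v, VisitMode mode) {
      v->VisitPointer("strong roots");
      v->Synchronize("strong_root_list");
      if (mode == VISIT_ALL) v->VisitPointer("weak global handles");
    }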
@@ -3348,6 +3273,26 @@ bool Heap::ConfigureHeapDefault() {
}
+void Heap::RecordStats(HeapStats* stats) {
+ *stats->start_marker = 0xDECADE00;
+ *stats->end_marker = 0xDECADE01;
+ *stats->new_space_size = new_space_.Size();
+ *stats->new_space_capacity = new_space_.Capacity();
+ *stats->old_pointer_space_size = old_pointer_space_->Size();
+ *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
+ *stats->old_data_space_size = old_data_space_->Size();
+ *stats->old_data_space_capacity = old_data_space_->Capacity();
+ *stats->code_space_size = code_space_->Size();
+ *stats->code_space_capacity = code_space_->Capacity();
+ *stats->map_space_size = map_space_->Size();
+ *stats->map_space_capacity = map_space_->Capacity();
+ *stats->cell_space_size = cell_space_->Size();
+ *stats->cell_space_capacity = cell_space_->Capacity();
+ *stats->lo_space_size = lo_space_->Size();
+ GlobalHandles::RecordStats(stats);
+}
+
+
int Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
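RecordStats fills caller-owned ints through pointers and brackets them with the 0xDECADE00/0xDECADE01 markers, so the figures can be located in a raw crash dump. A standalone miniature of the out-parameter pattern (three fields instead of nineteen):

    #include <cstdio>

    // Hypothetical miniature: out-parameters bracketed by sentinel markers.
    struct MiniStats {
      int* start_marker;
      int* new_space_size;
      int* end_marker;
    };

    void RecordStats(MiniStats* stats) {
      *stats->start_marker = 0xDECADE00;
      *stats->new_space_size = 42;  // stand-in for new_space_.Size()
      *stats->end_marker = 0xDECADE01;
    }

    int main() {
      int start, size, end;
      MiniStats s{&start, &size, &end};
      RecordStats(&s);
      std::printf("%x %d %x\n", (unsigned)start, size, (unsigned)end);
    }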
@@ -3461,14 +3406,18 @@ bool Heap::Setup(bool create_heap_objects) {
}
-void Heap::SetStackLimit(intptr_t limit) {
+void Heap::SetStackLimits() {
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
- // Set up the special root array entry containing the stack guard.
- // This is actually an address, but the tag makes the GC ignore it.
+ // Set up the special root array entries containing the stack limits.
+ // These are actually addresses, but the tag makes the GC ignore them.
roots_[kStackLimitRootIndex] =
- reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag);
+ reinterpret_cast<Object*>(
+ (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
+ roots_[kRealStackLimitRootIndex] =
+ reinterpret_cast<Object*>(
+ (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
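Both slots rely on the same disguise: masking with ~kSmiTagMask clears the low tag bit, so the stored word parses as a smi and the GC skips it, while the value stays within kSmiTagMask bytes of the real limit. A self-contained illustration using V8's tag constants (kSmiTag == 0, kSmiTagMask == 1):

    #include <cstdint>
    #include <cstdio>

    const intptr_t kSmiTag = 0;      // V8 smi tag: low bit zero
    const intptr_t kSmiTagMask = 1;

    int main() {
      intptr_t limit = 0x7ffc1235;   // pretend stack limit (odd address here)
      intptr_t disguised = (limit & ~kSmiTagMask) | kSmiTag;
      // Low bit cleared: looks like a smi to the GC, off by at most one byte.
      std::printf("%p -> %p\n", (void*)limit, (void*)disguised);
      return 0;
    }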
@@ -3895,7 +3844,7 @@ void Heap::TracePathToObject() {
search_for_any_global = false;
MarkRootVisitor root_visitor;
- IterateRoots(&root_visitor);
+ IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
@@ -3907,7 +3856,7 @@ void Heap::TracePathToGlobal() {
search_for_any_global = true;
MarkRootVisitor root_visitor;
- IterateRoots(&root_visitor);
+ IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
#endif
diff --git a/src/heap.h b/src/heap.h
index cd49a8d7..b37fe4b5 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -38,7 +38,13 @@ namespace internal {
// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
- /* Cluster the most popular ones in a few cache lines here at the top. */ \
+ /* Put the byte array map early. We need it to be in place by the time */ \
+ /* the deserializer hits the next page, since it wants to put a byte */ \
+ /* array in the unused space at the end of the page. */ \
+ V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, one_pointer_filler_map, OnePointerFillerMap) \
+ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
+ /* Cluster the most popular ones in a few cache lines here at the top. */ \
V(Smi, stack_limit, StackLimit) \
V(Object, undefined_value, UndefinedValue) \
V(Object, the_hole_value, TheHoleValue) \
@@ -53,63 +59,20 @@ namespace internal {
V(Object, termination_exception, TerminationException) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(Map, short_string_map, ShortStringMap) \
- V(Map, medium_string_map, MediumStringMap) \
- V(Map, long_string_map, LongStringMap) \
- V(Map, short_ascii_string_map, ShortAsciiStringMap) \
- V(Map, medium_ascii_string_map, MediumAsciiStringMap) \
- V(Map, long_ascii_string_map, LongAsciiStringMap) \
- V(Map, short_symbol_map, ShortSymbolMap) \
- V(Map, medium_symbol_map, MediumSymbolMap) \
- V(Map, long_symbol_map, LongSymbolMap) \
- V(Map, short_ascii_symbol_map, ShortAsciiSymbolMap) \
- V(Map, medium_ascii_symbol_map, MediumAsciiSymbolMap) \
- V(Map, long_ascii_symbol_map, LongAsciiSymbolMap) \
- V(Map, short_cons_symbol_map, ShortConsSymbolMap) \
- V(Map, medium_cons_symbol_map, MediumConsSymbolMap) \
- V(Map, long_cons_symbol_map, LongConsSymbolMap) \
- V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap) \
- V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap) \
- V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap) \
- V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap) \
- V(Map, medium_sliced_symbol_map, MediumSlicedSymbolMap) \
- V(Map, long_sliced_symbol_map, LongSlicedSymbolMap) \
- V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap) \
- V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap) \
- V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap) \
- V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
- V(Map, medium_external_symbol_map, MediumExternalSymbolMap) \
- V(Map, long_external_symbol_map, LongExternalSymbolMap) \
- V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
- V(Map, medium_external_ascii_symbol_map, MediumExternalAsciiSymbolMap) \
- V(Map, long_external_ascii_symbol_map, LongExternalAsciiSymbolMap) \
- V(Map, short_cons_string_map, ShortConsStringMap) \
- V(Map, medium_cons_string_map, MediumConsStringMap) \
- V(Map, long_cons_string_map, LongConsStringMap) \
- V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap) \
- V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap) \
- V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap) \
- V(Map, short_sliced_string_map, ShortSlicedStringMap) \
- V(Map, medium_sliced_string_map, MediumSlicedStringMap) \
- V(Map, long_sliced_string_map, LongSlicedStringMap) \
- V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap) \
- V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap) \
- V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap) \
- V(Map, short_external_string_map, ShortExternalStringMap) \
- V(Map, medium_external_string_map, MediumExternalStringMap) \
- V(Map, long_external_string_map, LongExternalStringMap) \
- V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
- V(Map, medium_external_ascii_string_map, MediumExternalAsciiStringMap) \
- V(Map, long_external_ascii_string_map, LongExternalAsciiStringMap) \
- V(Map, undetectable_short_string_map, UndetectableShortStringMap) \
- V(Map, undetectable_medium_string_map, UndetectableMediumStringMap) \
- V(Map, undetectable_long_string_map, UndetectableLongStringMap) \
- V(Map, undetectable_short_ascii_string_map, UndetectableShortAsciiStringMap) \
- V(Map, \
- undetectable_medium_ascii_string_map, \
- UndetectableMediumAsciiStringMap) \
- V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
- V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, string_map, StringMap) \
+ V(Map, ascii_string_map, AsciiStringMap) \
+ V(Map, symbol_map, SymbolMap) \
+ V(Map, ascii_symbol_map, AsciiSymbolMap) \
+ V(Map, cons_symbol_map, ConsSymbolMap) \
+ V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
+ V(Map, external_symbol_map, ExternalSymbolMap) \
+ V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
+ V(Map, cons_string_map, ConsStringMap) \
+ V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
+ V(Map, external_string_map, ExternalStringMap) \
+ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
+ V(Map, undetectable_string_map, UndetectableStringMap) \
+ V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
V(Map, pixel_array_map, PixelArrayMap) \
V(Map, external_byte_array_map, ExternalByteArrayMap) \
V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
@@ -126,8 +89,6 @@ namespace internal {
V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, proxy_map, ProxyMap) \
- V(Map, one_pointer_filler_map, OnePointerFillerMap) \
- V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
V(String, empty_string, EmptyString) \
@@ -145,6 +106,7 @@ namespace internal {
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Object, last_script_id, LastScriptId) \
+ V(Smi, real_stack_limit, RealStackLimit) \
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#define STRONG_ROOT_LIST(V) \
@@ -221,11 +183,13 @@ namespace internal {
V(exec_symbol, "exec") \
V(zero_symbol, "0") \
V(global_eval_symbol, "GlobalEval") \
- V(identity_hash_symbol, "v8::IdentityHash")
+ V(identity_hash_symbol, "v8::IdentityHash") \
+ V(closure_symbol, "(closure)")
// Forward declaration of the GCTracer class.
class GCTracer;
+class HeapStats;
// The all static Heap captures the interface to the global object heap.
@@ -246,10 +210,10 @@ class Heap : public AllStatic {
// Destroys all memory allocated by the heap.
static void TearDown();
- // Sets the stack limit in the roots_ array. Some architectures generate code
- // that looks here, because it is faster than loading from the static jslimit_
- // variable.
- static void SetStackLimit(intptr_t limit);
+ // Set the stack limits in the roots_ array. Some architectures generate
+ // code that looks here, because it is faster than loading from the static
+ // jslimit_/real_jslimit_ variable in the StackGuard.
+ static void SetStackLimits();
// Returns whether Setup has been called.
static bool HasBeenSetup();
@@ -304,6 +268,9 @@ class Heap : public AllStatic {
static Address always_allocate_scope_depth_address() {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
+ static bool linear_allocation() {
+ return linear_allocation_scope_depth_ != 0;
+ }
static Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
@@ -413,11 +380,11 @@ class Heap : public AllStatic {
// Please note this function does not perform a garbage collection.
static inline Object* AllocateSymbol(Vector<const char> str,
int chars,
- uint32_t length_field);
+ uint32_t hash_field);
static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
- uint32_t length_field);
+ uint32_t hash_field);
static Object* AllocateExternalSymbol(Vector<const char> str,
int chars);
@@ -579,16 +546,6 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateConsString(String* first, String* second);
- // Allocates a new sliced string object which is a slice of an underlying
- // string buffer stretching from the index start (inclusive) to the index
- // end (exclusive).
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- static Object* AllocateSlicedString(String* buffer,
- int start,
- int end);
-
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
// end (exclusive).
@@ -645,6 +602,7 @@ class Heap : public AllStatic {
}
static Object* LookupSymbol(String* str);
static bool LookupSymbolIfExists(String* str, String** symbol);
+ static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
// Compute the matching symbol map for a string if possible.
// NULL is returned if string is in new space or not flattened.
@@ -722,9 +680,9 @@ class Heap : public AllStatic {
static String* hidden_symbol() { return hidden_symbol_; }
// Iterates over all roots in the heap.
- static void IterateRoots(ObjectVisitor* v);
+ static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
- static void IterateStrongRoots(ObjectVisitor* v);
+ static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
@@ -749,7 +707,7 @@ class Heap : public AllStatic {
static bool Contains(HeapObject* value);
// Checks whether an address/object is in a space.
- // Currently used by tests and heap verification only.
+ // Currently used by tests, serialization and heap verification only.
static bool InSpace(Address addr, AllocationSpace space);
static bool InSpace(HeapObject* value, AllocationSpace space);
@@ -908,6 +866,8 @@ class Heap : public AllStatic {
static RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
+ static void RecordStats(HeapStats* stats);
+
private:
static int reserved_semispace_size_;
static int max_semispace_size_;
@@ -920,9 +880,13 @@ class Heap : public AllStatic {
static int survived_since_last_expansion_;
static int always_allocate_scope_depth_;
+ static int linear_allocation_scope_depth_;
static bool context_disposed_pending_;
- static const int kMaxMapSpaceSize = 8*MB;
+ // The number of MapSpace pages is limited by the way we pack
+ // Map pointers during GC.
+ static const int kMaxMapSpaceSize =
+ (1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
@@ -1135,6 +1099,32 @@ class Heap : public AllStatic {
friend class Factory;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
+ friend class LinearAllocationScope;
+};
+
+
+class HeapStats {
+ public:
+ int *start_marker;
+ int *new_space_size;
+ int *new_space_capacity;
+ int *old_pointer_space_size;
+ int *old_pointer_space_capacity;
+ int *old_data_space_size;
+ int *old_data_space_capacity;
+ int *code_space_size;
+ int *code_space_capacity;
+ int *map_space_size;
+ int *map_space_capacity;
+ int *cell_space_size;
+ int *cell_space_capacity;
+ int *lo_space_size;
+ int *global_handle_count;
+ int *weak_global_handle_count;
+ int *pending_global_handle_count;
+ int *near_death_global_handle_count;
+ int *destroyed_global_handle_count;
+ int *end_marker;
};
@@ -1156,6 +1146,19 @@ class AlwaysAllocateScope {
};
+class LinearAllocationScope {
+ public:
+ LinearAllocationScope() {
+ Heap::linear_allocation_scope_depth_++;
+ }
+
+ ~LinearAllocationScope() {
+ Heap::linear_allocation_scope_depth_--;
+ ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+ }
+};
+
+
#ifdef DEBUG
// Visitor class to verify interior pointers that do not have remembered set
// bits. All heap object pointers have to point into the heap to a location
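LinearAllocationScope (added above) is the same RAII depth-counter idiom as AlwaysAllocateScope: Heap::linear_allocation() reports true for the dynamic extent of any live scope, nesting included. A standalone miniature of the idiom:

    #include <cassert>

    // Standalone miniature of the RAII depth counter used above.
    struct MiniHeap {
      static int depth;
      static bool linear_allocation() { return depth != 0; }
    };
    int MiniHeap::depth = 0;

    struct LinearScope {
      LinearScope() { MiniHeap::depth++; }
      ~LinearScope() { MiniHeap::depth--; assert(MiniHeap::depth >= 0); }
    };

    int main() {
      assert(!MiniHeap::linear_allocation());
      {
        LinearScope outer;
        LinearScope inner;  // nesting just bumps the counter
        assert(MiniHeap::linear_allocation());
      }
      assert(!MiniHeap::linear_allocation());
    }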
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 5fa75ec8..69f2a8da 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -89,7 +89,7 @@ Object* RelocInfo::target_object() {
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(pc_);
}
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 698377a0..d6f55508 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -49,6 +49,7 @@ namespace internal {
// Safe default is no features.
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::enabled_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
@@ -56,7 +57,10 @@ uint64_t CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
ASSERT(supported_ == 0);
- if (Serializer::enabled()) return; // No features if we might serialize.
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
Assembler assm(NULL, 0);
Label cpuid, done;
@@ -124,6 +128,10 @@ void CpuFeatures::Probe() {
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
+ found_by_runtime_probing_ = supported_;
+ uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= os_guarantees;
+ found_by_runtime_probing_ &= ~os_guarantees;
}
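The three masks preserve one invariant: found_by_runtime_probing_ holds exactly the features observed only on this machine, so snapshot-producing builds can refuse to depend on them while normal runs still use everything in supported_. A worked standalone example of the mask arithmetic, using the bit positions from the CpuFeature enum below (SSE2 = 26, SSE3 = 32):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kSSE2 = uint64_t{1} << 26;
      const uint64_t kSSE3 = uint64_t{1} << 32;

      uint64_t probed = kSSE2 | kSSE3;  // what the CPUID probe returned
      uint64_t os_guarantees = kSSE2;   // e.g. the platform ABI mandates SSE2

      uint64_t supported = probed | os_guarantees;
      uint64_t found_by_runtime_probing = probed & ~os_guarantees;

      // Only SSE3 remains runtime-only: usable now, off-limits in snapshots.
      std::printf("supported=%llx runtime_only=%llx\n",
                  (unsigned long long)supported,
                  (unsigned long long)found_by_runtime_probing);
    }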
@@ -360,7 +368,7 @@ void Assembler::Align(int m) {
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -712,7 +720,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@@ -723,7 +731,7 @@ void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@@ -734,7 +742,7 @@ void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r
@@ -1083,7 +1091,7 @@ void Assembler::sar(Register dst, uint8_t imm8) {
}
-void Assembler::sar(Register dst) {
+void Assembler::sar_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@@ -1123,7 +1131,7 @@ void Assembler::shl(Register dst, uint8_t imm8) {
}
-void Assembler::shl(Register dst) {
+void Assembler::shl_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@@ -1144,24 +1152,21 @@ void Assembler::shr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
- EMIT(0xC1);
- EMIT(0xE8 | dst.code());
- EMIT(imm8);
-}
-
-
-void Assembler::shr(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD3);
- EMIT(0xE8 | dst.code());
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE8 | dst.code());
+ EMIT(imm8);
+ }
}
void Assembler::shr_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0xD1);
+ EMIT(0xD3);
EMIT(0xE8 | dst.code());
}
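The new special case exploits x86's dedicated shift-by-one opcode, and the fixed shr_cl now emits the CL-count form (0xD3) where the old code wrongly emitted the shift-by-one opcode (0xD1). For dst = eax the three emitted encodings differ only in the opcode byte, with ModRM 0xE8 encoding /5 (SHR) on eax:

    shr eax, 1    ->  D1 E8       (shift-by-one form, one byte shorter)
    shr eax, 5    ->  C1 E8 05    (imm8 form)
    shr eax, cl   ->  D3 E8       (count taken from CL, emitted by shr_cl)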
@@ -1316,7 +1321,7 @@ void Assembler::nop() {
void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
+ ASSERT(CpuFeatures::IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -1662,7 +1667,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDB);
@@ -1923,7 +1928,7 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -1934,7 +1939,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1945,7 +1950,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1956,7 +1961,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1967,7 +1972,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1978,7 +1983,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1989,7 +1994,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2000,7 +2005,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2025,7 +2030,7 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) {
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@@ -2036,7 +2041,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@@ -2245,10 +2250,15 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(rmode != RelocInfo::NONE);
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
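The rewritten check carries a subtle protocol: once code has been generated without recording an external reference, enabling the serializer afterwards would yield a broken snapshot, so debug builds latch that fact via TooLateToEnableNow(). A minimal sketch of such a latch (hypothetical simplified Serializer, not the real class):

    #include <cassert>

    // Hypothetical latch mirroring the Serializer::TooLateToEnableNow() idea.
    class Serializer {
     public:
      static bool enabled() { return enabled_; }
      static void Enable() { assert(!too_late_); enabled_ = true; }
      static void TooLateToEnableNow() { too_late_ = true; }
     private:
      static bool enabled_;
      static bool too_late_;
    };
    bool Serializer::enabled_ = false;
    bool Serializer::too_late_ = false;

    void RecordExternalReference() {
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();  // a snapshot is no longer possible
        return;                            // skip recording, as in release mode
      }
      // ... record the reloc info ...
    }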
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4d9f08b0..662ebc90 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -37,6 +37,8 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
+#include "serialize.h"
+
namespace v8 {
namespace internal {
@@ -358,15 +360,11 @@ class Displacement BASE_EMBEDDED {
// }
class CpuFeatures : public AllStatic {
public:
- // Feature flags bit positions. They are mostly based on the CPUID spec.
- // (We assign CPUID itself to one of the currently reserved bits --
- // feel free to change this if needed.)
- enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(Feature f) {
+ static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -374,29 +372,32 @@ class CpuFeatures : public AllStatic {
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(Feature f) {
+ static bool IsEnabled(CpuFeature f) {
return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(Feature f) {
+ explicit Scope(CpuFeature f) {
+ uint64_t mask = static_cast<uint64_t>(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+ CpuFeatures::enabled_ |= mask;
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
#else
public:
- explicit Scope(Feature f) {}
+ explicit Scope(CpuFeature f) {}
#endif
};
private:
static uint64_t supported_;
static uint64_t enabled_;
+ static uint64_t found_by_runtime_probing_;
};
@@ -439,6 +440,23 @@ class Assembler : public Malloced {
inline static Address target_address_at(Address pc);
inline static void set_target_address_at(Address pc, Address target);
+ // This sets the branch destination (which is in the instruction on x86).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ // This sets the branch destination (which is in the instruction on x86).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
// Distance between the address of the code target in the call instruction
// and the return address
static const int kCallTargetAddressOffset = kPointerSize;
@@ -446,6 +464,8 @@ class Assembler : public Malloced {
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
+ static const int kCallInstructionLength = 5;
+ static const int kJSReturnSequenceLength = 6;
// ---------------------------------------------------------------------------
// Code generation
@@ -579,19 +599,18 @@ class Assembler : public Malloced {
void rcl(Register dst, uint8_t imm8);
void sar(Register dst, uint8_t imm8);
- void sar(Register dst);
+ void sar_cl(Register dst);
void sbb(Register dst, const Operand& src);
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
- void shl(Register dst);
+ void shl_cl(Register dst);
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
- void shr(Register dst);
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index ad44026c..a164cfa8 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -462,6 +462,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalIndex));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -520,48 +522,31 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(Operand(ebp, 2 * kPointerSize)); // push arguments
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- if (FLAG_check_stack) {
- // We need to catch preemptions right here, otherwise an unlucky preemption
- // could show up as a failed apply.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- Label retry_preemption;
- Label no_preemption;
- __ bind(&retry_preemption);
- __ mov(edi, Operand::StaticVariable(stack_guard_limit));
- __ cmp(esp, Operand(edi));
- __ j(above, &no_preemption, taken);
-
- // Preemption!
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(0)));
-
- // Do call to runtime routine.
- __ CallRuntime(Runtime::kStackGuard, 1);
- __ pop(eax);
- __ jmp(&retry_preemption);
-
- __ bind(&no_preemption);
-
- Label okay;
- // Make ecx the space we have left.
- __ mov(ecx, Operand(esp));
- __ sub(ecx, Operand(edi));
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, Operand(eax));
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- __ cmp(ecx, Operand(edx));
- __ j(greater, &okay, taken);
-
- // Too bad: Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- }
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit();
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ mov(ecx, Operand(esp));
+ __ sub(ecx, Operand(edi));
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, Operand(eax));
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, Operand(edx));
+ __ j(greater, &okay, taken); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
// Push current index and limit.
const int kLimitOffset =
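The replacement check reduces to one signed comparison: the free space ecx = esp - real_limit may already be negative if the stack has overflowed, and edx converts the smi-tagged argument count in eax into bytes via the shift. A standalone sketch of the same arithmetic (assuming 32-bit pointers and V8's one-bit smi tag):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSizeLog2 = 2;  // 32-bit: 4-byte pointers
      const int kSmiTagSize = 1;       // smis carry one tag bit

      int32_t esp = 0x1000;            // pretend stack pointer
      int32_t real_limit = 0x0F00;     // pretend real stack limit
      int32_t argc_smi = 200 << kSmiTagSize;  // smi-encoded count of 200 args

      int32_t space_left = esp - real_limit;  // may be negative
      int32_t space_needed = argc_smi << (kPointerSizeLog2 - kSmiTagSize);

      // Signed comparison, exactly like the j(greater, ...) above.
      std::printf("overflow: %s\n", space_left > space_needed ? "no" : "yes");
      return 0;
    }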
@@ -606,6 +591,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -894,7 +881,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// be preserved.
static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
// Push the constructor and argc. No need to tag argc as a smi, as there will
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index a339e90c..7c8ff31f 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
@@ -75,7 +76,6 @@ void DeferredCode::RestoreRegisters() {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
@@ -83,10 +83,8 @@ CodeGenState::CodeGenState(CodeGenerator* owner)
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
ControlDestination* destination)
: owner_(owner),
- typeof_state_(typeof_state),
destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
@@ -415,13 +413,12 @@ Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, dest);
+ { CodeGenState new_state(this, dest);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -450,17 +447,16 @@ void CodeGenerator::LoadCondition(Expression* x,
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
+void CodeGenerator::LoadAndSpill(Expression* expression) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
- Load(expression, typeof_state);
+ Load(expression);
frame_->SpillAll();
set_in_spilled_code(true);
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
@@ -468,7 +464,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(x, typeof_state, &dest, false);
+ LoadCondition(expr, &dest, false);
if (dest.false_was_fall_through()) {
// The false target was just bound.
@@ -543,23 +539,25 @@ void CodeGenerator::LoadGlobalReceiver() {
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
- Variable* variable = x->AsVariableProxy()->AsVariable();
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- Load(&property);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
} else {
- Load(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ Load(expr);
}
}
@@ -1190,12 +1188,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Perform the operation.
switch (op) {
case Token::SAR:
- __ sar(answer.reg());
+ __ sar_cl(answer.reg());
// No checks of result necessary
break;
case Token::SHR: {
Label result_ok;
- __ shr(answer.reg());
+ __ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
@@ -1216,7 +1214,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
}
case Token::SHL: {
Label result_ok;
- __ shl(answer.reg());
+ __ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
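
A sketch of the smi range check guarding the inlined shifts above. A smi payload is 31 bits (the value is tagged by doubling it), so an unsigned shift result is only safe to tag if neither of the two high-order bits is set: 0x80000000 would be lost by the tagging shift, and 0x40000000 would turn the tagged value negative.

#include <cassert>
#include <stdint.h>

static bool UnsignedResultFitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;
}

int main() {
  assert(UnsignedResultFitsInSmi(0x3FFFFFFFu));
  assert(!UnsignedResultFitsInSmi(0x40000000u));  // sign would flip
  assert(!UnsignedResultFitsInSmi(0x80000000u));  // high bit lost
  return 0;
}
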
@@ -1970,27 +1968,6 @@ void CodeGenerator::Comparison(Condition cc,
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
@@ -2027,7 +2004,7 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
@@ -2203,14 +2180,12 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() {
- if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- deferred->Branch(below);
- deferred->BindExit();
- }
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ deferred->Branch(below);
+ deferred->BindExit();
}
@@ -2368,7 +2343,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
@@ -2395,7 +2370,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -2415,7 +2390,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.true_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -2437,7 +2412,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
// or control flow effect). LoadCondition is called without
// forcing control flow.
ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->condition(), &dest, false);
if (!dest.is_used()) {
// We got a value on the frame rather than (or in addition to)
// control flow.
@@ -2474,6 +2449,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
+ masm()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@@ -2514,7 +2490,7 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
- ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@@ -2737,8 +2713,10 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
node->continue_target()->Bind();
}
if (has_valid_frame()) {
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -2793,7 +2771,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -2840,7 +2818,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
// The break target is the fall-through (body is a backward
// jump from here and thus an invalid fall-through).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// If we have chosen not to recompile the test at the bottom,
@@ -2931,7 +2909,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -3001,7 +2979,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// The break target is the fall-through (body is a backward
// jump from here).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// Otherwise, jump back to the test at the top.
@@ -3078,13 +3056,59 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
// eax: value to be iterated over
- frame_->EmitPush(eax); // push the object being iterated over (slot 4)
+ frame_->EmitPush(eax); // Push the object being iterated over.
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ mov(ecx, eax);
+ loop.Bind();
+ // Check that there are no elements.
+ __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
+ __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ call_runtime.Branch(not_equal);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in ebx for the subsequent
+ // prototype load.
+ __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+ __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array()));
+ call_runtime.Branch(equal);
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
+ __ test(edx, Immediate(kSmiTagMask));
+ call_runtime.Branch(zero);
+ // For all objects but the receiver, check that the cache is empty.
+ __ cmp(ecx, Operand(eax));
+ check_prototype.Branch(equal);
+ __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ call_runtime.Branch(not_equal);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ loop.Branch(not_equal);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
// eax: map or fixed array (result from call to
// Runtime::kGetPropertyNamesFast)
__ mov(edx, Operand(eax));
@@ -3092,9 +3116,13 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ cmp(ecx, Factory::meta_map());
fixed_array.Branch(not_equal);
+ use_cache.Bind();
// Get enum cache
- // eax: map (result from call to Runtime::kGetPropertyNamesFast)
+ // eax: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
__ mov(ecx, Operand(eax));
+
__ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
// Get the bridge array held in the enumeration index field.
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
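
A hand-rolled sketch (not V8's actual heap layout) of the validity walk the generated code performs before trusting the enum cache: every object on the prototype chain must have no elements, non-empty instance descriptors with an enum cache present, and only the receiver itself may have a non-empty cache.

struct FakeObject {
  bool has_elements;
  bool descriptors_empty;
  bool enum_index_is_smi;   // a smi there means no enum cache exists
  bool enum_cache_empty;
  FakeObject* prototype;    // NULL terminates the chain
};

static bool CanUseEnumCache(FakeObject* receiver) {
  for (FakeObject* o = receiver; o != 0; o = o->prototype) {
    if (o->has_elements) return false;       // -> call_runtime
    if (o->descriptors_empty) return false;  // -> call_runtime
    if (o->enum_index_is_smi) return false;  // -> call_runtime
    if (o != receiver && !o->enum_cache_empty) return false;
  }
  return true;  // -> use_cache with the receiver's map
}

int main() {
  FakeObject proto = { false, false, false, true, 0 };
  FakeObject obj = { false, false, false, false, &proto };
  return CanUseEnumCache(&obj) ? 0 : 1;
}
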
@@ -3576,7 +3604,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
InstantiateBoilerplate(boilerplate);
@@ -3596,25 +3625,25 @@ void CodeGenerator::VisitConditional(Conditional* node) {
JumpTarget else_;
JumpTarget exit;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
if (then.is_linked()) {
exit.Jump();
then.Bind();
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
}
} else {
// The then target was bound, so we compile the then part first.
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
if (else_.is_linked()) {
exit.Jump();
else_.Bind();
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
}
}
@@ -3936,7 +3965,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
}
@@ -3949,7 +3978,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValue(typeof_state());
+ ref.GetValue();
}
}
@@ -3960,12 +3989,28 @@ void CodeGenerator::VisitLiteral(Literal* node) {
}
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
+ ASSERT(value->IsSmi());
+ int bits = reinterpret_cast<int>(*value);
+ __ push(Immediate(bits & 0x0000FFFF));
+ __ or_(Operand(esp, 0), Immediate(bits & 0xFFFF0000));
+}
+
+
+void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
+ ASSERT(value->IsSmi());
+ int bits = reinterpret_cast<int>(*value);
+ __ mov(Operand(ebp, offset), Immediate(bits & 0x0000FFFF));
+ __ or_(Operand(ebp, offset), Immediate(bits & 0xFFFF0000));
+}
+
+
+void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
ASSERT(target.is_valid());
ASSERT(value->IsSmi());
int bits = reinterpret_cast<int>(*value);
__ Set(target, Immediate(bits & 0x0000FFFF));
- __ xor_(target, bits & 0xFFFF0000);
+ __ or_(target, bits & 0xFFFF0000);
}
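
A sketch of the bit-splitting these three helpers share: an "unsafe" smi has an attacker-influenced bit pattern, so instead of embedding it as one 32-bit immediate the generator emits two instructions carrying at most 16 user-controlled bits each and recombines the halves. Because the halves are disjoint, OR (and the XOR used previously) both reassemble the value exactly.

#include <cassert>
#include <stdint.h>

int main() {
  int32_t bits = 0x12345678;         // hypothetical tagged value
  int32_t low = bits & 0x0000FFFF;   // immediate of the first op
  int32_t high = bits & 0xFFFF0000;  // immediate of the second op
  assert((low | high) == bits);      // or_, as used above
  assert((low ^ high) == bits);      // the old xor_ was equivalent
  return 0;
}
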
@@ -4356,9 +4401,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// the target, with an implicit promise that it will be written to again
// before it is read.
if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
} else {
- target.GetValue(NOT_INSIDE_TYPEOF);
+ target.GetValue();
}
Load(node->value());
GenericBinaryOperation(node->binary_op(),
@@ -4406,7 +4451,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
- property.GetValue(typeof_state());
+ property.GetValue();
}
@@ -4591,7 +4636,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -4701,10 +4746,10 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat and sliced strings, 8 and 16 bit characters and
-// cons strings where the answer is found in the left hand branch of the
-// cons. The slow case will flatten the string, which will ensure that
-// the answer is in the left hand side the next time around.
+// It can handle flat strings with 8 and 16 bit characters, and cons
+// strings where the answer is found in the left hand branch of the cons.
+// The slow case will flatten the string, which will ensure that the
+// answer is in the left hand side the next time around.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateFastCharCodeAt");
ASSERT(args->length() == 2);
@@ -4712,7 +4757,6 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Label slow_case;
Label end;
Label not_a_flat_string;
- Label a_cons_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
@@ -4783,18 +4827,8 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ test(ecx, Immediate(kIsNotStringMask));
__ j(not_zero, &slow_case);
- // Here we make assumptions about the tag values and the shifts needed.
- // See the comment in objects.h.
- ASSERT(kLongStringTag == 0);
- ASSERT(kMediumStringTag + String::kLongLengthShift ==
- String::kMediumLengthShift);
- ASSERT(kShortStringTag + String::kLongLengthShift ==
- String::kShortLengthShift);
- __ and_(ecx, kStringSizeMask);
- __ add(Operand(ecx), Immediate(String::kLongLengthShift));
// Fetch the length field into the temporary register.
__ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
- __ shr(temp.reg()); // The shift amount in ecx is implicit operand.
// Check for index out of range.
__ cmp(index.reg(), Operand(temp.reg()));
__ j(greater_equal, &slow_case);
@@ -4834,21 +4868,16 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ bind(&not_a_flat_string);
__ and_(temp.reg(), kStringRepresentationMask);
__ cmp(temp.reg(), kConsStringTag);
- __ j(equal, &a_cons_string);
- __ cmp(temp.reg(), kSlicedStringTag);
__ j(not_equal, &slow_case);
- // SlicedString.
- // Add the offset to the index and trigger the slow case on overflow.
- __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
- __ j(overflow, &slow_case);
- // Getting the underlying string is done by running the cons string code.
-
// ConsString.
- __ bind(&a_cons_string);
- // Get the first of the two strings. Both sliced and cons strings
- // store their source string at the same offset.
- ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+ // Check that the right hand side is the empty string (i.e. if this is
+ // really a flat string in a cons string). If that is not the case we
+ // would rather go to the runtime system now, to flatten the string.
+ __ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
+ __ cmp(Operand(temp.reg()), Immediate(Handle<String>(Heap::empty_string())));
+ __ j(not_equal, &slow_case);
+ // Get the first of the two strings.
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
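
A sketch of the new cons-string fast case above: the inlined code only follows the left branch when the right branch is the empty string, i.e. when the cons is really a flat string in disguise. Anything else bails out to the runtime, which flattens the string so the next attempt hits the fast path.

#include <cassert>
#include <string>

struct ConsString { std::string first; std::string second; };

static bool FastCharCodeAt(const ConsString& s, size_t index, int* out) {
  if (!s.second.empty()) return false;           // -> slow case, flatten
  if (index >= s.first.length()) return false;   // index out of range
  *out = static_cast<unsigned char>(s.first[index]);
  return true;
}

int main() {
  ConsString flat_in_cons = { "abc", "" };
  ConsString real_cons = { "ab", "cd" };
  int code = 0;
  assert(FastCharCodeAt(flat_in_cons, 1, &code) && code == 'b');
  assert(!FastCharCodeAt(real_cons, 1, &code));  // needs flattening
  return 0;
}
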
@@ -4881,6 +4910,55 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ __ cmp(obj.reg(), Factory::null_value());
+ destination()->true_target()->Branch(equal);
+
+ Result map = allocator()->Allocate();
+ ASSERT(map.is_valid());
+ __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
+ __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
+ __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(less);
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ obj.Unuse();
+ map.Unuse();
+ destination()->Split(less_equal);
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
+ obj.Unuse();
+ temp.Unuse();
+ destination()->Split(equal);
+}
+
+
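
A sketch of the %_IsObject fast path above, with instance types modeled as plain integers (the bounds are placeholder assumptions, not V8's values): null counts as an object, undetectable objects behave like undefined, and otherwise the instance type must lie in the JS object range.

#include <cassert>

static const int kFirstJSObjectType = 100;  // assumed stand-in
static const int kLastJSObjectType = 120;   // assumed stand-in

struct FakeHeapObject {
  bool is_null;
  bool is_undetectable;
  int instance_type;
};

static bool IsObjectForTypeof(const FakeHeapObject& o) {
  if (o.is_null) return true;           // typeof null == 'object'
  if (o.is_undetectable) return false;  // behaves like undefined
  return o.instance_type >= kFirstJSObjectType &&
         o.instance_type <= kLastJSObjectType;
}

int main() {
  FakeHeapObject null_obj = { true, false, 0 };
  FakeHeapObject undetectable = { false, true, 110 };
  FakeHeapObject plain = { false, false, 110 };
  assert(IsObjectForTypeof(null_obj));
  assert(!IsObjectForTypeof(undetectable));
  assert(IsObjectForTypeof(plain));
  return 0;
}
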
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -5184,6 +5262,18 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -5226,9 +5316,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- // Note that because of NOT and an optimization in comparison of a typeof
- // expression to a literal string, this function can fail to leave a value
- // on top of the frame or in the cc register.
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -5237,7 +5324,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
// Swap the true and false targets but keep the same actual label
// as the fall through.
destination()->Invert();
- LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ LoadCondition(node->expression(), destination(), true);
// Swap the labels back.
destination()->Invert();
@@ -5487,7 +5574,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
Result new_value = frame_->Pop();
new_value.ToRegister();
@@ -5565,9 +5652,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- // Note that due to an optimization in comparison operations (typeof
- // compared to a string literal), we can evaluate a binary expression such
- // as AND or OR and not leave a value on the frame or in the cc register.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -5583,7 +5667,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (op == Token::AND) {
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.false_was_fall_through()) {
// The current false target was used as the fall-through. If
@@ -5602,7 +5686,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
is_true.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have actually just jumped to or bound the current false
// target but the current control destination is not marked as
@@ -5613,7 +5697,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_true
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -5646,7 +5730,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (op == Token::OR) {
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.true_was_fall_through()) {
// The current true target was used as the fall-through. If
@@ -5665,7 +5749,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
is_false.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have just jumped to or bound the current true target but
// the current control destination is not marked as used.
@@ -5675,7 +5759,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_false
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -5807,6 +5891,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->false_target()->Branch(zero);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ destination()->true_target()->Branch(equal);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
@@ -5816,10 +5903,13 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ cmp(answer.reg(), Factory::null_value());
destination()->true_target()->Branch(equal);
- // It can be an undetectable object.
Result map = allocator()->Allocate();
ASSERT(map.is_valid());
- __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ // Regular expressions are typeof == 'function', not 'object'.
+ __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
+ destination()->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
__ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
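
A sketch of the semantics the two hunks above implement: regular expression objects are callable in this engine, so typeof reports 'function' for them, and the 'object' branch must exclude JS_REGEXP_TYPE. Instance types are modeled as a plain enum here.

#include <cassert>
#include <string>

enum FakeInstanceType { JS_OBJECT, JS_REGEXP, JS_FUNCTION };

static std::string TypeOf(FakeInstanceType type) {
  if (type == JS_FUNCTION || type == JS_REGEXP) return "function";
  return "object";
}

int main() {
  assert(TypeOf(JS_REGEXP) == "function");  // callable, not 'object'
  assert(TypeOf(JS_OBJECT) == "object");
  return 0;
}
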
@@ -6068,7 +6158,7 @@ Handle<String> Reference::GetName() {
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -6085,17 +6175,11 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that it is safe to ignore the
- // distinction between expressions in a typeof and not in a
- // typeof. If there is a chance that reference errors can be
- // thrown below, we must distinguish between the two kinds of
- // loads (typeof expression loads must not throw a reference
- // error).
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
@@ -6165,8 +6249,6 @@ void Reference::GetValue(TypeofState typeof_state) {
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
@@ -6285,13 +6367,13 @@ void Reference::GetValue(TypeofState typeof_state) {
}
-void Reference::TakeValue(TypeofState typeof_state) {
+void Reference::TakeValue() {
// For non-constant frame-allocated slots, we invalidate the value in the
// slot. For all others, we fall back on GetValue.
ASSERT(!cgen_->in_spilled_code());
ASSERT(!is_illegal());
if (type_ != SLOT) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6301,7 +6383,7 @@ void Reference::TakeValue(TypeofState typeof_state) {
slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST ||
slot->is_arguments()) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6472,11 +6554,8 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
// String value => false iff empty.
__ cmp(ecx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string);
- __ and_(ecx, kStringSizeMask);
- __ cmp(ecx, kShortStringTag);
- __ j(not_equal, &true_result); // Empty string is always short.
__ mov(edx, FieldOperand(eax, String::kLengthOffset));
- __ shr(edx, String::kShortLengthShift);
+ __ test(edx, Operand(edx));
__ j(zero, &false_result);
__ jmp(&true_result);
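
A sketch of the simplified check above: with the short/medium/long length encoding gone, a string converts to true exactly when its length field is non-zero.

#include <cassert>
#include <string>

static bool StringToBoolean(const std::string& s) {
  return s.length() != 0;  // test edx, edx; zero -> false_result
}

int main() {
  assert(!StringToBoolean(""));
  assert(StringToBoolean("x"));
  return 0;
}
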
@@ -6510,42 +6589,47 @@ void GenericBinaryOpStub::GenerateCall(
__ push(right);
} else {
// The calling convention with registers is left in edx and right in eax.
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- if (!(left.is(edx) && right.is(eax))) {
- if (left.is(eax) && right.is(edx)) {
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
if (IsOperationCommutative()) {
SetArgsReversed();
} else {
__ xchg(left, right);
}
- } else if (left.is(edx)) {
- __ mov(eax, right);
- } else if (left.is(eax)) {
+ } else if (left.is(left_arg)) {
+ __ mov(right_arg, right);
+ } else if (left.is(right_arg)) {
if (IsOperationCommutative()) {
- __ mov(edx, right);
+ __ mov(left_arg, right);
SetArgsReversed();
} else {
- __ mov(edx, left);
- __ mov(eax, right);
+ // Order of moves is important to avoid destroying the left argument.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
}
- } else if (right.is(edx)) {
+ } else if (right.is(left_arg)) {
if (IsOperationCommutative()) {
- __ mov(eax, left);
+ __ mov(right_arg, left);
SetArgsReversed();
} else {
- __ mov(eax, right);
- __ mov(edx, left);
+ // Order of moves is important to avoid destroying the right argument.
+ __ mov(right_arg, right);
+ __ mov(left_arg, left);
}
- } else if (right.is(eax)) {
- __ mov(edx, left);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
} else {
- __ mov(edx, left);
- __ mov(eax, right);
+ // Order of moves is not important.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
}
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
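
A minimal sketch of the shuffle above, with the two argument registers modeled as variables. The point of the "order of moves" comments: when the left value currently occupies the right-argument register, it must be rescued into the left-argument register before that register is overwritten.

#include <cassert>

int main() {
  int edx = 0;      // left_arg
  int eax = 7;      // right_arg, currently holding the left value
  int right = 42;
  edx = eax;        // mov left_arg, left   (save left first)
  eax = right;      // mov right_arg, right (now safe to clobber)
  assert(edx == 7 && eax == 42);
  return 0;
}
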
@@ -6562,19 +6646,22 @@ void GenericBinaryOpStub::GenerateCall(
__ push(left);
__ push(Immediate(right));
} else {
- // Adapt arguments to the calling convention left in edx and right in eax.
- if (left.is(edx)) {
- __ mov(eax, Immediate(right));
- } else if (left.is(eax) && IsOperationCommutative()) {
- __ mov(edx, Immediate(right));
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (left.is(left_arg)) {
+ __ mov(right_arg, Immediate(right));
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ mov(left_arg, Immediate(right));
SetArgsReversed();
} else {
- __ mov(edx, left);
- __ mov(eax, Immediate(right));
+ __ mov(left_arg, left);
+ __ mov(right_arg, Immediate(right));
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6591,18 +6678,21 @@ void GenericBinaryOpStub::GenerateCall(
__ push(Immediate(left));
__ push(right);
} else {
- // Adapt arguments to the calling convention left in edx and right in eax.
- bool is_commutative = (op_ == (Token::ADD) || (op_ == Token::MUL));
- if (right.is(eax)) {
- __ mov(edx, Immediate(left));
- } else if (right.is(edx) && is_commutative) {
- __ mov(eax, Immediate(left));
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (right.is(right_arg)) {
+ __ mov(left_arg, Immediate(left));
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ mov(right_arg, Immediate(left));
+ SetArgsReversed();
} else {
- __ mov(edx, Immediate(left));
- __ mov(eax, right);
+ __ mov(left_arg, Immediate(left));
+ __ mov(right_arg, right);
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6719,11 +6809,11 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform the operation.
switch (op_) {
case Token::SAR:
- __ sar(eax);
+ __ sar_cl(eax);
// No checks of result necessary
break;
case Token::SHR:
- __ shr(eax);
+ __ shr_cl(eax);
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
@@ -6734,7 +6824,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ j(not_zero, slow, not_taken);
break;
case Token::SHL:
- __ shl(eax);
+ __ shl_cl(eax);
// Check that the *signed* result fits in a smi.
__ cmp(eax, 0xc0000000);
__ j(sign, slow, not_taken);
@@ -6784,8 +6874,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// eax: y
// edx: x
- if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
- CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
switch (op_) {
@@ -6880,7 +6970,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (use_sse3_) {
// Truncate the operands to 32-bit integers and check for
// exceptions in doing so.
- CpuFeatures::Scope scope(CpuFeatures::SSE3);
+ CpuFeatures::Scope scope(SSE3);
__ fisttp_s(Operand(esp, 0 * kPointerSize));
__ fisttp_s(Operand(esp, 1 * kPointerSize));
__ fnstsw_ax();
@@ -6909,9 +6999,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar(eax); break;
- case Token::SHL: __ shl(eax); break;
- case Token::SHR: __ shr(eax); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -6926,7 +7016,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Tag smi result and return.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, eax, times_1, kSmiTag));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
if (op_ != Token::SHR) {
@@ -6953,7 +7043,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ fild_s(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
// Clear the FPU exception flag and reset the stack before calling
@@ -6985,7 +7075,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// If all else fails, use the runtime system to get the correct
// result. If arguments were passed in registers, now place them on the
- // stack in the correct order.
+ // stack in the correct order below the return address.
__ bind(&call_runtime);
if (HasArgumentsInRegisters()) {
__ pop(ecx);
@@ -7001,7 +7091,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
switch (op_) {
case Token::ADD: {
// Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1;
+ Label not_strings, not_string1, string1;
Result answer;
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
@@ -7016,8 +7106,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &string1);
- // First and second argument are strings.
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+ // First and second argument are strings. Jump to the string add stub.
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&stub);
// Only first argument is a string.
__ bind(&string1);
@@ -7400,20 +7491,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
// not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
// Read top bits of double representation (second word of value).
- __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ not_(eax);
- __ test(eax, Immediate(0x7ff00000));
- __ j(not_zero, &return_equal);
- __ not_(eax);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ shl(eax, 12);
- // Or with all low-bits of mantissa.
- __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
- // Return zero equal if all bits in mantissa is zero (it's an Infinity)
- // and non-zero if not (it's a NaN).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ __ setcc(above_equal, eax);
__ ret(0);
__ bind(&not_identical);
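
A C++ rendering of the QNaN test above. The value of kQuietNaNHighBitsMask is an assumption here (all exponent bits plus the top, quiet, mantissa bit of the high word). Doubling the high word discards the sign bit, so a single unsigned compare distinguishes quiet NaNs from infinities and ordinary numbers.

#include <cassert>
#include <cstring>
#include <limits>
#include <stdint.h>

static bool IsQuietNaN(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  const uint32_t kQuietNaNHighBitsMask = 0x7FF80000u;  // assumed value
  return (high << 1) >= (kQuietNaNHighBitsMask << 1);
}

int main() {
  assert(IsQuietNaN(std::numeric_limits<double>::quiet_NaN()));
  assert(!IsQuietNaN(std::numeric_limits<double>::infinity()));
  assert(!IsQuietNaN(-std::numeric_limits<double>::infinity()));
  assert(!IsQuietNaN(1.0));
  return 0;
}
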
@@ -7508,9 +7598,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call builtin if operands are not floating point or smi.
Label check_for_symbols;
Label unordered;
- if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
- CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
- CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
@@ -7699,11 +7789,84 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
}
+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly. If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_MAC_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label get_result;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
+ ASSERT_EQ(kArgc, 4);
+ if (kPassHandlesDirectly) {
+ // When handles are passed directly we don't have to allocate extra
+ // space for and pass an out parameter.
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
+ } else {
+ // The function expects three arguments to be passed but we allocate
+ // four to get space for the output cell. The argument slots are filled
+ // as follows:
+ //
+ // 3: output cell
+ // 2: arguments pointer
+ // 1: name
+ // 0: pointer to the output cell
+ //
+ // Note that this is one more "argument" than the function expects
+ // so the out cell will have to be popped explicitly after returning
+ // from the function.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
+ __ mov(ebx, esp);
+ __ add(Operand(ebx), Immediate(3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
+ }
+ // Call the api function!
+ __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(Factory::the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception, not_taken);
+ if (!kPassHandlesDirectly) {
+ // The returned value is a pointer to the handle holding the result.
+ // Dereference this to get to the location.
+ __ mov(eax, Operand(eax, 0));
+ }
+ // Check if the result handle holds 0
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &get_result, taken);
+ // It was zero; the result is undefined.
+ __ mov(eax, Factory::undefined_value());
+ __ jmp(&prologue);
+ // It was non-zero. Dereference to get the result value.
+ __ bind(&get_result);
+ __ mov(eax, Operand(eax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
+ 0,
+ 1);
+}
+
+
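
A simplified sketch (not V8's Handle class) of the two conventions the kPassHandlesDirectly flag selects between: a handle is one pointer to a cell holding the object pointer. Passed "directly", the handle struct itself travels by value; otherwise the callee fills a caller-allocated output cell and returns its address, which is why the stub reserves a fourth stack slot.

#include <cassert>

struct Obj { int x; };
struct FakeHandle { Obj** location; };

static FakeHandle GetterDirect(Obj** cell) {
  FakeHandle h = { cell };
  return h;  // returned by value, using the location field directly
}

static FakeHandle* GetterIndirect(Obj** cell, FakeHandle* out) {
  out->location = cell;
  return out;  // pointer to the handle holding the result
}

int main() {
  Obj obj = { 42 };
  Obj* cell = &obj;
  FakeHandle direct = GetterDirect(&cell);
  FakeHandle out_cell;
  FakeHandle* indirect = GetterIndirect(&cell, &out_cell);
  assert((*direct.location)->x == 42);
  assert((*indirect->location)->x == 42);
  return 0;
}
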
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@@ -7753,7 +7916,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned, not_taken);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
__ ret(0);
// Handling of failure.
@@ -7852,12 +8015,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
- StackFrame::Type frame_type = is_debug_break ?
- StackFrame::EXIT_DEBUG :
- StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -7875,7 +8038,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -7884,7 +8047,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -7895,7 +8058,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -8072,6 +8235,224 @@ int CompareStub::MinorKey() {
return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // eax: first string
+ // edx: second string
+ // Check if either of the strings is empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in eax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in edx.
+ __ mov(eax, edx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // eax: first string
+ // ebx: length of first string
+ // ecx: length of second string
+ // edx: second string
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result;
+ __ bind(&both_not_zero_length);
+ __ add(ebx, Operand(ecx));
+ // Use the runtime system when adding two one-character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmp(ebx, 2);
+ __ j(equal, &string_add_runtime);
+ // Check if resulting string will be flat.
+ __ cmp(ebx, String::kMinNonFlatLength);
+ __ j(below, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ ASSERT((String::kMaxLength & 0x80000000) == 0);
+ __ cmp(ebx, String::kMaxLength);
+ __ j(above, &string_add_runtime);
+
+ // If the result is not supposed to be flat, allocate a cons string
+ // object. If both strings are ascii the result is an ascii cons string.
+ Label non_ascii, allocated;
+ __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ and_(ecx, Operand(edi));
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
+ __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ mov(eax, ecx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that neither of the
+ // strings is external.
+ // eax: first string
+ // ebx: length of resulting flat string
+ // edx: second string
+ __ bind(&string_add_flat_result);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // eax: first string
+ // ebx: length of resulting flat string
+ // edx: second string
+ Label non_ascii_string_add_flat_result;
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ ASSERT(kAsciiStringTag != 0);
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &string_add_runtime);
+ // Both strings are ascii strings. As they are short they are both flat.
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // eax: first string - known to be two byte
+ // ebx: length of resulting flat string
+ // edx: second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kAsciiStringTag);
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
+
+
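
A sketch of the length-based dispatch in StringAddStub::Generate. The constants are assumptions standing in for String::kMinNonFlatLength and String::kMaxLength; the branch order mirrors the checks above.

#include <cassert>

enum AddStrategy { RETURN_FIRST, RETURN_SECOND, RUNTIME, FLAT, CONS };

static AddStrategy Classify(unsigned len1, unsigned len2) {
  const unsigned kMinNonFlatLength = 13;    // assumed value
  const unsigned kMaxLength = 0x3FFFFFFFu;  // assumed value
  if (len2 == 0) return RETURN_FIRST;       // second string empty
  if (len1 == 0) return RETURN_SECOND;      // first string empty
  unsigned sum = len1 + len2;
  if (sum == 2) return RUNTIME;        // 1+1 chars: symbol table lookup
  if (sum < kMinNonFlatLength) return FLAT;  // copy into a new string
  if (sum > kMaxLength) return RUNTIME;      // exceptionally long
  return CONS;                               // build a cons string
}

int main() {
  assert(Classify(3, 0) == RETURN_FIRST);
  assert(Classify(1, 1) == RUNTIME);
  assert(Classify(3, 4) == FLAT);
  assert(Classify(100, 100) == CONS);
  return 0;
}
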
+void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ } else {
+ __ mov_w(scratch, Operand(src, 0));
+ __ mov_w(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
+ }
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
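
A C++ rendering of the loop above: one character per iteration, one byte for ascii strings and two for two-byte strings. That is acceptable here because the stub only copies short, known-flat strings.

#include <cassert>
#include <cstring>
#include <stdint.h>

static void CopyCharacters(uint8_t* dest, const uint8_t* src,
                           int count, bool ascii) {
  int step = ascii ? 1 : 2;  // mov_b vs. mov_w
  while (count-- > 0) {
    std::memcpy(dest, src, step);
    src += step;
    dest += step;
  }
}

int main() {
  uint8_t buffer[4] = { 0 };
  CopyCharacters(buffer, reinterpret_cast<const uint8_t*>("abc"), 3, true);
  assert(std::memcmp(buffer, "abc", 3) == 0);
  return 0;
}
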
#undef __
} } // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a37bffea..11a5163d 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED {
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Like GetValue except that the slot is expected to be written to before
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
- void TakeValue(TypeofState typeof_state);
+ void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -241,28 +241,20 @@ class CodeGenState BASE_EMBEDDED {
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state may or may not be inside a typeof, and has its
- // own control destination.
- CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
- ControlDestination* destination);
+ // state. The new state has its own control destination.
+ CodeGenState(CodeGenerator* owner, ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
- TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
-
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
@@ -307,17 +299,12 @@ class CodeGenerator: public AstVisitor {
static bool ShouldGenerateLog(Expression* type);
#endif
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -352,7 +339,6 @@ class CodeGenerator: public AstVisitor {
void ProcessDeferred();
// State
- TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
@@ -412,18 +398,16 @@ class CodeGenerator: public AstVisitor {
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -484,9 +468,11 @@ class CodeGenerator: public AstVisitor {
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target using
+ // Load an integer constant x into a register target or onto the stack using
// at most 16 bits of user-controlled data per assembly operation.
- void LoadUnsafeSmi(Register target, Handle<Object> value);
+ void MoveUnsafeSmi(Register target, Handle<Object> value);
+ void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
+ void PushUnsafeSmi(Handle<Object> value);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
@@ -511,8 +497,6 @@ class CodeGenerator: public AstVisitor {
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
- static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -528,6 +512,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -560,6 +546,9 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -574,6 +563,7 @@ class CodeGenerator: public AstVisitor {
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* stmt);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -626,6 +616,27 @@ class CodeGenerator: public AstVisitor {
};
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
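CodeStub subclasses like CallFunctionStub above are cached under a key derived from MajorKey() and MinorKey(). A minimal standalone sketch of one such packing; both the bit split and the numeric CallFunction id below are assumptions for illustration, not V8's actual encoding:

#include <cstdint>
#include <cstdio>

// Assumed packing: minor key bits above the major key bits. Both the
// 6-bit width and the numeric CallFunction id are illustrative guesses.
const int kMajorBits = 6;

uint32_t StubKey(uint32_t major_key, uint32_t minor_key) {
  return (minor_key << kMajorBits) | major_key;
}

int main() {
  // A CallFunctionStub taking two arguments: MinorKey() returns argc_.
  std::printf("stub cache key: %u\n", StubKey(5, 2));
  return 0;
}
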
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
@@ -638,7 +649,7 @@ class ToBooleanStub: public CodeStub {
};
-// Flag that indicates whether how to generate code for the stub.
+// Flag that indicates how to generate code for the GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
@@ -647,15 +658,15 @@ enum GenericBinaryFlags {
class GenericBinaryOpStub: public CodeStub {
public:
- GenericBinaryOpStub(Token::Value operation,
+ GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : op_(operation),
+ : op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false) {
- use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -729,6 +740,37 @@ class GenericBinaryOpStub: public CodeStub {
};
+// Flag that indicates how to generate code for the StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
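The StringAddStub constructor above turns the flag bits into a boolean with plain bit masking. A minimal self-contained sketch of the same logic, using only the enum from this header:

#include <cstdio>

enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
};

int main() {
  StringAddFlags flags = NO_STRING_CHECK_IN_STUB;
  // Mirrors the constructor: string_check_ is set when the bit is absent.
  bool string_check = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
  // MinorKey() would then be 1 for this stub variant and 0 otherwise.
  std::printf("stub checks its arguments: %s\n", string_check ? "yes" : "no");
  return 0;
}
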
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2d20117a..5ebe1e07 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -45,17 +45,17 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
// for the precise return instruction sequence.
void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Debug::kIa32JSReturnSequenceLength >=
- Debug::kIa32CallInstructionLength);
+ ASSERT(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
- Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength);
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- Debug::kIa32JSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index adedf348..df5a28a5 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -204,7 +204,7 @@ void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
InstructionDesc* id = &instructions_[bm[i].b];
id->mnem = bm[i].mnem;
id->op_order_ = bm[i].op_order_;
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->type = type;
}
}
@@ -216,7 +216,7 @@ void InstructionTable::SetTableRange(InstructionType type,
const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->mnem = mnem;
id->type = type;
}
@@ -226,7 +226,7 @@ void InstructionTable::SetTableRange(InstructionType type,
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->mnem = jump_conditional_mnem[b & 0x0F];
id->type = JUMP_CONDITIONAL_SHORT_INSTR;
}
@@ -272,6 +272,17 @@ class DisassemblerIA32 {
};
+ enum ShiftOpcodeExtension {
+ kROL = 0,
+ kROR = 1,
+ kRCL = 2,
+ kRCR = 3,
+ kSHL = 4,
+ kSHR = 5,
+ kSAR = 7
+ };
+
+
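These enumerators name values of the reg/op field in the modrm byte that follows a shift opcode. A standalone sketch of the field extraction, matching the layout used by the disassembler's get_modrm helper:

#include <cstdint>
#include <cstdio>

// ia32 modrm layout: mod (2 bits), reg/op (3 bits), rm (3 bits).
void get_modrm(uint8_t data, int* mod, int* regop, int* rm) {
  *mod = (data >> 6) & 3;
  *regop = (data >> 3) & 7;
  *rm = data & 7;
}

int main() {
  int mod, regop, rm;
  get_modrm(0xE0, &mod, &regop, &rm);  // 11 100 000 in binary.
  // mod == 3 selects the register form, regop == 4 is kSHL, rm == 0 is eax.
  std::printf("mod=%d regop=%d rm=%d\n", mod, regop, rm);
  return 0;
}
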
const char* NameOfCPURegister(int reg) const {
return converter_.NameOfCPURegister(reg);
}
@@ -321,6 +332,8 @@ class DisassemblerIA32 {
int SetCC(byte* data);
int CMov(byte* data);
int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
void AppendToBuffer(const char* format, ...);
@@ -493,7 +506,7 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::F7Instruction(byte* data) {
- assert(*data == 0xF7);
+ ASSERT_EQ(0xF7, *data);
byte modrm = *(data+1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -526,7 +539,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
- assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+ ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
byte modrm = *(data+1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -534,33 +547,24 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
int num_bytes = 2;
if (mod == 3) {
const char* mnem = NULL;
+ switch (regop) {
+ case kROL: mnem = "rol"; break;
+ case kROR: mnem = "ror"; break;
+ case kRCL: mnem = "rcl"; break;
+ case kSHL: mnem = "shl"; break;
+ case kSHR: mnem = "shr"; break;
+ case kSAR: mnem = "sar"; break;
+ default: UnimplementedInstruction();
+ }
if (op == 0xD1) {
imm8 = 1;
- switch (regop) {
- case edx: mnem = "rcl"; break;
- case edi: mnem = "sar"; break;
- case esp: mnem = "shl"; break;
- default: UnimplementedInstruction();
- }
} else if (op == 0xC1) {
imm8 = *(data+2);
num_bytes = 3;
- switch (regop) {
- case edx: mnem = "rcl"; break;
- case esp: mnem = "shl"; break;
- case ebp: mnem = "shr"; break;
- case edi: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
} else if (op == 0xD3) {
- switch (regop) {
- case esp: mnem = "shl"; break;
- case ebp: mnem = "shr"; break;
- case edi: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
+ // Shift/rotate by cl.
}
- assert(mnem != NULL);
+ ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
if (imm8 > 0) {
AppendToBuffer("%d", imm8);
@@ -576,7 +580,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpShort(byte* data) {
- assert(*data == 0xEB);
+ ASSERT_EQ(0xEB, *data);
byte b = *(data+1);
byte* dest = data + static_cast<int8_t>(b) + 2;
AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -586,7 +590,7 @@ int DisassemblerIA32::JumpShort(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data+1) & 0x0F;
byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
const char* mnem = jump_conditional_mnem[cond];
@@ -614,18 +618,18 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::SetCC(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data+1) & 0x0F;
const char* mnem = set_conditional_mnem[cond];
AppendToBuffer("%s ", mnem);
PrintRightByteOperand(data+2);
- return 3; // includes 0x0F
+ return 3; // Includes 0x0F.
}
// Returns number of bytes used, including *data.
int DisassemblerIA32::CMov(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
const char* mnem = conditional_move_mnem[cond];
int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -635,107 +639,165 @@ int DisassemblerIA32::CMov(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
- byte b1 = *data;
- byte b2 = *(data + 1);
- if (b1 == 0xD9) {
- const char* mnem = NULL;
- switch (b2) {
- case 0xE8: mnem = "fld1"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE0: mnem = "fchs"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xE4: mnem = "ftst"; break;
- }
- if (mnem != NULL) {
- AppendToBuffer("%s", mnem);
- return 2;
- } else if ((b2 & 0xF8) == 0xC8) {
- AppendToBuffer("fxch st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fld_s"; break;
- case ebx: mnem = "fstp_s"; break;
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ }
+}
+
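The split above hinges on one property of the encoding: modrm bytes of 0xC0 and above have mod == 3 and select a register operand, while lower values address memory. A minimal sketch of that predicate:

#include <cstdint>
#include <cstdio>

// modrm_byte >= 0xC0 exactly when the two mod bits are 11, i.e. the
// instruction names an FPU register instead of a memory operand.
bool IsRegisterForm(uint8_t modrm_byte) {
  return modrm_byte >= 0xC0;
}

int main() {
  std::printf("0xE9 -> %s\n", IsRegisterForm(0xE9) ? "register" : "memory");
  std::printf("0x45 -> %s\n", IsRegisterForm(0x45) ? "register" : "memory");
  return 0;
}
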
+int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
default: UnimplementedInstruction();
}
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDD) {
- if ((b2 & 0xF8) == 0xC0) {
- AppendToBuffer("ffree st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fld_d"; break;
- case ebx: mnem = "fstp_d"; break;
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
default: UnimplementedInstruction();
}
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDB) {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fild_s"; break;
- case edx: mnem = "fist_s"; break;
- case ebx: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDF) {
- if (b2 == 0xE0) {
- AppendToBuffer("fnstsw_ax");
- return 2;
- }
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case ebp: mnem = "fild_d"; break;
- case edi: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDC || b1 == 0xDE) {
- bool is_pop = (b1 == 0xDE);
- if (is_pop && b2 == 0xD9) {
- AppendToBuffer("fcompp");
- return 2;
- }
- const char* mnem = "FP0xDC";
- switch (b2 & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
- return 2;
- } else if (b1 == 0xDA && b2 == 0xE9) {
- const char* mnem = "fucompp";
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
+int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ UnimplementedInstruction();
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
+ break;
+ default:
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
AppendToBuffer("%s", mnem);
- return 2;
}
- AppendToBuffer("Unknown FP instruction");
return 2;
}
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 663d1367..807ebd4c 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -28,8 +28,10 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "fast-codegen.h"
#include "parser.h"
+#include "debug.h"
namespace v8 {
namespace internal {
@@ -60,50 +62,445 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = fun->scope()->num_stack_slots();
- for (int i = 0; i < locals_count; i++) {
+ if (locals_count == 1) {
__ push(Immediate(Factory::undefined_value()));
+ } else if (locals_count > 1) {
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < locals_count; i++) {
+ __ push(eax);
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both eax and esi. It replaces the context
+ // passed to us. It's saved on the stack and kept live in esi.
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ __ mov(Operand(esi, Context::SlotOffset(slot->index())), eax);
+ }
}
}
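The parameter_offset computation above addresses incoming parameters relative to ebp. A standalone sketch of the same arithmetic, assuming the usual ia32 frame layout (saved ebp plus return address sitting between ebp and the caller's stack pointer):

#include <cstdio>

const int kPointerSize = 4;                    // ia32
const int kCallerSPOffset = 2 * kPointerSize;  // Saved ebp + return address.

// Byte offset from ebp of parameter i out of num_parameters; parameters
// are pushed left to right, so the first parameter lies deepest.
int ParameterOffset(int i, int num_parameters) {
  return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
}

int main() {
  std::printf("param 0 of 2: ebp + %d\n", ParameterOffset(0, 2));  // 12
  std::printf("param 1 of 2: ebp + %d\n", ParameterOffset(1, 2));  // 8
  return 0;
}
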
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite the receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ __ mov(ecx, eax); // Duplicate result.
+ Move(arguments->slot(), eax, ebx, edx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, ecx, ebx, edx);
+ }
+
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, taken);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
}
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
+ // Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
- SetReturnPosition(fun);
+ EmitReturnSequence(function_->end_position());
+ }
+}
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ // Common return label.
+ __ bind(&return_label_);
if (FLAG_trace) {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ mov(esp, ebp);
__ pop(ebp);
- __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
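The ASSERT_EQ above only holds because the epilogue has a fixed byte length that the debugger can later patch over with a call. A back-of-the-envelope sketch of that length from standard ia32 instruction encodings; the authoritative constant remains Assembler::kJSReturnSequenceLength:

#include <cstdio>

int main() {
  const int kMovEspEbp = 2;  // 89 EC      mov esp, ebp
  const int kPopEbp = 1;     // 5D         pop ebp
  const int kRetImm16 = 3;   // C2 xx xx   ret imm16
  std::printf("JS return sequence: %d bytes\n",
              kMovEspEbp + kPopEbp + kRetImm16);
  return 0;
}
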
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ }
+ }
+}
+
+
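The switch above is the whole expression-context protocol in one place. A plain C++ restatement of what each context demands of a freshly computed value; a sketch of the convention, not engine code:

#include <cstdio>

enum Context { kEffect, kValue, kTest, kValueTest, kTestValue };

const char* Plan(Context context) {
  switch (context) {
    case kEffect:    return "discard the value";
    case kValue:     return "leave the value pushed on the stack";
    case kTest:      return "branch on the value, keeping nothing";
    case kValueTest: return "push and branch; pop again on the false path";
    case kTestValue: return "push and branch; pop again on the true path";
  }
  return "unreachable";
}

int main() {
  for (int c = kEffect; c <= kTestValue; c++) {
    std::printf("%d: %s\n", c, Plan(static_cast<Context>(c)));
  }
  return 0;
}
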
+template <>
+Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(ebp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return Operand(eax, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ Operand location = CreateSlotOperand<Operand>(source, dst);
+ __ mov(dst, location);
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: {
+ Operand location = CreateSlotOperand<Operand>(source, scratch);
+ __ push(location);
+ break;
+ }
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(Immediate(expr->handle()));
+ break;
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ mov(eax, expr->handle());
+ Move(context, eax);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ mov(Operand(ebp, SlotOffset(dst)), src);
+ break;
+ case Slot::CONTEXT: {
+ ASSERT(!src.is(scratch1));
+ ASSERT(!src.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ __ mov(Operand(scratch1, Context::SlotOffset(dst->index())), src);
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int count) {
+ ASSERT(count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ add(Operand(esp), Immediate(count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (count > 1) {
+ __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
+ }
+ __ mov(Operand(esp, 0), source);
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(esp));
+ __ add(Operand(esp), Immediate(count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (count > 1) {
+ __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
+ }
+ __ mov(Operand(esp, 0), source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (count > 1) {
+ __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
+ }
+ __ mov(Operand(esp, 0), source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Use the shared ToBoolean stub to compile the value in the register into
+ // control flow to the code generator's true and false labels. Perform
+ // the fast checks assumed by the stub.
+ __ cmp(source, Factory::undefined_value()); // The undefined value is false.
+ __ j(equal, false_label);
+ __ cmp(source, Factory::true_value()); // True is true.
+ __ j(equal, true_label);
+ __ cmp(source, Factory::false_value()); // False is false.
+ __ j(equal, false_label);
+ ASSERT_EQ(0, kSmiTag);
+ __ test(source, Operand(source)); // The smi zero is false.
+ __ j(zero, false_label);
+ __ test(source, Immediate(kSmiTagMask)); // All other smis are true.
+ __ j(zero, true_label);
+
+ // Call the stub for all other cases.
+ __ push(source);
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax)); // The stub returns nonzero for true.
+ __ j(not_zero, true_label);
+ __ jmp(false_label);
+}
+
+
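The fast checks above assume the ia32 smi representation: a one-bit tag of zero, so a smi is the integer shifted left once, and the all-zero word is the smi zero, the only falsy smi. A minimal sketch of that arithmetic:

#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;
const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }

int main() {
  intptr_t three = 3 << kSmiTagSize;  // The smi 3 as a tagged word.
  std::printf("word=%ld smi=%d truthy=%d\n",
              static_cast<long>(three), IsSmi(three) ? 1 : 0,
              three != 0 ? 1 : 0);
  return 0;
}
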
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ mov(Operand(ebp, SlotOffset(var->slot())),
+ Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(Operand(ebp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ mov(ebx,
+ CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
+ __ cmp(ebx, Operand(esi));
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ mov(eax, Immediate(Factory::the_hole_value()));
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ // No write barrier since the hole value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(eax);
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ int offset = Context::SlotOffset(slot->index());
+ __ RecordWrite(esi, offset, eax, ecx);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ push(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ push(Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ } else {
+ __ push(Immediate(Smi::FromInt(0))); // No initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(eax);
+ } else {
+ __ Set(eax, Immediate(Factory::the_hole_value()));
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Absence of a test eax instruction following the call
+ // indicates that no part of the store was inlined.
+
+ // Value in eax is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ }
}
}
@@ -118,47 +515,17 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(eax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ if (expr->AsLiteral() != NULL) {
__ mov(eax, expr->AsLiteral()->handle());
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(eax);
}
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
-
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
- __ pop(ebp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -166,7 +533,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -175,12 +543,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
__ push(esi);
__ push(Immediate(boilerplate));
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(eax);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), eax);
}
@@ -188,6 +551,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
@@ -195,34 +559,76 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
__ mov(ecx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- // A test eax instruction following the call is used by the IC to
- // indicate that the inobject property case was inlined. Ensure there
- // is no test eax instruction here. Remember that the assembler may
- // choose to do peephole optimization (eg, push/pop elimination).
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ mov(Operand(esp, 0), eax);
- } else {
- ASSERT(expr->location().is_nowhere());
- __ add(Operand(esp), Immediate(kPointerSize));
- }
-
- } else {
- Comment cmnt(masm_, "Stack slot");
+ // By emitting a nop we make sure that we do not have a test eax
+ // instruction after the call; it is treated specially by the LoadIC code.
+ // Remember that the assembler may choose to do peephole optimization
+ // (eg, push/pop elimination).
+ __ nop();
+
+ DropAndMove(expr->context(), eax);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ push(Operand(ebp, SlotOffset(slot)));
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, eax);
+ } else {
+ Comment cmnt(masm_, "Variable rewritten to Property");
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Currently the only parameter expressions that can occur are
+ // of the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(Expression::kValue, object_slot, eax);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ Move(Expression::kValue, key_literal);
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test eax, ..." instruction after
+ // the call. It is treated specially by the LoadIC code.
+ __ nop();
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), eax, 2);
}
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// edi = JS function.
@@ -244,10 +650,130 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(eax);
+ Move(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label exists;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = boilerplate
+
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(ebx);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ // Constant properties (2).
+ __ push(Immediate(expr->constant_properties()));
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&exists);
+ // eax contains boilerplate.
+ // Clone boilerplate.
+ __ push(eax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: The result is saved on top of the
+ // stack and in eax.
+ // If result_saved == false: The result is not on the stack, just in eax.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack.
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(eax);
+ __ mov(ecx, Immediate(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ }
+ // fall through
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(eax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::GETTER:
+ __ push(eax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(eax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
@@ -300,7 +826,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(eax); // Subexpression value.
@@ -313,233 +839,851 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ RecordWrite(ebx, offset, eax, ecx);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ add(Operand(esp), Immediate(kPointerSize));
- } else if (destination.is_temporary() && !result_saved) {
- __ push(eax);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(eax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in eax, variable name in ecx, and the global object
- // on the stack.
- if (source.is_temporary()) {
- __ pop(eax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ mov(eax, rhs->AsLiteral()->handle());
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in eax, variable name in
+ // ecx, and the global object on the stack.
+ __ pop(eax);
__ mov(ecx, var->name());
__ push(CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ mov(Operand(esp, 0), eax);
- } else {
- ASSERT(destination.is_nowhere());
- __ add(Operand(esp), Immediate(kPointerSize));
- }
-
- } else {
- // Local or parameter assignment.
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
- // temporary on the stack.
- __ mov(eax, Operand(esp, 0));
- __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(Operand(ebp, SlotOffset(var->slot())));
+ // Overwrite the receiver on the stack with the result if needed.
+ DropAndMove(expr->context(), eax);
+
+ } else if (var->slot() != NULL) {
+ Slot* slot = var->slot();
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Operand target = Operand(ebp, SlotOffset(var->slot()));
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(target);
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ mov(eax, Operand(esp, 0));
+ __ mov(target, eax);
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(eax);
+ __ mov(target, eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ __ mov(target, eax);
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ __ mov(target, eax);
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ mov(eax, rhs->AsLiteral()->handle());
- __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(eax);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the context chain to the context containing the slot.
+ __ mov(eax,
+ Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ mov(eax,
+ Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ mov(eax, esi); // RecordWrite destroys the object register.
+ }
+ if (FLAG_debug_code) {
+ __ cmp(eax,
+ Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ Check(equal, "Context Slot chain length wrong.");
+ }
+ __ pop(ecx);
+ __ mov(Operand(eax, Context::SlotOffset(slot->index())), ecx);
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(ecx);
+ } else if (expr->context() != Expression::kEffect) {
+ __ mov(edx, ecx);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(eax, offset, ecx, ebx);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), edx);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
-void FastCodeGenerator::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
- __ push(Immediate(var->name()));
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(Operand(esp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(eax);
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(eax); // Result of assignment, saved even if not needed.
+ __ push(Operand(esp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(eax);
+ }
+
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ // Receiver is under the key and value.
+ __ push(Operand(esp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(eax);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(eax); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ push(Operand(esp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(eax);
+ }
+
+ // Receiver and key are still on stack.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ Move(expr->context(), eax);
+}
+
+
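The nop above takes part in a small patching convention: on return, the IC inspects the byte after the call site, where a test eax opcode marks an inlined fast path and a nop marks none. A sketch of that check; the helper name is made up for illustration, not engine code:

#include <cstdint>
#include <cstdio>

// 0xA9 is "test eax, imm32"; 0x90 is "nop".
bool HasInlinedPatchSite(const uint8_t* return_address) {
  return *return_address == 0xA9;
}

int main() {
  const uint8_t after_call[] = { 0x90 };  // The nop emitted above.
  std::printf("patchable inline site: %s\n",
              HasInlinedPatchSite(after_call) ? "yes" : "no");
  return 0;
}
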
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in ecx and the receiver on the stack.
+ __ mov(ecx, Immediate(key->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a test eax
+ // instruction after the call; it is treated specially by the LoadIC code.
+ __ nop();
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call; it is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ push(Immediate(args->at(i)->AsLiteral()->handle()));
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- // Record source position for debugger
+ // Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, reloc_info);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- if (expr->location().is_temporary()) {
- __ mov(Operand(esp, 0), eax);
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ push(Immediate(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ push(CodeGenerator::GlobalObject());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ push(Immediate(key->handle()));
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call; it is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+ // Pop receiver.
+ __ pop(ebx);
+ // Push result (function).
+ __ push(eax);
+ // Push receiver object on stack.
+ if (prop->is_synthetic()) {
+ __ push(CodeGenerator::GlobalObject());
+ } else {
+ __ push(ebx);
+ }
+ EmitCallWithStub(expr);
+ }
} else {
- ASSERT(expr->location().is_nowhere());
- __ add(Operand(esp), Immediate(kPointerSize));
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ mov(ebx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ // Emit function call.
+ EmitCallWithStub(expr);
}
}
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ // If the context is value, the argument is already on the stack,
+ // so nothing to do here.
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into edi and eax.
+ __ Set(eax, Immediate(arg_count));
+ // Function is in esp[arg_count + 1].
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in eax, or pop it.
+ DropAndMove(expr->context(), eax);
+}
+
+
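Before the builtin call above, the function sits (arg_count + 1) slots below the top of the stack, beneath the receiver and the arguments. A sketch of the esp-relative offset computed by the Operand(esp, eax, times_pointer_size, kPointerSize) addressing mode:

#include <cstdio>

// Stack picture per the pushes above, growing downward:
//   esp[0 .. argc-1]  arguments (last argument on top)
//   esp[argc]         receiver (the global object)
//   esp[argc + 1]     function
int FunctionOffset(int arg_count, int pointer_size) {
  return arg_count * pointer_size + pointer_size;
}

int main() {
  std::printf("2 args: function at esp + %d bytes\n", FunctionOffset(2, 4));
  return 0;
}
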
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ push(Immediate(expr->name()));
+ __ mov(eax, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ push(Immediate(args->at(i)->AsLiteral()->handle()));
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), eax);
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), eax);
+ }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(Immediate(Factory::undefined_value()));
+ break;
+ case Expression::kTestValue:
+ // Value is false so it's needed.
+ __ push(Immediate(Factory::undefined_value()));
+ // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
}
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, Immediate(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ mov(Operand(esp, 0), eax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), eax);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
}
+}
+
+
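
The NOT case above emits no negation instructions at all: it visits the
operand in a test context with the true and false targets exchanged, and only
materializes a boolean where the surrounding context needs a value. A tiny
model of the label swap (plain C++, not V8 API):

    #include <cassert>

    enum Target { TRUE_LABEL, FALSE_LABEL };

    // "Compiling" e in a test context: control flows to one of two targets.
    Target CompileTest(bool value, Target on_true, Target on_false) {
      return value ? on_true : on_false;
    }

    // Compiling !e just swaps the targets handed to the operand.
    Target CompileNot(bool value, Target on_true, Target on_false) {
      return CompileTest(value, on_false, on_true);
    }

    int main() {
      assert(CompileNot(true, TRUE_LABEL, FALSE_LABEL) == FALSE_LABEL);
      assert(CompileNot(false, TRUE_LABEL, FALSE_LABEL) == TRUE_LABEL);
      return 0;
    }
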
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(eax);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(eax);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ mov(ecx, proxy->AsVariable()->name());
+ __ push(CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore the stack after the store IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+ // Do nothing. The result is either on the stack for value context
+ // or discarded for effect context.
+ break;
+ case Expression::kTest:
+ __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
-
- Label eval_right, done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
-
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
-
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
-
- Visit(left);
- // Use the shared ToBoolean stub to find the boolean value of the
- // left-hand subexpression. Load the value into eax to perform some
- // inlined checks assumed by the stub.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- // Copy the left-hand value into eax because we may need it as the
- // final result.
- __ mov(eax, Operand(esp, 0));
- } else {
- // Pop the left-hand value into eax because we will not need it as the
- // final result.
- __ pop(eax);
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+
+ Visit(expr->left());
+ Visit(expr->right());
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Move(expr->context(), eax);
+
+ break;
}
- } else {
- // Load the left-hand value into eax. Put it on the stack if we may
- // need it.
- ASSERT(left->AsLiteral() != NULL);
- __ mov(eax, left->AsLiteral()->handle());
- if (destination.is_temporary()) __ push(eax);
- }
- // The left-hand value is in eax. It is also on the stack iff the
- // destination location is temporary.
-
- // Perform fast checks assumed by the stub.
- __ cmp(eax, Factory::undefined_value()); // The undefined value is false.
- __ j(equal, &eval_right);
- __ cmp(eax, Factory::true_value()); // True is true.
- __ j(equal, &done);
- __ cmp(eax, Factory::false_value()); // False is false.
- __ j(equal, &eval_right);
- ASSERT(kSmiTag == 0);
- __ test(eax, Operand(eax)); // The smi zero is false.
- __ j(zero, &eval_right);
- __ test(eax, Immediate(kSmiTagMask)); // All other smis are true.
- __ j(zero, &done);
+ default:
+ UNREACHABLE();
+ }
+}
- // Call the stub for all other cases.
- __ push(eax);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ test(eax, Operand(eax)); // The stub returns nonzero for true.
- __ j(not_zero, &done);
- __ bind(&eval_right);
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) {
- __ add(Operand(esp), Immediate(kPointerSize));
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
}
- Visit(right);
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ cmp(eax, Factory::true_value());
+ __ j(equal, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ push(Immediate(right->AsLiteral()->handle()));
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ add(Operand(esp), Immediate(kPointerSize));
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(zero, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ __ pop(eax);
+ __ pop(edx);
+ break;
+ case Token::LT:
+ cc = less;
+ __ pop(eax);
+ __ pop(edx);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = less;
+ __ pop(edx);
+ __ pop(eax);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = greater_equal;
+ __ pop(edx);
+ __ pop(eax);
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ __ pop(eax);
+ __ pop(edx);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+ }
}
- __ bind(&done);
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
}
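
Before calling the CompareStub, the code above filters out the case where both
operands are smis by or-ing them together and testing the tag bit once, then
comparing the tagged words directly. A small sketch of why that works, with
constants assumed to match the ia32 smi encoding of the time (kSmiTag == 0,
one tag bit):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumed: low bit is the tag

    // A smi is the value shifted left by one; heap pointers have the
    // low bit set.
    intptr_t ToSmi(intptr_t value) { return value << 1; }

    // Mirrors the generated code: or the operands and test the tag bit
    // once to decide whether both are smis.
    bool BothSmis(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      intptr_t two = ToSmi(2);
      intptr_t three = ToSmi(3);
      assert(BothSmis(two, three));
      // Tagging is a left shift, so it preserves order and the tagged
      // words can be compared directly, as cmp(edx, eax) does above.
      assert(two < three);
      intptr_t heap_object = 0x1234 | 1;  // stand-in for a tagged pointer
      assert(!BothSmis(two, heap_object));
      return 0;
    }
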
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), eax);
+}
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index dea439f2..5c900bed 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -56,19 +56,14 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- // Determine frame type.
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- return EXIT_DEBUG;
- } else {
- return EXIT;
- }
+ return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Exit frames on IA-32 do not contain any pointers. The arguments
- // are traversed as part of the expression stack of the calling
- // frame.
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 3a7c86bf..c3fe6c74 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -76,7 +76,7 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -2 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3aa3c346..6988fe09 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -31,6 +31,7 @@
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -108,7 +109,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r1, FieldOperand(name, String::kLengthOffset));
+ __ mov(r1, FieldOperand(name, String::kHashFieldOffset));
__ shr(r1, String::kHashShift);
if (i > 0) {
__ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
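
The masked-index comment above describes the dictionary's quadratic probe
sequence. A standalone sketch of that arithmetic, assuming GetProbeOffset(i)
expands to i + i * i as the comment states and that the capacity is a power of
two so the mask wraps the index (constants are illustrative):

    #include <cassert>

    int ProbeIndex(unsigned hash, int i, int capacity) {
      int mask = capacity - 1;
      return static_cast<int>((hash + i + i * i) & mask);
    }

    int main() {
      const int kCapacity = 8;
      // Successive probes for the same hash visit distinct slots early on.
      assert(ProbeIndex(5, 0, kCapacity) == 5);  // 5
      assert(ProbeIndex(5, 1, kCapacity) == 7);  // 5 + 1 + 1
      assert(ProbeIndex(5, 2, kCapacity) == 3);  // (5 + 2 + 4) & 7
      return 0;
    }
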
@@ -216,18 +217,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
}
-#ifdef DEBUG
-// For use in assert below.
-static int TenToThe(int exponent) {
- ASSERT(exponent <= 9);
- ASSERT(exponent >= 1);
- int answer = 10;
- for (int i = 1; i < exponent; i++) answer *= 10;
- return answer;
-}
-#endif
-
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- esp[0] : return address
@@ -309,7 +298,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ __ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
@@ -324,20 +313,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // Array index string: If short enough use cache in length/hash field (ebx).
- // We assert that there are enough bits in an int32_t after the hash shift
- // bits have been subtracted to allow space for the length and the cached
- // array index.
+ // If the hash field contains an array index, pick it out. The assert
+ // checks that the constants for the maximum number of digits for an array
+ // index cached in the hash field and the number of bits reserved for it
+ // do not conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << (String::kShortLengthShift - String::kHashShift)));
+ (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- const int kLengthFieldLimit =
- (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
- __ cmp(ebx, kLengthFieldLimit);
- __ j(above_equal, &slow);
__ mov(eax, Operand(ebx));
- __ and_(eax, (1 << String::kShortLengthShift) - 1);
- __ shr(eax, String::kLongLengthShift);
+ __ and_(eax, String::kArrayIndexHashMask);
+ __ shr(eax, String::kHashShift);
__ jmp(&index_int);
}
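
The rewritten index-string path above extracts the cached array index by
masking the hash field and then shifting. A sketch of that mask-then-shift
decode; the concrete shift and bit counts below are assumptions, not the real
String constants, and only serve to illustrate the extraction:

    #include <cassert>

    const int kHashShift = 2;                 // assumed flag-bit count
    const unsigned kArrayIndexValueBits = 24; // assumed index width
    const unsigned kArrayIndexHashMask =
        ((1u << kArrayIndexValueBits) - 1) << kHashShift;

    unsigned CachedArrayIndex(unsigned hash_field) {
      // Matches the emitted and_/shr pair: mask first, then shift.
      return (hash_field & kArrayIndexHashMask) >> kHashShift;
    }

    int main() {
      unsigned field = (42u << kHashShift) | 1u;  // index 42 plus flag bits
      assert(CachedArrayIndex(field) == 42);
      return 0;
    }
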
@@ -403,13 +388,13 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ movsx_b(eax, Operand(ecx, eax, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ mov_b(eax, Operand(ecx, eax, times_1, 0));
+ __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(eax, Operand(ecx, eax, times_2, 0));
break;
case kExternalUnsignedShortArray:
- __ mov_w(eax, Operand(ecx, eax, times_2, 0));
+ __ movzx_w(eax, Operand(ecx, eax, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 08c4c0c5..b91caa8c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -213,6 +213,13 @@ void MacroAssembler::RecordWrite(Register object, int offset,
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ cmp(esp,
+ Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
+ j(below, on_stack_overflow);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
@@ -319,7 +326,7 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
+ if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
ffree(0);
fincstp();
@@ -355,10 +362,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
-
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -369,23 +373,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Reserve room for entry stack pointer and push the code object or debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
- push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ push(Immediate(0));
+ } else {
+ push(Immediate(CodeObject()));
+ }
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
+}
- // Setup argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, Operand(eax));
- lea(esi, Operand(ebp, eax, times_4, offset));
-
+void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
@@ -396,8 +401,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
#endif
- // Reserve space for two arguments: argc and argv.
- sub(Operand(esp), Immediate(2 * kPointerSize));
+ // Reserve space for arguments.
+ sub(Operand(esp), Immediate(argc * kPointerSize));
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -411,15 +416,39 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+ EnterExitFramePrologue(mode);
+
+ // Setup argc and argv in callee-saved registers.
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ mov(edi, Operand(eax));
+ lea(esi, Operand(ebp, eax, times_4, offset));
+
+ EnterExitFrameEpilogue(mode, 2);
+}
+
+
+void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
+ int stack_space,
+ int argc) {
+ EnterExitFramePrologue(mode);
+
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
+
+ EnterExitFrameEpilogue(mode, argc);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// It's okay to clobber register ebx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
lea(ebx, Operand(ebp, kOffset));
CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
}
@@ -658,6 +687,11 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
+ if (FLAG_debug_code) {
+ test(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, "Unaligned allocation in new space");
+ }
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
@@ -791,6 +825,109 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
+ ASSERT(kShortSize == 2);
+ shl(scratch1, 1);
+ add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+ and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+
+ // Allocate two byte string in new space.
+ AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::string_map()));
+ mov(FieldOperand(result, String::kLengthOffset), length);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
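
The size computation above rounds the character data up to the object
alignment before adding the (already aligned) header; AllocateAsciiString
below does the same with one byte per character. A compact sketch of the
arithmetic, where the concrete constants are stand-ins for the ia32 values
(4-byte alignment, so kObjectAlignmentMask == 3), not quotes from the source:

    #include <cassert>

    const int kObjectAlignmentMask = 3;  // assumed: 4-byte object alignment
    const int kHeaderSize = 12;          // stand-in for the string header

    int TwoByteStringSize(int length) {
      // Two bytes per character, rounded up to the alignment, plus header.
      int data_size =
          (2 * length + kObjectAlignmentMask) & ~kObjectAlignmentMask;
      return kHeaderSize + data_size;
    }

    int main() {
      assert(TwoByteStringSize(0) == 12);  // header only
      assert(TwoByteStringSize(1) == 16);  // 2 bytes rounded up to 4
      assert(TwoByteStringSize(2) == 16);  // exactly 4 bytes of characters
      assert(TwoByteStringSize(3) == 20);  // 6 bytes rounded up to 8
      return 0;
    }
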
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
+ ASSERT(kCharSize == 1);
+ add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+ and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+
+ // Allocate ascii string in new space.
+ AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::ascii_string_map()));
+ mov(FieldOperand(result, String::kLengthOffset), length);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate cons string in new space.
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::cons_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate ascii cons string in new space.
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::cons_ascii_string_map()));
+}
+
+
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
@@ -884,6 +1021,12 @@ void MacroAssembler::CallStub(CodeStub* stub) {
}
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -931,6 +1074,52 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
}
+void MacroAssembler::PushHandleScope(Register scratch) {
+ // Push the number of extensions, smi-tagged so the gc will ignore it.
+ ExternalReference extensions_address =
+ ExternalReference::handle_scope_extensions_address();
+ mov(scratch, Operand::StaticVariable(extensions_address));
+ ASSERT_EQ(0, kSmiTag);
+ shl(scratch, kSmiTagSize);
+ push(scratch);
+ mov(Operand::StaticVariable(extensions_address), Immediate(0));
+ // Push next and limit pointers which will be wordsize aligned and
+ // hence automatically smi tagged.
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ push(Operand::StaticVariable(next_address));
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address();
+ push(Operand::StaticVariable(limit_address));
+}
+
+
+void MacroAssembler::PopHandleScope(Register saved, Register scratch) {
+ ExternalReference extensions_address =
+ ExternalReference::handle_scope_extensions_address();
+ Label write_back;
+ mov(scratch, Operand::StaticVariable(extensions_address));
+ cmp(Operand(scratch), Immediate(0));
+ j(equal, &write_back);
+ // Calling a runtime function messes with registers, so we save and
+ // restore any register we're asked not to change.
+ if (saved.is_valid()) push(saved);
+ CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+ if (saved.is_valid()) pop(saved);
+
+ bind(&write_back);
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address();
+ pop(Operand::StaticVariable(limit_address));
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ pop(Operand::StaticVariable(next_address));
+ pop(scratch);
+ shr(scratch, kSmiTagSize);
+ mov(Operand::StaticVariable(extensions_address), scratch);
+}
+
+
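
PushHandleScope above shifts the extension count left by kSmiTagSize before
pushing it so the value looks like a smi and the GC skips it; PopHandleScope
reverses the shift. A sketch of that round trip, with the tag constants as
assumptions matching the ia32 configuration of the time:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // assumed: kSmiTag == 0, one tag bit

    intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
    intptr_t SmiUntag(intptr_t value) { return value >> kSmiTagSize; }

    int main() {
      intptr_t extensions = 3;
      intptr_t pushed = SmiTag(extensions);    // what PushHandleScope pushes
      assert((pushed & 1) == 0);               // tag bit clear: looks like a smi
      assert(SmiUntag(pushed) == extensions);  // what PopHandleScope recovers
      return 0;
    }
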
void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
@@ -1117,6 +1306,26 @@ Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ mov(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // Slot is in the current function context.
+ // The context may be an intermediate context, not a function context.
+ mov(dst, Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
+
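
LoadContext above walks up the static chain one closure at a time and always
finishes by resolving to the enclosing function context. A toy model of that
walk, with plain structs standing in for V8's Context and JSFunction objects:

    #include <cassert>

    // "outer" models CLOSURE_INDEX -> JSFunction -> context,
    // "fcontext" models FCONTEXT_INDEX.
    struct Context {
      Context* outer;
      Context* fcontext;
    };

    Context* LoadContext(Context* current, int context_chain_length) {
      Context* ctx = current;
      for (int i = 0; i < context_chain_length; i++) {
        ctx = ctx->outer;  // one hop per level, as in the emitted loop
      }
      // The context reached may be an intermediate one; resolve to the
      // enclosing function context.
      return ctx->fcontext;
    }

    int main() {
      Context outer = { 0, 0 };
      Context middle = { &outer, 0 };
      Context inner = { &middle, 0 };
      outer.fcontext = &outer;
      middle.fcontext = &middle;
      inner.fcontext = &inner;
      assert(LoadContext(&inner, 0) == &inner);
      assert(LoadContext(&inner, 2) == &outer);
      return 0;
    }
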
void MacroAssembler::Ret() {
ret(0);
}
@@ -1184,11 +1393,15 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
push(eax);
push(Immediate(p0));
push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ int3();
}
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a0a24280..a41d42e8 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -69,6 +69,12 @@ class MacroAssembler: public Assembler {
#endif
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+ // Do a simple test for stack overflow. This does not handle the overflow.
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -77,17 +83,21 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register eax and
+ // Enter specific kind of exit frame; either in normal or debug mode.
+ // Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
- void EnterExitFrame(StackFrame::Type type);
+ void EnterExitFrame(ExitFrame::Mode mode);
+
+ void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame(StackFrame::Type type);
+ void LeaveExitFrame(ExitFrame::Mode mode);
+ // Find the function context up the context chain.
+ void LoadContext(Register dst, int context_chain_length);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -173,7 +183,7 @@ class MacroAssembler: public Assembler {
// scratch can be passed as no_reg in which case an additional object
// reference will be added to the reloc info. The returned pointers in result
// and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the contnt of result is known to be
+ // result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
// AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
@@ -215,6 +225,32 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
+ // Allocate a sequential string. All the header fields of the string object
+ // are initialized.
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ // Allocate a raw cons string object. Only the map field of the result is
+ // initialized.
+ void AllocateConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -252,6 +288,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub);
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub);
+
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -269,6 +308,12 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ void PushHandleScope(Register scratch);
+
+ // Pops a handle scope using the specified scratch register, ensuring
+ // that the saved register, if it is not no_reg, is left unchanged.
+ void PopHandleScope(Register saved, Register scratch);
+
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext);
@@ -346,6 +391,9 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ void EnterExitFramePrologue(ExitFrame::Mode mode);
+ void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 7af4e89e..2e13d8ae 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -598,10 +598,10 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
__ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_guard_limit));
+ __ sub(ecx, Operand::StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit, not_taken);
// Check if there is room for the variable number of registers above
@@ -1081,9 +1081,9 @@ void RegExpMacroAssemblerIA32::Pop(Register target) {
void RegExpMacroAssemblerIA32::CheckPreemption() {
// Check for preemption.
Label no_preempt;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above, &no_preempt, taken);
SafeCall(&check_preempt_label_);
@@ -1093,17 +1093,15 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
void RegExpMacroAssemblerIA32::CheckStackLimit() {
- if (FLAG_check_stack) {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+ __ j(above, &no_stack_overflow);
- SafeCall(&stack_overflow_label_);
+ SafeCall(&stack_overflow_label_);
- __ bind(&no_stack_overflow);
- }
+ __ bind(&no_stack_overflow);
}
@@ -1163,10 +1161,6 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- __ int3(); // Unused on ia32.
-}
-
#undef __
#endif // V8_NATIVE_REGEXP
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index 2914960e..0bad87d0 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -42,7 +42,7 @@ void Result::ToRegister() {
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+ CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
@@ -64,7 +64,7 @@ void Result::ToRegister(Register target) {
} else {
ASSERT(is_constant());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+ CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 8fa4287f..ce7ed0ec 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -43,6 +43,12 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
// Call the generated regexp code directly. The entry function pointer should
@@ -50,4 +56,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index ca4e1421..425c51dc 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -126,7 +126,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ j(zero, &miss, not_taken);
// Get the map of the receiver and compute the hash.
- __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+ __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
@@ -135,7 +135,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ProbeTable(masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
- __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+ __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
@@ -234,13 +234,9 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// scratch register.
GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
- // Load length directly from the string.
+ // Load length from the string and convert to a smi.
__ bind(&load_length);
- __ and_(scratch, kStringSizeMask);
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- // ecx is also the receiver.
- __ lea(ecx, Operand(scratch, String::kLongLengthShift));
- __ shr(eax); // ecx is implicit shift register.
__ shl(eax, kSmiTagSize);
__ ret(0);
@@ -776,20 +772,40 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
- // Push the arguments on the JS stack of the caller.
- __ pop(scratch2); // remove return address
+ Handle<AccessorInfo> callback_handle(callback);
+
+ Register other = reg.is(scratch1) ? scratch2 : scratch1;
+ __ EnterInternalFrame();
+ __ PushHandleScope(other);
+ // Push the stack address where the list of arguments ends.
+ __ mov(other, esp);
+ __ sub(Operand(other), Immediate(2 * kPointerSize));
+ __ push(other);
__ push(receiver); // receiver
__ push(reg); // holder
- __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data
- __ push(reg);
- __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ mov(other, Immediate(callback_handle));
+ __ push(other);
+ __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
__ push(name_reg); // name
- __ push(scratch2); // restore return address
+ // Save a pointer to where we pushed the arguments pointer.
+ // This will be passed as the const Arguments& to the C++ callback.
+ __ mov(eax, esp);
+ __ add(Operand(eax), Immediate(5 * kPointerSize));
+ __ mov(ebx, esp);
+
+ // Do call through the api.
+ ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ApiGetterEntryStub stub(callback_handle, &fun);
+ __ CallStub(&stub);
- // Do tail-call to the runtime system.
- ExternalReference load_callback_property =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5, 1);
+ // We need to avoid using eax since that now holds the result.
+ Register tmp = other.is(eax) ? reg : other;
+ __ PopHandleScope(eax, tmp);
+ __ LeaveInternalFrame();
+
+ __ ret(0);
}
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 980cec8e..e770cddb 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -75,10 +75,7 @@ void VirtualFrame::SyncElementBelowStackPointer(int index) {
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(element.handle())) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+ cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
} else {
__ Set(Operand(ebp, fp_relative(index)),
Immediate(element.handle()));
@@ -127,10 +124,7 @@ void VirtualFrame::SyncElementByPushing(int index) {
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(element.handle())) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
- __ push(temp.reg());
+ cgen()->PushUnsafeSmi(element.handle());
} else {
__ push(Immediate(element.handle()));
}
@@ -161,7 +155,7 @@ void VirtualFrame::SyncRange(int begin, int end) {
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
- // Emit normal 'push' instructions for elements above stack pointer
+ // Emit normal push instructions for elements above stack pointer
// and use mov instructions if we are below stack pointer.
for (int i = start; i <= end; i++) {
if (!elements_[i].is_synced()) {
@@ -199,7 +193,7 @@ void VirtualFrame::MakeMergable() {
// Emit a move.
if (element.is_constant()) {
if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
+ cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
} else {
__ Set(fresh.reg(), Immediate(element.handle()));
}
@@ -300,7 +294,7 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
if (!source.is_synced()) {
if (cgen()->IsUnsafeSmi(source.handle())) {
esi_caches = i;
- cgen()->LoadUnsafeSmi(esi, source.handle());
+ cgen()->MoveUnsafeSmi(esi, source.handle());
__ mov(Operand(ebp, fp_relative(i)), esi);
} else {
__ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
@@ -408,7 +402,7 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(source.handle())) {
- cgen()->LoadUnsafeSmi(target_reg, source.handle());
+ cgen()->MoveUnsafeSmi(target_reg, source.handle());
} else {
__ Set(target_reg, Immediate(source.handle()));
}
diff --git a/src/ic.cc b/src/ic.cc
index c12dba7b..2779356c 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -126,7 +126,8 @@ Address IC::OriginalCodeAddress() {
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
- int delta = original_code->instruction_start() - code->instruction_start();
+ intptr_t delta =
+ original_code->instruction_start() - code->instruction_start();
return addr + delta;
}
#endif
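
A sketch of the overflow the intptr_t change above guards against: a
difference of two code addresses has pointer width, and truncating it to int
is unsafe when the original code and the debug copy live far apart in the
address space. The helper name is made up; only the pointer arithmetic mirrors
the source:

    #include <cassert>
    #include <cstdint>

    typedef unsigned char byte;

    byte* Translate(byte* addr, byte* original_start, byte* copy_start) {
      intptr_t delta = original_start - copy_start;  // keep full pointer width
      return addr + delta;
    }

    int main() {
      byte original[16];
      byte copy[16];
      // An address inside the copy maps to the same offset in the original.
      assert(Translate(copy + 5, original, copy) == original + 5);
      return 0;
    }
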
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index ae914d39..a904447f 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -117,17 +117,17 @@ static void TraceInterpreter(const byte* code_base,
}
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- backtrack_sp - backtrack_stack_base, \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
+#define BYTECODE(name) \
+ case BC_##name: \
+ TraceInterpreter(code_base, \
+ pc, \
+ static_cast<int>(backtrack_sp - backtrack_stack_base), \
+ current, \
+ current_char, \
+ BC_##name##_LENGTH, \
#name);
#else
-#define BYTECODE(name) \
+#define BYTECODE(name) \
case BC_##name:
#endif
@@ -250,13 +250,14 @@ static bool RawMatch(const byte* code_base,
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base;
+ registers[insn >> BYTECODE_SHIFT] =
+ static_cast<int>(backtrack_sp - backtrack_stack_base);
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
BYTECODE(SET_SP_TO_REGISTER)
backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
backtrack_stack_space = backtrack_stack.max_size() -
- (backtrack_sp - backtrack_stack_base);
+ static_cast<int>(backtrack_sp - backtrack_stack_base);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
BYTECODE(POP_CP)
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index c77f32d1..04d19441 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -2432,16 +2432,19 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
-void TextNode::MakeCaseIndependent() {
+void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
if (elm.type == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.data.u_char_class;
+ // None of the standard character classes is different in the
+ // case-independent case, and it slows us down if we don't know that.
+ if (cc->is_standard()) continue;
ZoneList<CharacterRange>* ranges = cc->ranges();
int range_count = ranges->length();
- for (int i = 0; i < range_count; i++) {
- ranges->at(i).AddCaseEquivalents(ranges);
+ for (int j = 0; j < range_count; j++) {
+ ranges->at(j).AddCaseEquivalents(ranges, is_ascii);
}
}
}
@@ -3912,19 +3915,31 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
}
-void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
+static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
+ int bottom,
+ int top);
+
+
+void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
+ bool is_ascii) {
+ uc16 bottom = from();
+ uc16 top = to();
+ if (is_ascii) {
+ if (bottom > String::kMaxAsciiCharCode) return;
+ if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode;
+ }
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (IsSingleton()) {
+ if (top == bottom) {
// If this is a singleton we just expand the one character.
- int length = uncanonicalize.get(from(), '\0', chars);
+ int length = uncanonicalize.get(bottom, '\0', chars);
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
- if (chr != from()) {
+ if (chr != bottom) {
ranges->Add(CharacterRange::Singleton(chars[i]));
}
}
- } else if (from() <= kRangeCanonicalizeMax &&
- to() <= kRangeCanonicalizeMax) {
+ } else if (bottom <= kRangeCanonicalizeMax &&
+ top <= kRangeCanonicalizeMax) {
// If this is a range we expand the characters block by block,
// expanding contiguous subranges (blocks) one at a time.
// The approach is as follows. For a given start character we
@@ -3943,14 +3958,14 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
// completely contained in a block we do this for all the blocks
// covered by the range.
unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- // First, look up the block that contains the 'from' character.
- int length = canonrange.get(from(), '\0', range);
+ // First, look up the block that contains the 'bottom' character.
+ int length = canonrange.get(bottom, '\0', range);
if (length == 0) {
- range[0] = from();
+ range[0] = bottom;
} else {
ASSERT_EQ(1, length);
}
- int pos = from();
+ int pos = bottom;
// The start of the current block. Note that except for the first
// iteration 'start' is always equal to 'pos'.
int start;
@@ -3961,10 +3976,10 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
} else {
start = pos;
}
- // Then we add the ranges on at a time, incrementing the current
+ // Then we add the ranges one at a time, incrementing the current
// position to be after the last block each time. The position
// always points to the start of a block.
- while (pos < to()) {
+ while (pos < top) {
length = canonrange.get(start, '\0', range);
if (length == 0) {
range[0] = start;
@@ -3975,20 +3990,122 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
// The start point of a block contains the distance to the end
// of the range.
int block_end = start + (range[0] & kPayloadMask) - 1;
- int end = (block_end > to()) ? to() : block_end;
+ int end = (block_end > top) ? top : block_end;
length = uncanonicalize.get(start, '\0', range);
for (int i = 0; i < length; i++) {
uc32 c = range[i];
uc16 range_from = c + (pos - start);
uc16 range_to = c + (end - start);
- if (!(from() <= range_from && range_to <= to())) {
+ if (!(bottom <= range_from && range_to <= top)) {
ranges->Add(CharacterRange(range_from, range_to));
}
}
start = pos = block_end + 1;
}
} else {
- // TODO(plesner) when we've fixed the 2^11 bug in unibrow.
+ // Unibrow ranges don't work for high characters due to the "2^11 bug".
+ // Therefore we do something dumber for these ranges.
+ AddUncanonicals(ranges, bottom, top);
+ }
+}
+
+
+static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
+ int bottom,
+ int top) {
+ unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ // Zones with no case mappings. There is a DEBUG-mode loop to assert that
+ // this table is correct.
+ // 0x0600 - 0x0fff
+ // 0x1100 - 0x1cff
+ // 0x2000 - 0x20ff
+ // 0x2200 - 0x23ff
+ // 0x2500 - 0x2bff
+ // 0x2e00 - 0xa5ff
+ // 0xa800 - 0xfaff
+ // 0xfc00 - 0xfeff
+ const int boundary_count = 18;
+ // The ASCII boundary and the kRangeCanonicalizeMax boundary are also in
+ // this array. They are there to split up big ranges, not because they
+ // actually denote a case-mapping-free zone.
+ ASSERT(CharacterRange::kRangeCanonicalizeMax < 0x600);
+ const int kFirstRealCaselessZoneIndex = 2;
+ int boundaries[] = {0x80, CharacterRange::kRangeCanonicalizeMax,
+ 0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
+ 0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
+
+ // The special ASCII rule from the spec can save us some work here.
+ if (bottom == 0x80 && top == 0xffff) return;
+
+ // We have optimized support for this range.
+ if (top <= CharacterRange::kRangeCanonicalizeMax) {
+ CharacterRange range(bottom, top);
+ range.AddCaseEquivalents(ranges, false);
+ return;
+ }
+
+ // Split up very large ranges. This helps remove ranges where there are no
+ // case mappings.
+ for (int i = 0; i < boundary_count; i++) {
+ if (bottom < boundaries[i] && top >= boundaries[i]) {
+ AddUncanonicals(ranges, bottom, boundaries[i] - 1);
+ AddUncanonicals(ranges, boundaries[i], top);
+ return;
+ }
+ }
+
+ // If we are completely in a zone with no case mappings then we are done.
+ // We start at 2 so as not to exempt the ASCII range from mappings.
+ for (int i = kFirstRealCaselessZoneIndex; i < boundary_count; i += 2) {
+ if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
+#ifdef DEBUG
+ for (int j = bottom; j <= top; j++) {
+ unsigned current_char = j;
+ int length = uncanonicalize.get(current_char, '\0', chars);
+ for (int k = 0; k < length; k++) {
+ ASSERT(chars[k] == current_char);
+ }
+ }
+#endif
+ return;
+ }
+ }
+
+ // Step through the range finding equivalent characters.
+ ZoneList<unibrow::uchar>* characters = new ZoneList<unibrow::uchar>(100);
+ for (int i = bottom; i <= top; i++) {
+ int length = uncanonicalize.get(i, '\0', chars);
+ for (int j = 0; j < length; j++) {
+ uc32 chr = chars[j];
+ if (chr != i && (chr < bottom || chr > top)) {
+ characters->Add(chr);
+ }
+ }
+ }
+
+ // Step through the equivalent characters finding simple ranges and
+ // adding ranges to the character class.
+ if (characters->length() > 0) {
+ int new_from = characters->at(0);
+ int new_to = new_from;
+ for (int i = 1; i < characters->length(); i++) {
+ int chr = characters->at(i);
+ if (chr == new_to + 1) {
+ new_to++;
+ } else {
+ if (new_to == new_from) {
+ ranges->Add(CharacterRange::Singleton(new_from));
+ } else {
+ ranges->Add(CharacterRange(new_from, new_to));
+ }
+ new_from = new_to = chr;
+ }
+ }
+ if (new_to == new_from) {
+ ranges->Add(CharacterRange::Singleton(new_from));
+ } else {
+ ranges->Add(CharacterRange(new_from, new_to));
+ }
}
}
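
The final loop above turns the sorted list of equivalent characters into
ranges by extending a run while characters stay consecutive. A minimal
standalone version of the same coalescing, where std::pair stands in for
CharacterRange and singletons become from == to pairs rather than
CharacterRange::Singleton:

    #include <cassert>
    #include <utility>
    #include <vector>

    std::vector<std::pair<int, int> > Coalesce(const std::vector<int>& chars) {
      std::vector<std::pair<int, int> > ranges;
      if (chars.empty()) return ranges;
      int from = chars[0];
      int to = from;
      for (size_t i = 1; i < chars.size(); i++) {
        if (chars[i] == to + 1) {
          to++;  // extend the current consecutive run
        } else {
          ranges.push_back(std::make_pair(from, to));
          from = to = chars[i];
        }
      }
      ranges.push_back(std::make_pair(from, to));
      return ranges;
    }

    int main() {
      const int data[] = { 65, 66, 67, 97, 98, 120 };
      std::vector<int> chars(data, data + 6);
      std::vector<std::pair<int, int> > ranges = Coalesce(chars);
      assert(ranges.size() == 3);
      assert(ranges[0] == std::make_pair(65, 67));    // 'A'..'C'
      assert(ranges[1] == std::make_pair(97, 98));    // 'a'..'b'
      assert(ranges[2] == std::make_pair(120, 120));  // singleton 'x'
      return 0;
    }
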
@@ -4234,7 +4351,7 @@ void TextNode::CalculateOffsets() {
void Analysis::VisitText(TextNode* that) {
if (ignore_case_) {
- that->MakeCaseIndependent();
+ that->MakeCaseIndependent(is_ascii_);
}
EnsureAnalyzed(that->on_success());
if (!has_failed()) {
@@ -4452,7 +4569,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
}
}
data->node = node;
- Analysis analysis(ignore_case);
+ Analysis analysis(ignore_case, is_ascii);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 84f8d98c..b6811194 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -200,7 +200,7 @@ class CharacterRange {
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges);
+ void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
static void Split(ZoneList<CharacterRange>* base,
Vector<const uc16> overlay,
ZoneList<CharacterRange>** included,
@@ -703,7 +703,7 @@ class TextNode: public SeqRegExpNode {
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
- void MakeCaseIndependent();
+ void MakeCaseIndependent(bool is_ascii);
virtual int GreedyLoopTextLength();
virtual TextNode* Clone() {
TextNode* result = new TextNode(*this);
@@ -1212,8 +1212,10 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
- explicit Analysis(bool ignore_case)
- : ignore_case_(ignore_case), error_message_(NULL) { }
+ Analysis(bool ignore_case, bool is_ascii)
+ : ignore_case_(ignore_case),
+ is_ascii_(is_ascii),
+ error_message_(NULL) { }
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@@ -1232,6 +1234,7 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
}
private:
bool ignore_case_;
+ bool is_ascii_;
const char* error_message_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
diff --git a/src/list.h b/src/list.h
index 25211d9a..aff63c38 100644
--- a/src/list.h
+++ b/src/list.h
@@ -48,6 +48,7 @@ template <typename T, class P>
class List {
public:
+ List() { Initialize(0); }
INLINE(explicit List(int capacity)) { Initialize(capacity); }
INLINE(~List()) { DeleteData(data_); }
@@ -58,7 +59,9 @@ class List {
Initialize(0);
}
- INLINE(void* operator new(size_t size)) { return P::New(size); }
+ INLINE(void* operator new(size_t size)) {
+ return P::New(static_cast<int>(size));
+ }
INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
// Returns a reference to the element at index i. This reference is
diff --git a/src/log-inl.h b/src/log-inl.h
index 1844d2bf..1500252a 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -55,7 +55,7 @@ inline const char* StateToString(StateTag state) {
}
}
-VMState::VMState(StateTag state) : disabled_(true) {
+VMState::VMState(StateTag state) : disabled_(true), external_callback_(NULL) {
if (!Logger::is_logging()) {
return;
}
diff --git a/src/log-utils.cc b/src/log-utils.cc
index f327a0a0..fd956041 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -155,7 +155,7 @@ void Log::OpenMemoryBuffer() {
ASSERT(!IsEnabled());
output_buffer_ = new LogDynamicBuffer(
kDynamicBufferBlockSize, kMaxDynamicBufferSize,
- kDynamicBufferSeal, strlen(kDynamicBufferSeal));
+ kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
Write = WriteToMemory;
Init();
}
@@ -195,7 +195,7 @@ int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
// Find previous log line boundary.
char* end_pos = dest_buf + actual_size - 1;
while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
- actual_size = end_pos - dest_buf + 1;
+ actual_size = static_cast<int>(end_pos - dest_buf + 1);
ASSERT(actual_size <= max_size);
return actual_size;
}
@@ -352,7 +352,7 @@ void LogMessageBuilder::WriteToLogFile() {
void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
- const int len = strlen(str);
+ const int len = StrLength(str);
const int written = Log::Write(str, len);
if (written != len && write_failure_handler != NULL) {
write_failure_handler();
@@ -461,7 +461,7 @@ bool LogRecordCompressor::RetrievePreviousCompressed(
--data_ptr;
}
const intptr_t truncated_len = prev_end - prev_ptr;
- const int copy_from_pos = data_ptr - data.start();
+ const int copy_from_pos = static_cast<int>(data_ptr - data.start());
// Check if the length of compressed tail is enough.
if (truncated_len <= kMaxBackwardReferenceSize
&& truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
@@ -493,7 +493,7 @@ bool LogRecordCompressor::RetrievePreviousCompressed(
prev_record->start() + unchanged_len, best.backref_size + 1);
PrintBackwardReference(backref, best.distance, best.copy_from_pos);
ASSERT(strlen(backref.start()) - best.backref_size == 0);
- prev_record->Truncate(unchanged_len + best.backref_size);
+ prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
}
return true;
}
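
Several hunks in this file swap strlen for StrLength and add static_casts around pointer differences; both changes remove implicit 64-bit-to-int truncation. StrLength's definition is not shown in this diff, but it presumably amounts to a checked wrapper along these lines (an assumption, not the actual source):

    #include <cassert>
    #include <cstring>

    // Returns the length as an int, asserting that it actually fits.
    inline int StrLengthSketch(const char* s) {
      std::size_t length = std::strlen(s);
      assert(length == static_cast<std::size_t>(static_cast<int>(length)));
      return static_cast<int>(length);
    }
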
diff --git a/src/log-utils.h b/src/log-utils.h
index 117f098c..3e25b0e7 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -129,9 +129,10 @@ class Log : public AllStatic {
// Implementation of writing to a log file.
static int WriteToFile(const char* msg, int length) {
ASSERT(output_handle_ != NULL);
- int rv = fwrite(msg, 1, length, output_handle_);
- ASSERT(length == rv);
- return rv;
+ size_t rv = fwrite(msg, 1, length, output_handle_);
+ ASSERT(static_cast<size_t>(length) == rv);
+ USE(rv);
+ return length;
}
// Implementation of writing to a memory buffer.
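
The WriteToFile change fixes a signed/unsigned mismatch: fwrite returns size_t, not int. Keeping rv as size_t, asserting on it, and returning the original int length avoids the truncation; USE(rv) keeps release builds, where ASSERT compiles away, free of unused-variable warnings. A self-contained sketch of the same pattern, using standard assert and a cast-to-void in place of V8's USE macro:

    #include <cassert>
    #include <cstdio>

    static int WriteToFileSketch(std::FILE* handle, const char* msg, int length) {
      std::size_t rv = std::fwrite(msg, 1, length, handle);
      assert(static_cast<std::size_t>(length) == rv);
      (void) rv;  // silences the unused-variable warning when assert compiles out
      return length;
    }
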
diff --git a/src/log.cc b/src/log.cc
index d1d9a31e..bbce926c 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "global-handles.h"
#include "log.h"
#include "macro-assembler.h"
#include "serialize.h"
@@ -125,6 +126,9 @@ class Profiler: public Thread {
bool overflow_; // Tells whether a buffer overflow has occurred.
Semaphore* buffer_semaphore_; // Semaphore used for buffer synchronization.
+ // Tells whether profiler is engaged, that is, the processing thread is started.
+ bool engaged_;
+
// Tells whether worker thread should continue running.
bool running_;
@@ -151,12 +155,18 @@ void StackTracer::Trace(TickSample* sample) {
return;
}
+ int i = 0;
+ const Address callback = Logger::current_state_ != NULL ?
+ Logger::current_state_->external_callback() : NULL;
+ if (callback != NULL) {
+ sample->stack[i++] = callback;
+ }
+
SafeStackTraceFrameIterator it(
reinterpret_cast<Address>(sample->fp),
reinterpret_cast<Address>(sample->sp),
reinterpret_cast<Address>(sample->sp),
js_entry_sp);
- int i = 0;
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
it.Advance();
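
The StackTracer hunk makes samples taken inside an external (C++) callback attribute the tick to that callback: its entry point is stored in the topmost slot before the JS frame walk fills in the rest. A simplified sketch of just that prologue (types and names are illustrative):

    typedef unsigned char* Address;

    // Returns the index at which the JS frame iteration would continue.
    int BeginTraceSketch(Address* stack, int capacity, Address external_callback) {
      int i = 0;
      if (external_callback != 0 && i < capacity) {
        stack[i++] = external_callback;  // callback appears as the top frame
      }
      return i;
    }
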
@@ -243,17 +253,25 @@ void SlidingStateWindow::AddState(StateTag state) {
//
// Profiler implementation.
//
-Profiler::Profiler() {
- buffer_semaphore_ = OS::CreateSemaphore(0);
- head_ = 0;
- tail_ = 0;
- overflow_ = false;
- running_ = false;
+Profiler::Profiler()
+ : head_(0),
+ tail_(0),
+ overflow_(false),
+ buffer_semaphore_(OS::CreateSemaphore(0)),
+ engaged_(false),
+ running_(false) {
}
void Profiler::Engage() {
- OS::LogSharedLibraryAddresses();
+ if (engaged_) return;
+ engaged_ = true;
+
+ // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
+ // http://code.google.com/p/v8/issues/detail?id=487
+ if (!FLAG_prof_lazy) {
+ OS::LogSharedLibraryAddresses();
+ }
// Start thread processing the profiler buffer.
running_ = true;
@@ -268,6 +286,8 @@ void Profiler::Engage() {
void Profiler::Disengage() {
+ if (!engaged_) return;
+
// Stop receiving ticks.
Logger::ticker_->ClearProfiler();
@@ -660,6 +680,55 @@ class CompressionHelper {
#endif // ENABLE_LOGGING_AND_PROFILING
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::CallbackEventInternal(const char* prefix, const char* name,
+ Address entry_point) {
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,%s,",
+ log_events_[CODE_CREATION_EVENT], log_events_[CALLBACK_TAG]);
+ msg.AppendAddress(entry_point);
+ msg.Append(",1,\"%s%s\"", prefix, name);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+}
+#endif
+
+
+void Logger::CallbackEvent(String* name, Address entry_point) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CallbackEventInternal("", *str, entry_point);
+#endif
+}
+
+
+void Logger::GetterCallbackEvent(String* name, Address entry_point) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CallbackEventInternal("get ", *str, entry_point);
+#endif
+}
+
+
+void Logger::SetterCallbackEvent(String* name, Address entry_point) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CallbackEventInternal("set ", *str, entry_point);
+#endif
+}
+
+
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
@@ -902,8 +971,9 @@ void Logger::HeapSampleJSRetainersEvent(
// Event starts with comma, so we don't have it in the format string.
static const char* event_text = "heap-js-ret-item,%s";
// We take placeholder strings into account, but it's OK to be conservative.
- static const int event_text_len = strlen(event_text);
- const int cons_len = strlen(constructor), event_len = strlen(event);
+ static const int event_text_len = StrLength(event_text);
+ const int cons_len = StrLength(constructor);
+ const int event_len = StrLength(event);
int pos = 0;
// Retainer lists can be long. We may need to split them into multiple events.
do {
@@ -1053,9 +1123,11 @@ void Logger::ResumeProfiler(int flags) {
}
if (modules_to_enable & PROFILER_MODULE_CPU) {
if (FLAG_prof_lazy) {
+ profiler_->Engage();
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
+ LogAccessorCallbacks();
if (!FLAG_sliding_state_window) ticker_->Start();
}
profiler_->resume();
@@ -1106,6 +1178,48 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
}
+void Logger::LogCodeObject(Object* object) {
+ if (FLAG_log_code) {
+ Code* code_object = Code::cast(object);
+ LogEventsAndTags tag = Logger::STUB_TAG;
+ const char* description = "Unknown code from the snapshot";
+ switch (code_object->kind()) {
+ case Code::FUNCTION:
+ return; // We log this later using LogCompiledFunctions.
+ case Code::STUB:
+ description = CodeStub::MajorName(code_object->major_key());
+ tag = Logger::STUB_TAG;
+ break;
+ case Code::BUILTIN:
+ description = "A builtin from the snapshot";
+ tag = Logger::BUILTIN_TAG;
+ break;
+ case Code::KEYED_LOAD_IC:
+ description = "A keyed load IC from the snapshot";
+ tag = Logger::KEYED_LOAD_IC_TAG;
+ break;
+ case Code::LOAD_IC:
+ description = "A load IC from the snapshot";
+ tag = Logger::LOAD_IC_TAG;
+ break;
+ case Code::STORE_IC:
+ description = "A store IC from the snapshot";
+ tag = Logger::STORE_IC_TAG;
+ break;
+ case Code::KEYED_STORE_IC:
+ description = "A keyed store IC from the snapshot";
+ tag = Logger::KEYED_STORE_IC_TAG;
+ break;
+ case Code::CALL_IC:
+ description = "A call IC from the snapshot";
+ tag = Logger::CALL_IC_TAG;
+ break;
+ }
+ LOG(CodeCreateEvent(tag, code_object, description));
+ }
+}
+
+
void Logger::LogCompiledFunctions() {
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
@@ -1134,16 +1248,52 @@ void Logger::LogCompiledFunctions() {
LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
shared->code(), *script_name));
}
- continue;
+ } else {
+ LOG(CodeCreateEvent(
+ Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
+ } else if (shared->function_data()->IsFunctionTemplateInfo()) {
+ // API function.
+ FunctionTemplateInfo* fun_data =
+ FunctionTemplateInfo::cast(shared->function_data());
+ Object* raw_call_data = fun_data->call_code();
+ if (!raw_call_data->IsUndefined()) {
+ CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+ Object* callback_obj = call_data->callback();
+ Address entry_point = v8::ToCData<Address>(callback_obj);
+ LOG(CallbackEvent(*func_name, entry_point));
+ }
+ } else {
+ LOG(CodeCreateEvent(
+ Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
- // If no script or script has no name.
- LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
DeleteArray(sfis);
}
+
+void Logger::LogAccessorCallbacks() {
+ AssertNoAllocation no_alloc;
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (!obj->IsAccessorInfo()) continue;
+ AccessorInfo* ai = AccessorInfo::cast(obj);
+ if (!ai->name()->IsString()) continue;
+ String* name = String::cast(ai->name());
+ Address getter_entry = v8::ToCData<Address>(ai->getter());
+ if (getter_entry != 0) {
+ LOG(GetterCallbackEvent(name, getter_entry));
+ }
+ Address setter_entry = v8::ToCData<Address>(ai->setter());
+ if (setter_entry != 0) {
+ LOG(SetterCallbackEvent(name, setter_entry));
+ }
+ }
+}
+
#endif
@@ -1245,7 +1395,9 @@ bool Logger::Setup() {
} else {
is_logging_ = true;
}
- profiler_->Engage();
+ if (!FLAG_prof_lazy) {
+ profiler_->Engage();
+ }
}
LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
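
Taken together, the log.cc hunks implement lazy profiler engagement: Setup() no longer engages when --prof-lazy is set, ResumeProfiler() engages on first resume, and the new engaged_ flag makes Engage()/Disengage() safe to call from either path. A condensed sketch of that protocol (thread start and shared-library logging elided):

    class LazyProfilerSketch {
     public:
      LazyProfilerSketch() : engaged_(false), running_(false) {}
      void Engage() {
        if (engaged_) return;  // idempotent: Setup and Resume may both call it
        engaged_ = true;
        running_ = true;       // the processing thread would be started here
      }
      void Disengage() {
        if (!engaged_) return; // never started, so nothing to stop
        running_ = false;
      }
     private:
      bool engaged_;
      bool running_;
    };
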
diff --git a/src/log.h b/src/log.h
index 13d45d2e..4d5acced 100644
--- a/src/log.h
+++ b/src/log.h
@@ -91,15 +91,20 @@ class CompressionHelper;
class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
- inline explicit VMState(StateTag state);
+ inline VMState(StateTag state);
inline ~VMState();
StateTag state() { return state_; }
+ Address external_callback() { return external_callback_; }
+ void set_external_callback(Address external_callback) {
+ external_callback_ = external_callback;
+ }
private:
bool disabled_;
StateTag state_;
VMState* previous_;
+ Address external_callback_;
#else
public:
explicit VMState(StateTag state) {}
@@ -122,6 +127,7 @@ class VMState BASE_EMBEDDED {
V(CALL_MISS_TAG, "CallMiss", "cm") \
V(CALL_NORMAL_TAG, "CallNormal", "cn") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
+ V(CALLBACK_TAG, "Callback", "cb") \
V(EVAL_TAG, "Eval", "e") \
V(FUNCTION_TAG, "Function", "f") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC", "klic") \
@@ -200,6 +206,10 @@ class Logger {
// ==== Events logged by --log-code. ====
+ // Emits a code event for a callback function.
+ static void CallbackEvent(String* name, Address entry_point);
+ static void GetterCallbackEvent(String* name, Address entry_point);
+ static void SetterCallbackEvent(String* name, Address entry_point);
// Emits a code create event.
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code, const char* source);
@@ -265,6 +275,10 @@ class Logger {
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
+ // Logs all accessor callbacks found in the heap.
+ static void LogAccessorCallbacks();
+ // Used for logging stubs found in the snapshot.
+ static void LogCodeObject(Object* code_object);
private:
@@ -277,6 +291,11 @@ class Logger {
// Emits the profiler's first message.
static void ProfilerBeginEvent();
+ // Emits callback event messages.
+ static void CallbackEventInternal(const char* prefix,
+ const char* name,
+ Address entry_point);
+
// Emits aliases for compressed messages.
static void LogAliases();
@@ -328,6 +347,7 @@ class Logger {
friend class TimeLog;
friend class Profiler;
friend class SlidingStateWindow;
+ friend class StackTracer;
friend class VMState;
friend class LoggerTestHelper;
diff --git a/src/macros.py b/src/macros.py
index ddd2f13b..5b06099a 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -77,12 +77,12 @@ const kMonthShift = 5;
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
-macro IS_FUNCTION(arg) = (typeof(arg) === 'function');
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_OBJECT(arg) = (%_IsObject(arg));
macro IS_ARRAY(arg) = (%_IsArray(arg));
+macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
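
The macros.py change swaps typeof-based tests for runtime intrinsics (the %_-prefixed calls), presumably so the code generator can inline a direct instance-type check instead of going through the generic typeof machinery. At the C++ level such a check reduces to roughly the following (names are illustrative, not V8's declarations):

    enum InstanceTypeSketch { JS_OBJECT_SKETCH, JS_FUNCTION_SKETCH };

    struct HeapObjectSketch { InstanceTypeSketch instance_type; };

    // One comparison against the object's type tag, no string handling.
    inline bool IsFunctionSketch(const HeapObjectSketch* obj) {
      return obj->instance_type == JS_FUNCTION_SKETCH;
    }
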
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 5a3ab890..81819b7f 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -572,9 +572,8 @@ class SymbolMarkingVisitor : public ObjectVisitor {
void MarkCompactCollector::MarkSymbolTable() {
// Objects reachable from symbols are marked as live so as to ensure
// that if the symbol itself remains alive after GC for any reason,
- // and if it is a sliced string or a cons string backed by an
- // external string (even indirectly), then the external string does
- // not receive a weak reference callback.
+ // and if it is a cons string backed by an external string (even indirectly),
+ // then the external string does not receive a weak reference callback.
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
@@ -593,7 +592,7 @@ void MarkCompactCollector::MarkSymbolTable() {
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- Heap::IterateStrongRoots(visitor);
+ Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the symbol table specially.
MarkSymbolTable();
@@ -1074,7 +1073,7 @@ inline void EncodeForwardingAddressesInRange(Address start,
}
#endif
if (!is_prev_alive) { // Transition from non-live to live.
- EncodeFreeRegion(free_start, current - free_start);
+ EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
is_prev_alive = true;
}
} else { // Non-live object.
@@ -1088,7 +1087,9 @@ inline void EncodeForwardingAddressesInRange(Address start,
}
// If we ended on a free region, mark it.
- if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
+ if (!is_prev_alive) {
+ EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
+ }
}
@@ -1169,7 +1170,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start, current - free_start);
+ dealloc(free_start, static_cast<int>(current - free_start));
is_previous_alive = true;
}
} else {
@@ -1189,7 +1190,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If the last region was not live we need to deallocate from
// free_start to the allocation top in the page.
if (!is_previous_alive) {
- int free_size = p->AllocationTop() - free_start;
+ int free_size = static_cast<int>(p->AllocationTop() - free_start);
if (free_size > 0) {
dealloc(free_start, free_size);
}
@@ -1455,7 +1456,7 @@ void MarkCompactCollector::UpdatePointers() {
state_ = UPDATE_POINTERS;
#endif
UpdatingVisitor updating_visitor;
- Heap::IterateRoots(&updating_visitor);
+ Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&updating_visitor);
int live_maps = IterateLiveObjects(Heap::map_space(),
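
Most of the mark-compact changes are the same 64-bit hygiene seen in log-utils.cc: address differences are ptrdiff_t, while free-region sizes are ints, hence the explicit static_cast<int> at each call site. A checked helper expressing the invariant those casts rely on (hypothetical, for illustration):

    #include <cassert>

    typedef unsigned char* Address;

    inline int AddressDeltaSketch(Address from, Address to) {
      assert(to >= from);
      assert(to - from <= 0x7fffffff);  // the region size must fit in an int
      return static_cast<int>(to - from);
    }
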
diff --git a/src/math.js b/src/math.js
index e3d266e4..71918965 100644
--- a/src/math.js
+++ b/src/math.js
@@ -29,7 +29,6 @@
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.
-const $Infinity = global.Infinity;
const $floor = MathFloor;
const $random = MathRandom;
const $abs = MathAbs;
@@ -118,10 +117,16 @@ function MathLog(x) {
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
- var r = -$Infinity;
var length = %_ArgumentsLength();
- for (var i = 0; i < length; i++) {
- var n = ToNumber(%_Arguments(i));
+ if (length == 0) {
+ return -1/0; // Compiler constant-folds this to -Infinity.
+ }
+ var r = arg1;
+ if (!IS_NUMBER(r)) r = ToNumber(r);
+ if (NUMBER_IS_NAN(r)) return r;
+ for (var i = 1; i < length; i++) {
+ var n = %_Arguments(i);
+ if (!IS_NUMBER(n)) n = ToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0.
if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
@@ -131,10 +136,16 @@ function MathMax(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
- var r = $Infinity;
var length = %_ArgumentsLength();
- for (var i = 0; i < length; i++) {
- var n = ToNumber(%_Arguments(i));
+ if (length == 0) {
+ return 1/0; // Compiler constant-folds this to Infinity.
+ }
+ var r = arg1;
+ if (!IS_NUMBER(r)) r = ToNumber(r);
+ if (NUMBER_IS_NAN(r)) return r;
+ for (var i = 1; i < length; i++) {
+ var n = %_Arguments(i);
+ if (!IS_NUMBER(n)) n = ToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0.
if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
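
The rewritten MathMax/MathMin avoid the Infinity sentinels, skip ToNumber for arguments that are already numbers, and bail out on the first NaN. The only subtle part is the signed-zero tie-break: 0 === -0 in a plain comparison, so the JS code detects -0 via !%_IsSmi(r) (a zero-valued heap number is -0). A C++ sketch of the same logic, using std::signbit as the analog; the caller must pass at least one argument:

    #include <cassert>
    #include <cmath>

    double MaxSketch(const double* args, int length) {
      assert(length >= 1);
      double r = args[0];
      if (std::isnan(r)) return r;
      for (int i = 1; i < length; i++) {
        double n = args[i];
        if (std::isnan(n)) return n;  // NaN propagates immediately
        // Prefer +0 over -0: plain comparison treats them as equal.
        if (n > r || (r == 0 && n == 0 && std::signbit(r))) r = n;
      }
      return r;
    }
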
diff --git a/src/messages.js b/src/messages.js
index 27207928..1e5053d7 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -238,14 +238,15 @@ function MakeError(type, args) {
Script.prototype.lineFromPosition = function(position) {
var lower = 0;
var upper = this.lineCount() - 1;
+ var line_ends = this.line_ends;
// We'll never find invalid positions so bail right away.
- if (position > this.line_ends[upper]) {
+ if (position > line_ends[upper]) {
return -1;
}
// This means we don't have to safeguard indexing line_ends[i - 1].
- if (position <= this.line_ends[0]) {
+ if (position <= line_ends[0]) {
return 0;
}
@@ -253,9 +254,9 @@ Script.prototype.lineFromPosition = function(position) {
while (upper >= 1) {
var i = (lower + upper) >> 1;
- if (position > this.line_ends[i]) {
+ if (position > line_ends[i]) {
lower = i + 1;
- } else if (position <= this.line_ends[i - 1]) {
+ } else if (position <= line_ends[i - 1]) {
upper = i - 1;
} else {
return i;
@@ -278,8 +279,9 @@ Script.prototype.locationFromPosition = function (position,
if (line == -1) return null;
// Determine start, end and column.
- var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
- var end = this.line_ends[line];
+ var line_ends = this.line_ends;
+ var start = line == 0 ? 0 : line_ends[line - 1] + 1;
+ var end = line_ends[line];
if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
var column = position - start;
@@ -368,8 +370,9 @@ Script.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
return null;
}
- var from_position = from_line == 0 ? 0 : this.line_ends[from_line - 1] + 1;
- var to_position = to_line == 0 ? 0 : this.line_ends[to_line - 1] + 1;
+ var line_ends = this.line_ends;
+ var from_position = from_line == 0 ? 0 : line_ends[from_line - 1] + 1;
+ var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
// Return a source slice with line numbers re-adjusted to the resource.
return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
@@ -391,8 +394,9 @@ Script.prototype.sourceLine = function (opt_line) {
}
// Return the source line.
- var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
- var end = this.line_ends[line];
+ var line_ends = this.line_ends;
+ var start = line == 0 ? 0 : line_ends[line - 1] + 1;
+ var end = line_ends[line];
return StringSubstring.call(this.source, start, end);
}
@@ -625,10 +629,7 @@ CallSite.prototype.isEval = function () {
CallSite.prototype.getEvalOrigin = function () {
var script = %FunctionGetScript(this.fun);
- if (!script || script.compilation_type != 1)
- return null;
- return new CallSite(null, script.eval_from_function,
- script.eval_from_position);
+ return FormatEvalOrigin(script);
};
CallSite.prototype.getFunction = function () {
@@ -696,7 +697,7 @@ CallSite.prototype.getColumnNumber = function () {
if (script) {
location = script.locationFromPosition(this.pos, true);
}
- return location ? location.column : null;
+ return location ? location.column + 1 : null;
};
CallSite.prototype.isNative = function () {
@@ -715,12 +716,44 @@ CallSite.prototype.isConstructor = function () {
return this.fun === constructor;
};
+function FormatEvalOrigin(script) {
+ var eval_origin = "";
+ if (script.eval_from_function_name) {
+ eval_origin += script.eval_from_function_name;
+ } else {
+ eval_origin += "<anonymous>";
+ }
+
+ var eval_from_script = script.eval_from_script;
+ if (eval_from_script) {
+ if (eval_from_script.compilation_type == 1) {
+ // eval script originated from another eval.
+ eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
+ } else {
+ // eval script originated from "real" source.
+ if (eval_from_script.name) {
+ eval_origin += " (" + eval_from_script.name;
+ var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
+ if (location) {
+ eval_origin += ":" + (location.line + 1);
+ eval_origin += ":" + (location.column + 1);
+ }
+ eval_origin += ")"
+ } else {
+ eval_origin += " (unknown source)";
+ }
+ }
+ }
+
+ return eval_origin;
+};
+
function FormatSourcePosition(frame) {
var fileLocation = "";
if (frame.isNative()) {
fileLocation = "native";
} else if (frame.isEval()) {
- fileLocation = "eval at " + FormatSourcePosition(frame.getEvalOrigin());
+ fileLocation = "eval at " + frame.getEvalOrigin();
} else {
var fileName = frame.getFileName();
if (fileName) {
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index cde55343..ba663b2a 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -849,6 +849,33 @@ FunctionMirror.prototype.script = function() {
/**
+ * Returns the script source position for the function. Only makes sense
+ * for functions which have a script defined.
+ * @return {Number or undefined} in-script position for the function
+ */
+FunctionMirror.prototype.sourcePosition_ = function() {
+ // Return the position if the function is resolved. Otherwise just fall
+ // through to return undefined.
+ if (this.resolved()) {
+ return %FunctionGetScriptSourcePosition(this.value_);
+ }
+};
+
+
+/**
+ * Returns the script source location object for the function. Only makes sense
+ * for functions which have a script defined.
+ * @return {Location or undefined} in-script location for the start of the function
+ */
+FunctionMirror.prototype.sourceLocation = function() {
+ if (this.resolved() && this.script()) {
+ return this.script().locationFromPosition(this.sourcePosition_(),
+ true);
+ }
+};
+
+
+/**
* Returns objects constructed by this function.
* @param {number} opt_max_instances Optional parameter specifying the maximum
* number of instances to return.
@@ -1766,16 +1793,21 @@ ScriptMirror.prototype.context = function() {
};
-ScriptMirror.prototype.evalFromFunction = function() {
- return MakeMirror(this.script_.eval_from_function);
+ScriptMirror.prototype.evalFromScript = function() {
+ return MakeMirror(this.script_.eval_from_script);
+};
+
+
+ScriptMirror.prototype.evalFromFunctionName = function() {
+ return MakeMirror(this.script_.eval_from_function_name);
};
ScriptMirror.prototype.evalFromLocation = function() {
- var eval_from_function = this.evalFromFunction();
- if (!eval_from_function.isUndefined()) {
- var position = this.script_.eval_from_position;
- return eval_from_function.script().locationFromPosition(position, true);
+ var eval_from_script = this.evalFromScript();
+ if (!eval_from_script.isUndefined()) {
+ var position = this.script_.eval_from_script_position;
+ return eval_from_script.locationFromPosition(position, true);
}
};
@@ -2053,12 +2085,15 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
// For compilation type eval emit information on the script from which
// eval was called if a script is present.
if (mirror.compilationType() == 1 &&
- mirror.evalFromFunction().script()) {
+ mirror.evalFromScript()) {
content.evalFromScript =
- this.serializeReference(mirror.evalFromFunction().script());
+ this.serializeReference(mirror.evalFromScript());
var evalFromLocation = mirror.evalFromLocation()
content.evalFromLocation = { line: evalFromLocation.line,
column: evalFromLocation.column}
+ if (mirror.evalFromFunctionName()) {
+ content.evalFromFunctionName = mirror.evalFromFunctionName();
+ }
}
if (mirror.context()) {
content.context = this.serializeReference(mirror.context());
@@ -2119,6 +2154,9 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
if (mirror.script()) {
content.script = this.serializeReference(mirror.script());
+ content.scriptId = mirror.script().id();
+
+ serializeLocationFields(mirror.sourceLocation(), content);
}
}
@@ -2151,6 +2189,31 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
/**
+ * Serialize location information to the following JSON format:
+ *
+ * "position":"<position>",
+ * "line":"<line>",
+ * "column":"<column>",
+ *
+ * @param {SourceLocation} location The location to serialize, may be undefined.
+ * @param {Object} content The object to receive the serialized location fields.
+ */
+function serializeLocationFields(location, content) {
+ if (!location) {
+ return;
+ }
+ content.position = location.position;
+ var line = location.line;
+ if (!IS_UNDEFINED(line)) {
+ content.line = line;
+ }
+ var column = location.column;
+ if (!IS_UNDEFINED(column)) {
+ content.column = column;
+ }
+}
+
+
+/**
* Serialize property information to the following JSON format for building the
* array of properties.
*
@@ -2218,15 +2281,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
x[i] = local;
}
content.locals = x;
- content.position = mirror.sourcePosition();
- var line = mirror.sourceLine();
- if (!IS_UNDEFINED(line)) {
- content.line = line;
- }
- var column = mirror.sourceColumn();
- if (!IS_UNDEFINED(column)) {
- content.column = column;
- }
+ serializeLocationFields(mirror.sourceLocation(), content);
var source_line_text = mirror.sourceLineText();
if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text;
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 80789ebb..eb743f81 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -87,57 +87,53 @@ class CounterCollection {
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters;
-static CounterCollection* counters = &local_counters;
typedef std::map<std::string, int*> CounterMap;
typedef std::map<std::string, int*>::iterator CounterMapIterator;
static CounterMap counter_table_;
-// Callback receiver when v8 has a counter to track.
-static int* counter_callback(const char* name) {
- std::string counter = name;
- // See if this counter name is already known.
- if (counter_table_.find(counter) != counter_table_.end())
- return counter_table_[counter];
-
- Counter* ctr = counters->GetNextCounter();
- if (ctr == NULL) return NULL;
- int* ptr = ctr->Bind(name);
- counter_table_[counter] = ptr;
- return ptr;
-}
+class CppByteSink : public i::SnapshotByteSink {
+ public:
+ explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) {
+ fp_ = i::OS::FOpen(snapshot_file, "wb");
+ if (fp_ == NULL) {
+ i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
+ fprintf(fp_, "#include \"v8.h\"\n");
+ fprintf(fp_, "#include \"platform.h\"\n\n");
+ fprintf(fp_, "#include \"snapshot.h\"\n\n");
+ fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n");
+ fprintf(fp_, "const byte Snapshot::data_[] = {");
+ }
-// Write C++ code that defines Snapshot::snapshot_ to contain the snapshot
-// to the file given by filename. Only the first size chars are written.
-static int WriteInternalSnapshotToFile(const char* filename,
- const v8::internal::byte* bytes,
- int size) {
- FILE* f = i::OS::FOpen(filename, "wb");
- if (f == NULL) {
- i::OS::PrintError("Cannot open file %s for reading.\n", filename);
- return 0;
+ virtual ~CppByteSink() {
+ if (fp_ != NULL) {
+ fprintf(fp_, "};\n\n");
+ fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_);
+ fprintf(fp_, "} } // namespace v8::internal\n");
+ fclose(fp_);
+ }
}
- fprintf(f, "// Autogenerated snapshot file. Do not edit.\n\n");
- fprintf(f, "#include \"v8.h\"\n");
- fprintf(f, "#include \"platform.h\"\n\n");
- fprintf(f, "#include \"snapshot.h\"\n\n");
- fprintf(f, "namespace v8 {\nnamespace internal {\n\n");
- fprintf(f, "const byte Snapshot::data_[] = {");
- int written = 0;
- written += fprintf(f, "0x%x", bytes[0]);
- for (int i = 1; i < size; ++i) {
- written += fprintf(f, ",0x%x", bytes[i]);
- // The following is needed to keep the line length low on Visual C++:
- if (i % 512 == 0) fprintf(f, "\n");
+
+ virtual void Put(int byte, const char* description) {
+ if (bytes_written_ != 0) {
+ fprintf(fp_, ",");
+ }
+ fprintf(fp_, "%d", byte);
+ bytes_written_++;
+ if ((bytes_written_ & 0x3f) == 0) {
+ fprintf(fp_, "\n");
+ }
}
- fprintf(f, "};\n\n");
- fprintf(f, "int Snapshot::size_ = %d;\n\n", size);
- fprintf(f, "} } // namespace v8::internal\n");
- fclose(f);
- return written;
-}
+
+ private:
+ FILE* fp_;
+ int bytes_written_;
+};
int main(int argc, char** argv) {
@@ -153,34 +149,20 @@ int main(int argc, char** argv) {
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
-
- v8::V8::SetCounterFunction(counter_callback);
- v8::HandleScope scope;
-
- const int kExtensionCount = 1;
- const char* extension_list[kExtensionCount] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
-
i::Serializer::Enable();
- v8::Context::New(&extensions);
-
+ Persistent<Context> context = v8::Context::New();
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
i::Bootstrapper::NativesSourceLookup(i);
}
}
- // Get rid of unreferenced scripts with a global GC.
- i::Heap::CollectAllGarbage(false);
- i::Serializer ser;
+ context.Dispose();
+ CppByteSink sink(argv[1]);
+ i::Serializer ser(&sink);
+ // This results in a somewhat smaller snapshot, probably because it gets rid
+ // of some things that are cached between garbage collections.
+ i::Heap::CollectAllGarbage(true);
ser.Serialize();
- v8::internal::byte* bytes;
- int len;
- ser.Finalize(&bytes, &len);
-
- WriteInternalSnapshotToFile(argv[1], bytes, len);
-
- i::DeleteArray(bytes);
-
return 0;
}
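
mksnapshot now streams bytes into the generated C++ array through a SnapshotByteSink instead of buffering the whole snapshot and calling Finalize. The essence of CppByteSink, reduced to a standalone sketch (file framing and names are illustrative):

    #include <cstdio>

    class ArraySinkSketch {
     public:
      explicit ArraySinkSketch(std::FILE* fp) : fp_(fp), count_(0) {
        std::fprintf(fp_, "const unsigned char data[] = {");
      }
      ~ArraySinkSketch() {
        std::fprintf(fp_, "};\nconst int data_size = %d;\n", count_);
      }
      void Put(int byte) {
        if (count_ != 0) std::fprintf(fp_, ",");
        std::fprintf(fp_, "%d", byte);
        if ((++count_ & 0x3f) == 0) std::fprintf(fp_, "\n");  // keep lines short
      }
     private:
      std::FILE* fp_;
      int count_;
    };
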
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 01881346..36f65eee 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -547,54 +547,18 @@ static const char* TypeToString(InstanceType type) {
case INVALID_TYPE: return "INVALID";
case MAP_TYPE: return "MAP";
case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
- case SHORT_SYMBOL_TYPE:
- case MEDIUM_SYMBOL_TYPE:
- case LONG_SYMBOL_TYPE: return "SYMBOL";
- case SHORT_ASCII_SYMBOL_TYPE:
- case MEDIUM_ASCII_SYMBOL_TYPE:
- case LONG_ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
- case SHORT_SLICED_SYMBOL_TYPE:
- case MEDIUM_SLICED_SYMBOL_TYPE:
- case LONG_SLICED_SYMBOL_TYPE: return "SLICED_SYMBOL";
- case SHORT_SLICED_ASCII_SYMBOL_TYPE:
- case MEDIUM_SLICED_ASCII_SYMBOL_TYPE:
- case LONG_SLICED_ASCII_SYMBOL_TYPE: return "SLICED_ASCII_SYMBOL";
- case SHORT_CONS_SYMBOL_TYPE:
- case MEDIUM_CONS_SYMBOL_TYPE:
- case LONG_CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
- case SHORT_CONS_ASCII_SYMBOL_TYPE:
- case MEDIUM_CONS_ASCII_SYMBOL_TYPE:
- case LONG_CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
- case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
- case MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE:
- case LONG_EXTERNAL_ASCII_SYMBOL_TYPE:
- case SHORT_EXTERNAL_SYMBOL_TYPE:
- case MEDIUM_EXTERNAL_SYMBOL_TYPE:
- case LONG_EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
- case SHORT_ASCII_STRING_TYPE:
- case MEDIUM_ASCII_STRING_TYPE:
- case LONG_ASCII_STRING_TYPE: return "ASCII_STRING";
- case SHORT_STRING_TYPE:
- case MEDIUM_STRING_TYPE:
- case LONG_STRING_TYPE: return "TWO_BYTE_STRING";
- case SHORT_CONS_STRING_TYPE:
- case MEDIUM_CONS_STRING_TYPE:
- case LONG_CONS_STRING_TYPE:
- case SHORT_CONS_ASCII_STRING_TYPE:
- case MEDIUM_CONS_ASCII_STRING_TYPE:
- case LONG_CONS_ASCII_STRING_TYPE: return "CONS_STRING";
- case SHORT_SLICED_STRING_TYPE:
- case MEDIUM_SLICED_STRING_TYPE:
- case LONG_SLICED_STRING_TYPE:
- case SHORT_SLICED_ASCII_STRING_TYPE:
- case MEDIUM_SLICED_ASCII_STRING_TYPE:
- case LONG_SLICED_ASCII_STRING_TYPE: return "SLICED_STRING";
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case MEDIUM_EXTERNAL_ASCII_STRING_TYPE:
- case LONG_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case MEDIUM_EXTERNAL_STRING_TYPE:
- case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+ case SYMBOL_TYPE: return "SYMBOL";
+ case ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
+ case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
+ case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
+ case EXTERNAL_ASCII_SYMBOL_TYPE:
+ case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
+ case ASCII_STRING_TYPE: return "ASCII_STRING";
+ case STRING_TYPE: return "TWO_BYTE_STRING";
+ case CONS_STRING_TYPE:
+ case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
+ case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
@@ -796,8 +760,6 @@ void SharedFunctionInfo::SharedFunctionInfoPrint() {
PrintF("\n - debug info = ");
debug_info()->ShortPrint();
PrintF("\n - length = %d", length());
- PrintF("\n - has_only_this_property_assignments = %d",
- has_only_this_property_assignments());
PrintF("\n - has_only_simple_this_property_assignments = %d",
has_only_simple_this_property_assignments());
PrintF("\n - this_property_assignments = ");
@@ -979,6 +941,7 @@ void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(name());
VerifyPointer(data());
VerifyPointer(flag());
+ VerifyPointer(load_stub_cache());
}
void AccessorInfo::AccessorInfoPrint() {
@@ -1172,6 +1135,20 @@ void Script::ScriptPrint() {
type()->ShortPrint();
PrintF("\n - id: ");
id()->ShortPrint();
+ PrintF("\n - data: ");
+ data()->ShortPrint();
+ PrintF("\n - context data: ");
+ context_data()->ShortPrint();
+ PrintF("\n - wrapper: ");
+ wrapper()->ShortPrint();
+ PrintF("\n - compilation type: ");
+ compilation_type()->ShortPrint();
+ PrintF("\n - line ends: ");
+ line_ends()->ShortPrint();
+ PrintF("\n - eval from shared: ");
+ eval_from_shared()->ShortPrint();
+ PrintF("\n - eval from instructions offset: ");
+ eval_from_instructions_offset()->ShortPrint();
PrintF("\n");
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 1ada5839..8514a412 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -163,11 +163,6 @@ bool Object::IsConsString() {
}
-#ifdef DEBUG
-// These are for cast checks. If you need one of these in release
-// mode you should consider using a StringShape before moving it out
-// of the ifdef
-
bool Object::IsSeqString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
@@ -208,15 +203,6 @@ bool Object::IsExternalTwoByteString() {
}
-bool Object::IsSlicedString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSliced();
-}
-
-
-#endif // DEBUG
-
-
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@@ -246,9 +232,6 @@ bool StringShape::IsSymbol() {
bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kSlicedStringTag) {
- return SlicedString::cast(this)->buffer()->IsAsciiRepresentation();
- }
if ((type & kStringRepresentationMask) == kConsStringTag &&
ConsString::cast(this)->second()->length() == 0) {
return ConsString::cast(this)->first()->IsAsciiRepresentation();
@@ -259,9 +242,7 @@ bool String::IsAsciiRepresentation() {
bool String::IsTwoByteRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kSlicedStringTag) {
- return SlicedString::cast(this)->buffer()->IsTwoByteRepresentation();
- } else if ((type & kStringRepresentationMask) == kConsStringTag &&
+ if ((type & kStringRepresentationMask) == kConsStringTag &&
ConsString::cast(this)->second()->length() == 0) {
return ConsString::cast(this)->first()->IsTwoByteRepresentation();
}
@@ -274,11 +255,6 @@ bool StringShape::IsCons() {
}
-bool StringShape::IsSliced() {
- return (type_ & kStringRepresentationMask) == kSlicedStringTag;
-}
-
-
bool StringShape::IsExternal() {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
@@ -304,11 +280,6 @@ STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
Internals::kFullStringRepresentationMask);
-uint32_t StringShape::size_tag() {
- return (type_ & kStringSizeMask);
-}
-
-
bool StringShape::IsSequentialAscii() {
return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
}
@@ -879,7 +850,7 @@ Failure* Failure::RetryAfterGC(int requested_bytes) {
requested = static_cast<intptr_t>(
(~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
}
- int value = (requested << kSpaceTagSize) | NEW_SPACE;
+ int value = static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE;
return Construct(RETRY_AFTER_GC, value);
}
@@ -1014,9 +985,9 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) {
int MapWord::DecodeOffset() {
// The offset field is represented in the kForwardingOffsetBits
// most-significant bits.
- int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
- return offset;
+ uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
+ ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
+ return static_cast<int>(offset);
}
@@ -1591,7 +1562,6 @@ CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqAsciiString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
@@ -1641,44 +1611,25 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
INT_ACCESSORS(Array, length, kLengthOffset)
-bool String::Equals(String* other) {
- if (other == this) return true;
- if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
- return false;
- }
- return SlowEquals(other);
-}
-
-
-int String::length() {
- uint32_t len = READ_INT_FIELD(this, kLengthOffset);
-
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- return len >> (StringShape(this).size_tag() + kLongLengthShift);
-}
-
+INT_ACCESSORS(String, length, kLengthOffset)
-void String::set_length(int value) {
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
- WRITE_INT_FIELD(this,
- kLengthOffset,
- value << (StringShape(this).size_tag() + kLongLengthShift));
+uint32_t String::hash_field() {
+ return READ_UINT32_FIELD(this, kHashFieldOffset);
}
-uint32_t String::length_field() {
- return READ_UINT32_FIELD(this, kLengthOffset);
+void String::set_hash_field(uint32_t value) {
+ WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
}
-void String::set_length_field(uint32_t value) {
- WRITE_UINT32_FIELD(this, kLengthOffset, value);
+bool String::Equals(String* other) {
+ if (other == this) return true;
+ if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
+ return false;
+ }
+ return SlowEquals(other);
}
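
This hunk reflects a layout change: length and hash previously shared one tagged 32-bit field, decoded with per-size-class shifts (hence the deleted kShort/kMedium/kLong assertions), while now the length is a plain integer field and the hash gets a word of its own. A schematic of the new arrangement (mask and shift values are illustrative, not V8's):

    #include <stdint.h>

    struct StringFieldsSketch {
      int32_t length;       // read and written directly, no size-tag shifting
      uint32_t hash_field;  // (hash << kHashShift) | flag bits

      static const uint32_t kHashComputedMask = 1;
      static const int kHashShift = 2;

      bool HasHashCode() const { return (hash_field & kHashComputedMask) != 0; }
      uint32_t Hash() const { return hash_field >> kHashShift; }
    };
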
@@ -1702,9 +1653,6 @@ uint16_t String::Get(int index) {
case kConsStringTag | kAsciiStringTag:
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
- case kSlicedStringTag | kAsciiStringTag:
- case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(this)->SlicedStringGet(index);
case kExternalStringTag | kAsciiStringTag:
return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
@@ -1735,11 +1683,6 @@ bool String::IsFlat() {
// Only flattened strings have second part empty.
return second->length() == 0;
}
- case kSlicedStringTag: {
- StringRepresentationTag tag =
- StringShape(SlicedString::cast(this)->buffer()).representation_tag();
- return tag == kSeqStringTag || tag == kExternalStringTag;
- }
default:
return true;
}
@@ -1793,30 +1736,12 @@ void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
uint32_t length = READ_INT_FIELD(this, kLengthOffset);
-
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- // Use the map (and not 'this') to compute the size tag, since
- // TwoByteStringSize is called during GC when maps are encoded.
- length >>= StringShape(instance_type).size_tag() + kLongLengthShift;
-
return SizeFor(length);
}
int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
uint32_t length = READ_INT_FIELD(this, kLengthOffset);
-
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- // Use the map (and not 'this') to compute the size tag, since
- // AsciiStringSize is called during GC when maps are encoded.
- length >>= StringShape(instance_type).size_tag() + kLongLengthShift;
-
return SizeFor(length);
}
@@ -1853,27 +1778,6 @@ void ConsString::set_second(String* value, WriteBarrierMode mode) {
}
-String* SlicedString::buffer() {
- return String::cast(READ_FIELD(this, kBufferOffset));
-}
-
-
-void SlicedString::set_buffer(String* buffer) {
- WRITE_FIELD(this, kBufferOffset, buffer);
- WRITE_BARRIER(this, kBufferOffset);
-}
-
-
-int SlicedString::start() {
- return READ_INT_FIELD(this, kStartOffset);
-}
-
-
-void SlicedString::set_start(int start) {
- WRITE_INT_FIELD(this, kStartOffset, start);
-}
-
-
ExternalAsciiString::Resource* ExternalAsciiString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
@@ -1885,34 +1789,6 @@ void ExternalAsciiString::set_resource(
}
-Map* ExternalAsciiString::StringMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_ascii_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_ascii_string_map();
- } else {
- map = Heap::long_external_ascii_string_map();
- }
- return map;
-}
-
-
-Map* ExternalAsciiString::SymbolMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_ascii_symbol_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_ascii_symbol_map();
- } else {
- map = Heap::long_external_ascii_symbol_map();
- }
- return map;
-}
-
-
ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
@@ -1924,34 +1800,6 @@ void ExternalTwoByteString::set_resource(
}
-Map* ExternalTwoByteString::StringMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_string_map();
- } else {
- map = Heap::long_external_string_map();
- }
- return map;
-}
-
-
-Map* ExternalTwoByteString::SymbolMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_symbol_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_symbol_map();
- } else {
- map = Heap::long_external_symbol_map();
- }
- return map;
-}
-
-
byte ByteArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -2417,6 +2265,7 @@ ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS(AccessorInfo, load_stub_cache, Object, kLoadStubCacheOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -2476,7 +2325,7 @@ ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
ACCESSORS(Script, type, Smi, kTypeOffset)
ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset)
+ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
ACCESSORS(Script, eval_from_instructions_offset, Smi,
kEvalFrominstructionsOffsetOffset)
@@ -2514,12 +2363,12 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
BOOL_GETTER(SharedFunctionInfo, compiler_hints,
- has_only_this_property_assignments,
- kHasOnlyThisPropertyAssignments)
-BOOL_GETTER(SharedFunctionInfo, compiler_hints,
has_only_simple_this_property_assignments,
kHasOnlySimpleThisPropertyAssignments)
-
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ try_fast_codegen,
+ kTryFastCodegen)
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
@@ -2933,13 +2782,13 @@ NumberDictionary* JSObject::element_dictionary() {
bool String::HasHashCode() {
- return (length_field() & kHashComputedMask) != 0;
+ return (hash_field() & kHashComputedMask) != 0;
}
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
- uint32_t field = length_field();
+ uint32_t field = hash_field();
if (field & kHashComputedMask) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
@@ -2956,7 +2805,7 @@ StringHasher::StringHasher(int length)
bool StringHasher::has_trivial_hash() {
- return length_ > String::kMaxMediumStringSize;
+ return length_ > String::kMaxHashCalcLength;
}
@@ -3012,7 +2861,7 @@ uint32_t StringHasher::GetHash() {
bool String::AsArrayIndex(uint32_t* index) {
- uint32_t field = length_field();
+ uint32_t field = hash_field();
if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}
@@ -3027,6 +2876,43 @@ PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
return GetPropertyAttributeWithReceiver(this, key);
}
+// TODO(504): this may be useful in other places too where JSGlobalProxy
+// is used.
+Object* JSObject::BypassGlobalProxy() {
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return Heap::undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ return proto;
+ }
+ return this;
+}
+
+
+bool JSObject::HasHiddenPropertiesObject() {
+ ASSERT(!IsJSGlobalProxy());
+ return GetPropertyAttributePostInterceptor(this,
+ Heap::hidden_symbol(),
+ false) != ABSENT;
+}
+
+
+Object* JSObject::GetHiddenPropertiesObject() {
+ ASSERT(!IsJSGlobalProxy());
+ PropertyAttributes attributes;
+ return GetLocalPropertyPostInterceptor(this,
+ Heap::hidden_symbol(),
+ &attributes);
+}
+
+
+Object* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
+ ASSERT(!IsJSGlobalProxy());
+ return SetPropertyPostInterceptor(Heap::hidden_symbol(),
+ hidden_obj,
+ DONT_ENUM);
+}
+
bool JSObject::HasElement(uint32_t index) {
return HasElementWithReceiver(this, index);
@@ -3099,8 +2985,19 @@ void Map::ClearCodeCache() {
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
- if (elements()->length() >= required_size) return;
- Expand(required_size);
+ Array* elts = elements();
+ const int kArraySizeThatFitsComfortablyInNewSpace = 128;
+ if (elts->length() < required_size) {
+ // Doubling in size would be overkill, but leave some slack to avoid
+ // constantly growing.
+ Expand(required_size + (required_size >> 3));
+ // It's a performance benefit to keep a frequently used array in new-space.
+ } else if (!Heap::new_space()->Contains(elts) &&
+ required_size < kArraySizeThatFitsComfortablyInNewSpace) {
+ // Expand will allocate a new backing store in new space even if the size
+ // we asked for isn't larger than what we had before.
+ Expand(required_size);
+ }
}
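
The new EnsureSize grows with slack instead of expanding to the exact requested size, and reallocates small old-space backing stores so a frequently used array migrates back to new space. The policy, restated as a standalone sketch (ExpandSketch stands in for the real reallocation):

    void ExpandSketch(int new_size);  // assumed: allocates a new backing store

    void EnsureSizeSketch(int current_length, bool backing_in_new_space,
                          int required_size) {
      const int kFitsComfortablyInNewSpace = 128;
      if (current_length < required_size) {
        // ~12.5% slack so repeated appends do not grow one slot at a time.
        ExpandSketch(required_size + (required_size >> 3));
      } else if (!backing_in_new_space &&
                 required_size < kFitsComfortablyInNewSpace) {
        // Same size, but reallocated purely to move the array into new space.
        ExpandSketch(required_size);
      }
    }
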
diff --git a/src/objects.cc b/src/objects.cc
index af1a0e55..0f8dca39 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -37,6 +37,7 @@
#include "scanner.h"
#include "scopeinfo.h"
#include "string-stream.h"
+#include "utils.h"
#ifdef ENABLE_DISASSEMBLER
#include "disassembler.h"
@@ -683,23 +684,6 @@ Object* String::TryFlatten() {
#endif
switch (StringShape(this).representation_tag()) {
- case kSlicedStringTag: {
- SlicedString* ss = SlicedString::cast(this);
- // The SlicedString constructor should ensure that there are no
- // SlicedStrings that are constructed directly on top of other
- // SlicedStrings.
- String* buf = ss->buffer();
- ASSERT(!buf->IsSlicedString());
- Object* ok = buf->TryFlatten();
- if (ok->IsFailure()) return ok;
- // Under certain circumstances (TryFlattenIfNotFlat fails in
- // String::Slice) we can have a cons string under a slice.
- // In this case we need to get the flat string out of the cons!
- if (StringShape(String::cast(ok)).IsCons()) {
- ss->set_buffer(ConsString::cast(ok)->first());
- }
- return this;
- }
case kConsStringTag: {
ConsString* cs = ConsString::cast(this);
if (cs->second()->length() == 0) {
@@ -771,19 +755,21 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
ASSERT(size >= ExternalString::kSize);
bool is_symbol = this->IsSymbol();
int length = this->length();
+ int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(ExternalTwoByteString::StringMap(length));
+ this->set_map(Heap::external_string_map());
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_length(length);
+ self->set_hash_field(hash_field);
self->set_resource(resource);
// Additionally make the object into an external symbol if the original string
// was a symbol to start with.
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into a external symbol.
- self->set_map(ExternalTwoByteString::SymbolMap(length));
+ this->set_map(Heap::external_symbol_map());
}
// Fill the remainder of the string with dead wood.
@@ -815,19 +801,21 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
ASSERT(size >= ExternalString::kSize);
bool is_symbol = this->IsSymbol();
int length = this->length();
+ int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(ExternalAsciiString::StringMap(length));
+ this->set_map(Heap::external_ascii_string_map());
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_length(length);
+ self->set_hash_field(hash_field);
self->set_resource(resource);
// Additionally make the object into an external symbol if the original string
// was a symbol to start with.
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into a external symbol.
- self->set_map(ExternalAsciiString::SymbolMap(length));
+ this->set_map(Heap::external_ascii_symbol_map());
}
// Fill the remainder of the string with dead wood.
@@ -839,7 +827,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
void String::StringShortPrint(StringStream* accumulator) {
int len = length();
- if (len > kMaxMediumStringSize) {
+ if (len > kMaxShortPrintLength) {
accumulator->Add("<Very long string[%u]>", len);
return;
}
@@ -1135,8 +1123,14 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case kConsStringTag:
reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v);
break;
- case kSlicedStringTag:
- reinterpret_cast<SlicedString*>(this)->SlicedStringIterateBody(v);
+ case kExternalStringTag:
+ if ((type & kStringEncodingMask) == kAsciiStringTag) {
+ reinterpret_cast<ExternalAsciiString*>(this)->
+ ExternalAsciiStringIterateBody(v);
+ } else {
+ reinterpret_cast<ExternalTwoByteString*>(this)->
+ ExternalTwoByteStringIterateBody(v);
+ }
break;
}
return;
@@ -1251,7 +1245,8 @@ String* JSObject::class_name() {
String* JSObject::constructor_name() {
if (IsJSFunction()) {
- return Heap::function_class_symbol();
+ return JSFunction::cast(this)->IsBoilerplate() ?
+ Heap::function_class_symbol() : Heap::closure_symbol();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -1473,8 +1468,8 @@ Object* JSObject::SetPropertyPostInterceptor(String* name,
Object* JSObject::ReplaceSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
+ Object* value,
+ PropertyAttributes attributes) {
StringDictionary* dictionary = property_dictionary();
int old_index = dictionary->FindEntry(name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
@@ -1488,6 +1483,7 @@ Object* JSObject::ReplaceSlowProperty(String* name,
return SetNormalizedProperty(name, value, new_details);
}
+
Object* JSObject::ConvertDescriptorToFieldAndMapTransition(
String* name,
Object* new_value,
@@ -1879,6 +1875,14 @@ Object* JSObject::SetProperty(LookupResult* result,
// interceptor calls.
AssertNoContextChange ncc;
+ // Optimization for strings of at most two characters, often used as keys
+ // in a decompression dictionary. We make these short keys into symbols to
+ // avoid constantly reallocating them.
+ if (!name->IsSymbol() && name->length() <= 2) {
+ Object* symbol_version = Heap::LookupSymbol(name);
+ if (!symbol_version->IsFailure()) name = String::cast(symbol_version);
+ }
+
// Check access rights if needed.
if (IsAccessCheckNeeded()
&& !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
@@ -2629,33 +2633,24 @@ bool JSObject::ReferencesObject(Object* obj) {
// Tests for the fast common case for property enumeration:
-// - this object has an enum cache
-// - this object has no elements
-// - no prototype has enumerable properties/elements
-// - neither this object nor any prototype has interceptors
+// - This object and all its prototypes have an enum cache (which means that
+// they have no interceptors and need no access checks).
+// - This object has no elements.
+// - No prototype has enumerable properties/elements.
bool JSObject::IsSimpleEnum() {
- JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
- if (IsAccessCheckNeeded()) return false;
- if (map()->constructor() == arguments_function) return false;
-
for (Object* o = this;
o != Heap::null_value();
o = JSObject::cast(o)->GetPrototype()) {
JSObject* curr = JSObject::cast(o);
- if (!curr->HasFastProperties()) return false;
if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
+ ASSERT(!curr->HasNamedInterceptor());
+ ASSERT(!curr->HasIndexedInterceptor());
+ ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
- if (curr->HasNamedInterceptor()) return false;
- if (curr->HasIndexedInterceptor()) return false;
if (curr != this) {
FixedArray* curr_fixed_array =
FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
- if (curr_fixed_array->length() > 0) {
- return false;
- }
+ if (curr_fixed_array->length() > 0) return false;
}
}
return true;
@@ -3561,12 +3556,7 @@ Vector<const char> String::ToAsciiVector() {
int length = this->length();
StringRepresentationTag string_tag = StringShape(this).representation_tag();
String* string = this;
- if (string_tag == kSlicedStringTag) {
- SlicedString* sliced = SlicedString::cast(string);
- offset += sliced->start();
- string = sliced->buffer();
- string_tag = StringShape(string).representation_tag();
- } else if (string_tag == kConsStringTag) {
+ if (string_tag == kConsStringTag) {
ConsString* cons = ConsString::cast(string);
ASSERT(cons->second()->length() == 0);
string = cons->first();
@@ -3592,12 +3582,7 @@ Vector<const uc16> String::ToUC16Vector() {
int length = this->length();
StringRepresentationTag string_tag = StringShape(this).representation_tag();
String* string = this;
- if (string_tag == kSlicedStringTag) {
- SlicedString* sliced = SlicedString::cast(string);
- offset += sliced->start();
- string = String::cast(sliced->buffer());
- string_tag = StringShape(string).representation_tag();
- } else if (string_tag == kConsStringTag) {
+ if (string_tag == kConsStringTag) {
ConsString* cons = ConsString::cast(string);
ASSERT(cons->second()->length() == 0);
string = cons->first();
@@ -3688,17 +3673,6 @@ const uc16* String::GetTwoByteData(unsigned start) {
case kExternalStringTag:
return ExternalTwoByteString::cast(this)->
ExternalTwoByteStringGetData(start);
- case kSlicedStringTag: {
- SlicedString* sliced_string = SlicedString::cast(this);
- String* buffer = sliced_string->buffer();
- if (StringShape(buffer).IsCons()) {
- ConsString* cs = ConsString::cast(buffer);
- // Flattened string.
- ASSERT(cs->second()->length() == 0);
- buffer = cs->first();
- }
- return buffer->GetTwoByteData(start + sliced_string->start());
- }
case kConsStringTag:
UNREACHABLE();
return NULL;
@@ -3853,22 +3827,6 @@ const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
}
-const unibrow::byte* SlicedString::SlicedStringReadBlock(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- String* backing = buffer();
- unsigned offset = start() + *offset_ptr;
- unsigned length = backing->length();
- if (max_chars > length - offset) {
- max_chars = length - offset;
- }
- const unibrow::byte* answer =
- String::ReadBlock(backing, rbb, &offset, max_chars);
- *offset_ptr = offset - start();
- return answer;
-}
-
-
uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
ASSERT(index >= 0 && index < length());
return resource()->data()[index];
@@ -3992,10 +3950,6 @@ const unibrow::byte* String::ReadBlock(String* input,
return ConsString::cast(input)->ConsStringReadBlock(rbb,
offset_ptr,
max_chars);
- case kSlicedStringTag:
- return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
- offset_ptr,
- max_chars);
case kExternalStringTag:
if (input->IsAsciiRepresentation()) {
return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
@@ -4138,20 +4092,15 @@ void String::ReadBlockIntoBuffer(String* input,
offset_ptr,
max_chars);
return;
- case kSlicedStringTag:
- SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
case kExternalStringTag:
if (input->IsAsciiRepresentation()) {
- ExternalAsciiString::cast(input)->
- ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
+ ExternalAsciiString::cast(input)->
+ ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
+ } else {
+ ExternalTwoByteString::cast(input)->
+ ExternalTwoByteStringReadBlockIntoBuffer(rbb,
+ offset_ptr,
+ max_chars);
}
return;
default:
@@ -4257,20 +4206,6 @@ void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
}
-void SlicedString::SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- String* backing = buffer();
- unsigned offset = start() + *offset_ptr;
- unsigned length = backing->length();
- if (max_chars > length - offset) {
- max_chars = length - offset;
- }
- String::ReadBlockIntoBuffer(backing, rbb, &offset, max_chars);
- *offset_ptr = offset - start();
-}
-
-
void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
}
@@ -4349,15 +4284,6 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kAsciiStringTag | kSlicedStringTag:
- case kTwoByteStringTag | kSlicedStringTag: {
- SlicedString* sliced_string = SlicedString::cast(source);
- int start = sliced_string->start();
- from += start;
- to += start;
- source = String::cast(sliced_string->buffer());
- break;
- }
case kAsciiStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString* cons_string = ConsString::cast(source);
@@ -4393,18 +4319,23 @@ void String::WriteToFlat(String* src,
}
-void SlicedString::SlicedStringIterateBody(ObjectVisitor* v) {
- IteratePointer(v, kBufferOffset);
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalAsciiStringResource Resource;
+ v->VisitExternalAsciiString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
-uint16_t SlicedString::SlicedStringGet(int index) {
- ASSERT(index >= 0 && index < this->length());
- // Delegate to the buffer string.
- String* underlying = buffer();
- return underlying->Get(start() + index);
+void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalStringResource Resource;
+ v->VisitExternalTwoByteString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
+#undef FIELD_ADDR
template <typename IteratorA, typename IteratorB>
static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
@@ -4549,23 +4480,11 @@ bool String::MarkAsUndetectable() {
if (StringShape(this).IsSymbol()) return false;
Map* map = this->map();
- if (map == Heap::short_string_map()) {
- this->set_map(Heap::undetectable_short_string_map());
- return true;
- } else if (map == Heap::medium_string_map()) {
- this->set_map(Heap::undetectable_medium_string_map());
- return true;
- } else if (map == Heap::long_string_map()) {
- this->set_map(Heap::undetectable_long_string_map());
- return true;
- } else if (map == Heap::short_ascii_string_map()) {
- this->set_map(Heap::undetectable_short_ascii_string_map());
+ if (map == Heap::string_map()) {
+ this->set_map(Heap::undetectable_string_map());
return true;
- } else if (map == Heap::medium_ascii_string_map()) {
- this->set_map(Heap::undetectable_medium_ascii_string_map());
- return true;
- } else if (map == Heap::long_ascii_string_map()) {
- this->set_map(Heap::undetectable_long_ascii_string_map());
+ } else if (map == Heap::ascii_string_map()) {
+ this->set_map(Heap::undetectable_ascii_string_map());
return true;
}
// Rest cannot be marked as undetectable
@@ -4588,17 +4507,17 @@ bool String::IsEqualTo(Vector<const char> str) {
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
- ASSERT(!(length_field() & kHashComputedMask));
+ ASSERT(!(hash_field() & kHashComputedMask));
// Compute the hash code.
StringInputBuffer buffer(this);
- uint32_t field = ComputeLengthAndHashField(&buffer, length());
+ uint32_t field = ComputeHashField(&buffer, length());
// Store the hash code in the object.
- set_length_field(field);
+ set_hash_field(field);
// Check the hash code is there.
- ASSERT(length_field() & kHashComputedMask);
+ ASSERT(hash_field() & kHashComputedMask);
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -4638,9 +4557,10 @@ bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
bool String::SlowAsArrayIndex(uint32_t* index) {
if (length() <= kMaxCachedArrayIndexLength) {
Hash(); // force computation of hash code
- uint32_t field = length_field();
+ uint32_t field = hash_field();
if ((field & kIsArrayIndexMask) == 0) return false;
- *index = (field & ((1 << kShortLengthShift) - 1)) >> kLongLengthShift;
+ // Isolate the array index from the full hash field.
+ *index = (kArrayIndexHashMask & field) >> kHashShift;
return true;
} else {
StringInputBuffer buffer(this);
@@ -4649,37 +4569,42 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
}
-static inline uint32_t HashField(uint32_t hash, bool is_array_index) {
+static inline uint32_t HashField(uint32_t hash,
+ bool is_array_index,
+ int length = -1) {
uint32_t result =
- (hash << String::kLongLengthShift) | String::kHashComputedMask;
- if (is_array_index) result |= String::kIsArrayIndexMask;
+ (hash << String::kHashShift) | String::kHashComputedMask;
+ if (is_array_index) {
+ // For array indexes, mix the length into the hash, as the array index
+ // itself could be zero.
+ ASSERT(length > 0);
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ result |= String::kIsArrayIndexMask;
+ result |= length << String::kArrayIndexHashLengthShift;
+ }
return result;
}
uint32_t StringHasher::GetHashField() {
ASSERT(is_valid());
- if (length_ <= String::kMaxShortStringSize) {
- uint32_t payload;
+ if (length_ <= String::kMaxHashCalcLength) {
if (is_array_index()) {
- payload = v8::internal::HashField(array_index(), true);
+ return v8::internal::HashField(array_index(), true, length_);
} else {
- payload = v8::internal::HashField(GetHash(), false);
+ return v8::internal::HashField(GetHash(), false);
}
- return (payload & ((1 << String::kShortLengthShift) - 1)) |
- (length_ << String::kShortLengthShift);
- } else if (length_ <= String::kMaxMediumStringSize) {
uint32_t payload = v8::internal::HashField(GetHash(), false);
- return (payload & ((1 << String::kMediumLengthShift) - 1)) |
- (length_ << String::kMediumLengthShift);
+ return payload;
} else {
return v8::internal::HashField(length_, false);
}
}
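
A minimal standalone sketch of the packing done by HashField() above, using
the constant values this patch introduces in objects.h (kHashShift = 2,
kArrayIndexHashLengthShift = 24 + 2 = 26); the one-character array-index
string "7" is the worked case:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kHashComputedMask = 1 << 0;
      const uint32_t kIsArrayIndexMask = 1 << 1;
      const int kHashShift = 2;
      const int kArrayIndexHashLengthShift = 26;
      const uint32_t kArrayIndexHashMask =
          (1u << kArrayIndexHashLengthShift) - 1;

      // Pack: array index value 7, string length 1.
      uint32_t field = (7u << kHashShift) | kHashComputedMask;
      field |= kIsArrayIndexMask;                 // mark as array index
      field |= 1u << kArrayIndexHashLengthShift;  // mix in the length
      assert(field == 0x0400001Fu);

      // Unpack, mirroring SlowAsArrayIndex() above; the two flag bits
      // fall inside the mask but shift out.
      uint32_t index = (kArrayIndexHashMask & field) >> kHashShift;
      assert(index == 7u);
      return 0;
    }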
-uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
- int length) {
+uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
+ int length) {
StringHasher hasher(length);
// Very long strings have a trivial hash that doesn't inspect the
@@ -4704,43 +4629,10 @@ uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
}
-Object* String::Slice(int start, int end) {
+Object* String::SubString(int start, int end) {
if (start == 0 && end == length()) return this;
- if (StringShape(this).representation_tag() == kSlicedStringTag) {
- // Translate slices of a SlicedString into slices of the
- // underlying string buffer.
- SlicedString* str = SlicedString::cast(this);
- String* buf = str->buffer();
- return Heap::AllocateSlicedString(buf,
- str->start() + start,
- str->start() + end);
- }
- Object* result = Heap::AllocateSlicedString(this, start, end);
- if (result->IsFailure()) {
- return result;
- }
- // Due to the way we retry after GC on allocation failure we are not allowed
- // to fail on allocation after this point. This is the one-allocation rule.
-
- // Try to flatten a cons string that is under the sliced string.
- // This is to avoid memory leaks and possible stack overflows caused by
- // building 'towers' of sliced strings on cons strings.
- // This may fail due to an allocation failure (when a GC is needed), but it
- // will succeed often enough to avoid the problem. We only have to do this
- // if Heap::AllocateSlicedString actually returned a SlicedString. It will
- // return flat strings for small slices for efficiency reasons.
- String* answer = String::cast(result);
- if (StringShape(answer).IsSliced() &&
- StringShape(this).representation_tag() == kConsStringTag) {
- TryFlatten();
- // If the flatten succeeded we might as well make the sliced string point
- // to the flat string rather than the cons string.
- String* second = ConsString::cast(this)->second();
- if (second->length() == 0) {
- SlicedString::cast(answer)->set_buffer(ConsString::cast(this)->first());
- }
- }
- return answer;
+ Object* result = Heap::AllocateSubString(this, start, end);
+ return result;
}
@@ -4920,13 +4812,9 @@ int SharedFunctionInfo::CalculateInObjectProperties() {
void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
- bool only_this_property_assignments,
bool only_simple_this_property_assignments,
FixedArray* assignments) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlyThisPropertyAssignments,
- only_this_property_assignments));
- set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
only_simple_this_property_assignments));
set_this_property_assignments(assignments);
@@ -4936,9 +4824,6 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlyThisPropertyAssignments,
- false));
- set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
false));
set_this_property_assignments(Heap::undefined_value());
@@ -4993,7 +4878,7 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
return;
}
- // Get the slice of the source for this function.
+ // Get the source for the script from which this function came.
// Don't use String::cast because we don't want more assertion errors while
// we are already creating a stack dump.
String* script_source =
@@ -5082,7 +4967,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
}
-void Code::Relocate(int delta) {
+void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
@@ -5148,8 +5033,9 @@ int Code::SourcePosition(Address pc) {
// Only look at positions after the current pc.
if (it.rinfo()->pc() < pc) {
// Get position and distance.
- int dist = pc - it.rinfo()->pc();
- int pos = it.rinfo()->data();
+
+ int dist = static_cast<int>(pc - it.rinfo()->pc());
+ int pos = static_cast<int>(it.rinfo()->data());
// If this position is closer than the current candidate or if it has the
// same distance as the current candidate and the position is higher then
// this position is the new candidate.
@@ -5176,7 +5062,7 @@ int Code::SourceStatementPosition(Address pc) {
RelocIterator it(this, RelocInfo::kPositionMask);
while (!it.done()) {
if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
- int p = it.rinfo()->data();
+ int p = static_cast<int>(it.rinfo()->data());
if (statement_position < p && p <= position) {
statement_position = p;
}
@@ -5353,9 +5239,7 @@ void JSArray::Expand(int required_size) {
Handle<JSArray> self(this);
Handle<FixedArray> old_backing(FixedArray::cast(elements()));
int old_size = old_backing->length();
- // Doubling in size would be overkill, but leave some slack to avoid
- // constantly growing.
- int new_size = required_size + (required_size >> 3);
+ int new_size = required_size > old_size ? required_size : old_size;
Handle<FixedArray> new_backing = Factory::NewFixedArray(new_size);
// Can't use this any more now because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
@@ -6284,6 +6168,18 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
}
+Object* JSObject::GetLocalPropertyPostInterceptor(
+ JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes) {
+ // Check local property in holder, ignore interceptor.
+ LookupResult result;
+ LocalLookupRealNamedProperty(name, &result);
+ if (!result.IsValid()) return Heap::undefined_value();
+ return GetProperty(receiver, &result, name, attributes);
+}
+
+
Object* JSObject::GetPropertyWithInterceptor(
JSObject* receiver,
String* name,
@@ -6573,6 +6469,15 @@ int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
int JSObject::NumberOfEnumElements() {
+ // Fast case for objects with no elements.
+ if (!IsJSValue() && HasFastElements()) {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ if (length == 0) return 0;
+ }
+ // Compute the number of enumerable elements.
return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
}
@@ -6832,19 +6737,19 @@ class RegExpKey : public HashTableKey {
class Utf8SymbolKey : public HashTableKey {
public:
explicit Utf8SymbolKey(Vector<const char> string)
- : string_(string), length_field_(0) { }
+ : string_(string), hash_field_(0) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsEqualTo(string_);
}
uint32_t Hash() {
- if (length_field_ != 0) return length_field_ >> String::kHashShift;
+ if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
unibrow::Utf8InputBuffer<> buffer(string_.start(),
static_cast<unsigned>(string_.length()));
chars_ = buffer.Length();
- length_field_ = String::ComputeLengthAndHashField(&buffer, chars_);
- uint32_t result = length_field_ >> String::kHashShift;
+ hash_field_ = String::ComputeHashField(&buffer, chars_);
+ uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
@@ -6854,12 +6759,12 @@ class Utf8SymbolKey : public HashTableKey {
}
Object* AsObject() {
- if (length_field_ == 0) Hash();
- return Heap::AllocateSymbol(string_, chars_, length_field_);
+ if (hash_field_ == 0) Hash();
+ return Heap::AllocateSymbol(string_, chars_, hash_field_);
}
Vector<const char> string_;
- uint32_t length_field_;
+ uint32_t hash_field_;
int chars_; // Caches the number of characters when computing the hash code.
};
@@ -6900,7 +6805,7 @@ class SymbolKey : public HashTableKey {
StringInputBuffer buffer(string_);
return Heap::AllocateInternalSymbol(&buffer,
string_->length(),
- string_->length_field());
+ string_->hash_field());
}
static uint32_t StringHash(Object* obj) {
@@ -7429,6 +7334,67 @@ Object* SymbolTable::LookupString(String* string, Object** s) {
}
+// This class is used for looking up two character strings in the symbol table.
+// If we don't have a hit, we don't want to waste much time, so we unroll
+// the string hash calculation loop here for speed. Doesn't work if the two
+// characters form a decimal integer, since such strings have a different hash
+// algorithm.
+class TwoCharHashTableKey : public HashTableKey {
+ public:
+ TwoCharHashTableKey(uint32_t c1, uint32_t c2)
+ : c1_(c1), c2_(c2) {
+ // Char 1.
+ uint32_t hash = c1 + (c1 << 10);
+ hash ^= hash >> 6;
+ // Char 2.
+ hash += c2;
+ hash += hash << 10;
+ hash ^= hash >> 6;
+ // GetHash.
+ hash += hash << 3;
+ hash ^= hash >> 11;
+ hash += hash << 15;
+ if (hash == 0) hash = 27;
+#ifdef DEBUG
+ StringHasher hasher(2);
+ hasher.AddCharacter(c1);
+ hasher.AddCharacter(c2);
+ // If this assert fails then we failed to reproduce the two-character
+ // version of the string hashing algorithm above. One reason could be
+ // that we were passed two digits as characters, since the hash
+ // algorithm is different in that case.
+ ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash));
+#endif
+ hash_ = hash;
+ }
+
+ bool IsMatch(Object* o) {
+ if (!o->IsString()) return false;
+ String* other = String::cast(o);
+ if (other->length() != 2) return false;
+ if (other->Get(0) != c1_) return false;
+ return other->Get(1) == c2_;
+ }
+
+ uint32_t Hash() { return hash_; }
+ uint32_t HashForObject(Object* key) {
+ if (!key->IsString()) return 0;
+ return String::cast(key)->Hash();
+ }
+
+ Object* AsObject() {
+ // The TwoCharHashTableKey is only used for looking in the symbol
+ // table, not for adding to it.
+ UNREACHABLE();
+ return NULL;
+ }
+ private:
+ uint32_t c1_;
+ uint32_t c2_;
+ uint32_t hash_;
+};
+
+
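The unrolled hash above can be reproduced in isolation; a minimal sketch that
mirrors the constructor step for step (the DEBUG block already asserts the
same equivalence against StringHasher):

    #include <cstdint>
    #include <cstdio>

    // Jenkins-style one-at-a-time hash, unrolled for exactly two characters.
    static uint32_t TwoCharHash(uint32_t c1, uint32_t c2) {
      uint32_t hash = c1 + (c1 << 10);  // add char 1
      hash ^= hash >> 6;
      hash += c2;                       // add char 2
      hash += hash << 10;
      hash ^= hash >> 6;
      hash += hash << 3;                // finalization, as in GetHash()
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash == 0 ? 27 : hash;     // the hash value 0 is reserved
    }

    int main() {
      // A plausible two-character property key; two digits would not
      // qualify, as noted above.
      std::printf("hash(\"on\") = %u\n", TwoCharHash('o', 'n'));
      return 0;
    }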
bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
SymbolKey key(string);
int entry = FindEntry(&key);
@@ -7443,6 +7409,22 @@ bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
}
+bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
+ uint32_t c2,
+ String** symbol) {
+ TwoCharHashTableKey key(c1, c2);
+ int entry = FindEntry(&key);
+ if (entry == kNotFound) {
+ return false;
+ } else {
+ String* result = String::cast(KeyAt(entry));
+ ASSERT(StringShape(result).IsSymbol());
+ *symbol = result;
+ return true;
+ }
+}
+
+
Object* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
Utf8SymbolKey key(str);
return LookupKey(&key, s);
diff --git a/src/objects.h b/src/objects.h
index 68bed6c6..671978ab 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -78,7 +78,6 @@
// - SeqAsciiString
// - SeqTwoByteString
// - ConsString
-// - SlicedString
// - ExternalString
// - ExternalAsciiString
// - ExternalTwoByteString
@@ -210,7 +209,7 @@ enum PropertyNormalizationMode {
// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
// mentioned explicitly in the name. Likewise, the default representation is
// considered sequential. It is not mentioned in the name. The other
-// representations (eg, CONS, SLICED, EXTERNAL) are explicitly mentioned.
+// representations (eg, CONS, EXTERNAL) are explicitly mentioned.
// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
// STRING_TYPE (if it is not a symbol).
//
@@ -222,308 +221,128 @@ enum PropertyNormalizationMode {
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
- V(SHORT_SYMBOL_TYPE) \
- V(MEDIUM_SYMBOL_TYPE) \
- V(LONG_SYMBOL_TYPE) \
- V(SHORT_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_ASCII_SYMBOL_TYPE) \
- V(LONG_ASCII_SYMBOL_TYPE) \
- V(SHORT_CONS_SYMBOL_TYPE) \
- V(MEDIUM_CONS_SYMBOL_TYPE) \
- V(LONG_CONS_SYMBOL_TYPE) \
- V(SHORT_CONS_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_CONS_ASCII_SYMBOL_TYPE) \
- V(LONG_CONS_ASCII_SYMBOL_TYPE) \
- V(SHORT_SLICED_SYMBOL_TYPE) \
- V(MEDIUM_SLICED_SYMBOL_TYPE) \
- V(LONG_SLICED_SYMBOL_TYPE) \
- V(SHORT_SLICED_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE) \
- V(LONG_SLICED_ASCII_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE) \
- V(MEDIUM_EXTERNAL_SYMBOL_TYPE) \
- V(LONG_EXTERNAL_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(SHORT_STRING_TYPE) \
- V(MEDIUM_STRING_TYPE) \
- V(LONG_STRING_TYPE) \
- V(SHORT_ASCII_STRING_TYPE) \
- V(MEDIUM_ASCII_STRING_TYPE) \
- V(LONG_ASCII_STRING_TYPE) \
- V(SHORT_CONS_STRING_TYPE) \
- V(MEDIUM_CONS_STRING_TYPE) \
- V(LONG_CONS_STRING_TYPE) \
- V(SHORT_CONS_ASCII_STRING_TYPE) \
- V(MEDIUM_CONS_ASCII_STRING_TYPE) \
- V(LONG_CONS_ASCII_STRING_TYPE) \
- V(SHORT_SLICED_STRING_TYPE) \
- V(MEDIUM_SLICED_STRING_TYPE) \
- V(LONG_SLICED_STRING_TYPE) \
- V(SHORT_SLICED_ASCII_STRING_TYPE) \
- V(MEDIUM_SLICED_ASCII_STRING_TYPE) \
- V(LONG_SLICED_ASCII_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(MEDIUM_EXTERNAL_STRING_TYPE) \
- V(LONG_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
- V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE) \
- V(LONG_EXTERNAL_ASCII_STRING_TYPE) \
- V(LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
- \
- V(MAP_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(FIXED_ARRAY_TYPE) \
- V(CODE_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
- V(ODDBALL_TYPE) \
- V(PROXY_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(PIXEL_ARRAY_TYPE) \
- /* Note: the order of these external array */ \
- /* types is relied upon in */ \
- /* Object::IsExternalArray(). */ \
- V(EXTERNAL_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_INT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT_ARRAY_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(CALL_HANDLER_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(SIGNATURE_INFO_TYPE) \
- V(TYPE_SWITCH_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- \
- V(JS_VALUE_TYPE) \
- V(JS_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_REGEXP_TYPE) \
- \
- V(JS_FUNCTION_TYPE) \
+#define INSTANCE_TYPE_LIST_ALL(V) \
+ V(SYMBOL_TYPE) \
+ V(ASCII_SYMBOL_TYPE) \
+ V(CONS_SYMBOL_TYPE) \
+ V(CONS_ASCII_SYMBOL_TYPE) \
+ V(EXTERNAL_SYMBOL_TYPE) \
+ V(EXTERNAL_ASCII_SYMBOL_TYPE) \
+ V(STRING_TYPE) \
+ V(ASCII_STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(CONS_ASCII_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(EXTERNAL_ASCII_STRING_TYPE) \
+ V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
+ V(FIXED_ARRAY_TYPE) \
+ V(CODE_TYPE) \
+ V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
+ V(ODDBALL_TYPE) \
+ V(PROXY_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(PIXEL_ARRAY_TYPE) \
+ /* Note: the order of these external array */ \
+ /* types is relied upon in */ \
+ /* Object::IsExternalArray(). */ \
+ V(EXTERNAL_BYTE_ARRAY_TYPE) \
+ V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
+ V(EXTERNAL_SHORT_ARRAY_TYPE) \
+ V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
+ V(EXTERNAL_INT_ARRAY_TYPE) \
+ V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT_ARRAY_TYPE) \
+ V(FILLER_TYPE) \
+ \
+ V(ACCESSOR_INFO_TYPE) \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ V(CALL_HANDLER_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(SIGNATURE_INFO_TYPE) \
+ V(TYPE_SWITCH_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ \
+ V(JS_VALUE_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_BUILTINS_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ \
+ V(JS_FUNCTION_TYPE) \
#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
- V(DEBUG_INFO_TYPE) \
+#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
#else
#define INSTANCE_TYPE_LIST_DEBUGGER(V)
#endif
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_ALL(V) \
INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
- V(SHORT_SYMBOL_TYPE, \
+ V(SYMBOL_TYPE, \
SeqTwoByteString::kAlignedSize, \
- short_symbol, \
- ShortSymbol) \
- V(MEDIUM_SYMBOL_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- medium_symbol, \
- MediumSymbol) \
- V(LONG_SYMBOL_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- long_symbol, \
- LongSymbol) \
- V(SHORT_ASCII_SYMBOL_TYPE, \
- SeqAsciiString::kAlignedSize, \
- short_ascii_symbol, \
- ShortAsciiSymbol) \
- V(MEDIUM_ASCII_SYMBOL_TYPE, \
+ symbol, \
+ Symbol) \
+ V(ASCII_SYMBOL_TYPE, \
SeqAsciiString::kAlignedSize, \
- medium_ascii_symbol, \
- MediumAsciiSymbol) \
- V(LONG_ASCII_SYMBOL_TYPE, \
- SeqAsciiString::kAlignedSize, \
- long_ascii_symbol, \
- LongAsciiSymbol) \
- V(SHORT_CONS_SYMBOL_TYPE, \
- ConsString::kSize, \
- short_cons_symbol, \
- ShortConsSymbol) \
- V(MEDIUM_CONS_SYMBOL_TYPE, \
+ ascii_symbol, \
+ AsciiSymbol) \
+ V(CONS_SYMBOL_TYPE, \
ConsString::kSize, \
- medium_cons_symbol, \
- MediumConsSymbol) \
- V(LONG_CONS_SYMBOL_TYPE, \
+ cons_symbol, \
+ ConsSymbol) \
+ V(CONS_ASCII_SYMBOL_TYPE, \
ConsString::kSize, \
- long_cons_symbol, \
- LongConsSymbol) \
- V(SHORT_CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- short_cons_ascii_symbol, \
- ShortConsAsciiSymbol) \
- V(MEDIUM_CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- medium_cons_ascii_symbol, \
- MediumConsAsciiSymbol) \
- V(LONG_CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- long_cons_ascii_symbol, \
- LongConsAsciiSymbol) \
- V(SHORT_SLICED_SYMBOL_TYPE, \
- SlicedString::kSize, \
- short_sliced_symbol, \
- ShortSlicedSymbol) \
- V(MEDIUM_SLICED_SYMBOL_TYPE, \
- SlicedString::kSize, \
- medium_sliced_symbol, \
- MediumSlicedSymbol) \
- V(LONG_SLICED_SYMBOL_TYPE, \
- SlicedString::kSize, \
- long_sliced_symbol, \
- LongSlicedSymbol) \
- V(SHORT_SLICED_ASCII_SYMBOL_TYPE, \
- SlicedString::kSize, \
- short_sliced_ascii_symbol, \
- ShortSlicedAsciiSymbol) \
- V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE, \
- SlicedString::kSize, \
- medium_sliced_ascii_symbol, \
- MediumSlicedAsciiSymbol) \
- V(LONG_SLICED_ASCII_SYMBOL_TYPE, \
- SlicedString::kSize, \
- long_sliced_ascii_symbol, \
- LongSlicedAsciiSymbol) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- short_external_symbol, \
- ShortExternalSymbol) \
- V(MEDIUM_EXTERNAL_SYMBOL_TYPE, \
+ cons_ascii_symbol, \
+ ConsAsciiSymbol) \
+ V(EXTERNAL_SYMBOL_TYPE, \
ExternalTwoByteString::kSize, \
- medium_external_symbol, \
- MediumExternalSymbol) \
- V(LONG_EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- long_external_symbol, \
- LongExternalSymbol) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- short_external_ascii_symbol, \
- ShortExternalAsciiSymbol) \
- V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- medium_external_ascii_symbol, \
- MediumExternalAsciiSymbol) \
- V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE, \
+ external_symbol, \
+ ExternalSymbol) \
+ V(EXTERNAL_ASCII_SYMBOL_TYPE, \
ExternalAsciiString::kSize, \
- long_external_ascii_symbol, \
- LongExternalAsciiSymbol) \
- V(SHORT_STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- short_string, \
- ShortString) \
- V(MEDIUM_STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- medium_string, \
- MediumString) \
- V(LONG_STRING_TYPE, \
+ external_ascii_symbol, \
+ ExternalAsciiSymbol) \
+ V(STRING_TYPE, \
SeqTwoByteString::kAlignedSize, \
- long_string, \
- LongString) \
- V(SHORT_ASCII_STRING_TYPE, \
+ string, \
+ String) \
+ V(ASCII_STRING_TYPE, \
SeqAsciiString::kAlignedSize, \
- short_ascii_string, \
- ShortAsciiString) \
- V(MEDIUM_ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
- medium_ascii_string, \
- MediumAsciiString) \
- V(LONG_ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
- long_ascii_string, \
- LongAsciiString) \
- V(SHORT_CONS_STRING_TYPE, \
- ConsString::kSize, \
- short_cons_string, \
- ShortConsString) \
- V(MEDIUM_CONS_STRING_TYPE, \
- ConsString::kSize, \
- medium_cons_string, \
- MediumConsString) \
- V(LONG_CONS_STRING_TYPE, \
+ ascii_string, \
+ AsciiString) \
+ V(CONS_STRING_TYPE, \
ConsString::kSize, \
- long_cons_string, \
- LongConsString) \
- V(SHORT_CONS_ASCII_STRING_TYPE, \
+ cons_string, \
+ ConsString) \
+ V(CONS_ASCII_STRING_TYPE, \
ConsString::kSize, \
- short_cons_ascii_string, \
- ShortConsAsciiString) \
- V(MEDIUM_CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- medium_cons_ascii_string, \
- MediumConsAsciiString) \
- V(LONG_CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- long_cons_ascii_string, \
- LongConsAsciiString) \
- V(SHORT_SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- short_sliced_string, \
- ShortSlicedString) \
- V(MEDIUM_SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- medium_sliced_string, \
- MediumSlicedString) \
- V(LONG_SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- long_sliced_string, \
- LongSlicedString) \
- V(SHORT_SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- short_sliced_ascii_string, \
- ShortSlicedAsciiString) \
- V(MEDIUM_SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- medium_sliced_ascii_string, \
- MediumSlicedAsciiString) \
- V(LONG_SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- long_sliced_ascii_string, \
- LongSlicedAsciiString) \
- V(SHORT_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- short_external_string, \
- ShortExternalString) \
- V(MEDIUM_EXTERNAL_STRING_TYPE, \
+ cons_ascii_string, \
+ ConsAsciiString) \
+ V(EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kSize, \
- medium_external_string, \
- MediumExternalString) \
- V(LONG_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- long_external_string, \
- LongExternalString) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- short_external_ascii_string, \
- ShortExternalAsciiString) \
- V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE, \
+ external_string, \
+ ExternalString) \
+ V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
- medium_external_ascii_string, \
- MediumExternalAsciiString) \
- V(LONG_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- long_external_ascii_string, \
- LongExternalAsciiString)
+ external_ascii_string, \
+ ExternalAsciiString) \
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -534,27 +353,27 @@ enum PropertyNormalizationMode {
// Note that for subtle reasons related to the ordering or numerical values of
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
-#define STRUCT_LIST_ALL(V) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
- V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
- V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(SIGNATURE_INFO, SignatureInfo, signature_info) \
- V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
+#define STRUCT_LIST_ALL(V) \
+ V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
+ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
+ V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+ V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
+ V(SIGNATURE_INFO, SignatureInfo, signature_info) \
+ V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script)
#ifdef ENABLE_DEBUGGER_SUPPORT
-#define STRUCT_LIST_DEBUGGER(V) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
+#define STRUCT_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)
#else
#define STRUCT_LIST_DEBUGGER(V)
#endif
-#define STRUCT_LIST(V) \
- STRUCT_LIST_ALL(V) \
+#define STRUCT_LIST(V) \
+ STRUCT_LIST_ALL(V) \
STRUCT_LIST_DEBUGGER(V)
// We use the full 8 bits of the instance_type field to encode heap object
@@ -570,15 +389,6 @@ const uint32_t kIsSymbolMask = 0x20;
const uint32_t kNotSymbolTag = 0x0;
const uint32_t kSymbolTag = 0x20;
-// If bit 7 is clear, bits 3 and 4 are the string's size (short, medium or
-// long). These values are very special in that they are also used to shift
-// the length field to get the length, removing the hash value. This avoids
-// using if or switch when getting the length of a string.
-const uint32_t kStringSizeMask = 0x18;
-const uint32_t kShortStringTag = 0x18;
-const uint32_t kMediumStringTag = 0x10;
-const uint32_t kLongStringTag = 0x00;
-
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
const uint32_t kStringEncodingMask = 0x4;
@@ -591,7 +401,6 @@ const uint32_t kStringRepresentationMask = 0x03;
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
- kSlicedStringTag = 0x2,
kExternalStringTag = 0x3
};
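
As a worked example of how the simplified instance types below compose
(assuming kAsciiStringTag = 0x4, the value of kStringEncodingMask above,
which this patch leaves unchanged):

    EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kExternalStringTag
                               = 0x04 | 0x20 | 0x03
                               = 0x27
    CONS_STRING_TYPE           = kConsStringTag = 0x01
    STRING_TYPE                = kSeqStringTag  = 0x00

A sequential two-byte non-symbol string contributes no tag bits at all, which
is why STRING_TYPE is simply kSeqStringTag.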
@@ -609,78 +418,20 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
- SHORT_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSeqStringTag,
- MEDIUM_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSeqStringTag,
- LONG_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSeqStringTag,
- SHORT_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
- MEDIUM_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
- LONG_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
- SHORT_CONS_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kConsStringTag,
- MEDIUM_CONS_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kConsStringTag,
- LONG_CONS_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kConsStringTag,
- SHORT_CONS_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
- MEDIUM_CONS_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
- LONG_CONS_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
- SHORT_SLICED_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSlicedStringTag,
- MEDIUM_SLICED_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSlicedStringTag,
- LONG_SLICED_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSlicedStringTag,
- SHORT_SLICED_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
- MEDIUM_SLICED_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
- LONG_SLICED_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
- SHORT_EXTERNAL_SYMBOL_TYPE =
- kShortStringTag | kSymbolTag | kExternalStringTag,
- MEDIUM_EXTERNAL_SYMBOL_TYPE =
- kMediumStringTag | kSymbolTag | kExternalStringTag,
- LONG_EXTERNAL_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
- MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
- LONG_EXTERNAL_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
- SHORT_STRING_TYPE = kShortStringTag | kSeqStringTag,
- MEDIUM_STRING_TYPE = kMediumStringTag | kSeqStringTag,
- LONG_STRING_TYPE = kLongStringTag | kSeqStringTag,
- SHORT_ASCII_STRING_TYPE = kShortStringTag | kAsciiStringTag | kSeqStringTag,
- MEDIUM_ASCII_STRING_TYPE = kMediumStringTag | kAsciiStringTag | kSeqStringTag,
- LONG_ASCII_STRING_TYPE = kLongStringTag | kAsciiStringTag | kSeqStringTag,
- SHORT_CONS_STRING_TYPE = kShortStringTag | kConsStringTag,
- MEDIUM_CONS_STRING_TYPE = kMediumStringTag | kConsStringTag,
- LONG_CONS_STRING_TYPE = kLongStringTag | kConsStringTag,
- SHORT_CONS_ASCII_STRING_TYPE =
- kShortStringTag | kAsciiStringTag | kConsStringTag,
- MEDIUM_CONS_ASCII_STRING_TYPE =
- kMediumStringTag | kAsciiStringTag | kConsStringTag,
- LONG_CONS_ASCII_STRING_TYPE =
- kLongStringTag | kAsciiStringTag | kConsStringTag,
- SHORT_SLICED_STRING_TYPE = kShortStringTag | kSlicedStringTag,
- MEDIUM_SLICED_STRING_TYPE = kMediumStringTag | kSlicedStringTag,
- LONG_SLICED_STRING_TYPE = kLongStringTag | kSlicedStringTag,
- SHORT_SLICED_ASCII_STRING_TYPE =
- kShortStringTag | kAsciiStringTag | kSlicedStringTag,
- MEDIUM_SLICED_ASCII_STRING_TYPE =
- kMediumStringTag | kAsciiStringTag | kSlicedStringTag,
- LONG_SLICED_ASCII_STRING_TYPE =
- kLongStringTag | kAsciiStringTag | kSlicedStringTag,
- SHORT_EXTERNAL_STRING_TYPE = kShortStringTag | kExternalStringTag,
- MEDIUM_EXTERNAL_STRING_TYPE = kMediumStringTag | kExternalStringTag,
- LONG_EXTERNAL_STRING_TYPE = kLongStringTag | kExternalStringTag,
- SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kShortStringTag | kAsciiStringTag | kExternalStringTag,
- MEDIUM_EXTERNAL_ASCII_STRING_TYPE =
- kMediumStringTag | kAsciiStringTag | kExternalStringTag,
- LONG_EXTERNAL_ASCII_STRING_TYPE =
- kLongStringTag | kAsciiStringTag | kExternalStringTag,
- LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE = LONG_EXTERNAL_ASCII_STRING_TYPE,
+ SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
+ ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
+ CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
+ CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
+ EXTERNAL_SYMBOL_TYPE = kSymbolTag | kExternalStringTag,
+ EXTERNAL_ASCII_SYMBOL_TYPE =
+ kAsciiStringTag | kSymbolTag | kExternalStringTag,
+ STRING_TYPE = kSeqStringTag,
+ ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
+ CONS_STRING_TYPE = kConsStringTag,
+ CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
+ EXTERNAL_STRING_TYPE = kExternalStringTag,
+ EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
+ PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
MAP_TYPE = kNotStringTag,
HEAP_NUMBER_TYPE,
@@ -790,16 +541,13 @@ class Object BASE_EMBEDDED {
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
-#ifdef DEBUG
// See objects-inl.h for more details
inline bool IsSeqString();
- inline bool IsSlicedString();
inline bool IsExternalString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
-#endif // DEBUG
inline bool IsConsString();
inline bool IsNumber();
@@ -1082,7 +830,6 @@ class MapWord BASE_EMBEDDED {
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
-
// Marking phase of full collection: the map word of live objects is
// marked, and may be marked as overflowed (eg, the object is live, its
// children have not been visited, and it does not fit in the marking
@@ -1481,6 +1228,9 @@ class JSObject: public HeapObject {
Object* GetPropertyPostInterceptor(JSObject* receiver,
String* name,
PropertyAttributes* attributes);
+ Object* GetLocalPropertyPostInterceptor(JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes);
Object* GetLazyProperty(Object* receiver,
LookupResult* result,
String* name,
@@ -1502,6 +1252,27 @@ class JSObject: public HeapObject {
return GetLocalPropertyAttribute(name) != ABSENT;
}
+ // If the receiver is a JSGlobalProxy this method will return its prototype,
+ // otherwise the result is the receiver itself.
+ inline Object* BypassGlobalProxy();
+
+ // Accessors for hidden properties object.
+ //
+ // Hidden properties are not local properties of the object itself.
+ // Instead they are stored on an auxiliary JSObject held as a local
+ // property with the special name Heap::hidden_symbol(). But if the
+ // receiver is a JSGlobalProxy then the auxiliary object is a property
+ // of its prototype.
+ //
+ // The Has/Get/SetHiddenPropertiesObject methods don't allow the holder
+ // to be a JSGlobalProxy. Use the BypassGlobalProxy method above to get
+ // to the real holder.
+ //
+ // These accessors do not touch interceptors or accessors.
+ inline bool HasHiddenPropertiesObject();
+ inline Object* GetHiddenPropertiesObject();
+ inline Object* SetHiddenPropertiesObject(Object* hidden_obj);
+
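A hypothetical caller sketch for these accessors (a fragment, not part of the
patch; 'obj' stands for any JSObject* that might be a global proxy):

    // Get past a possible JSGlobalProxy first, as required above.
    Object* holder_obj = obj->BypassGlobalProxy();
    if (!holder_obj->IsJSObject()) return;  // assumed possible for a
                                            // detached proxy
    JSObject* holder = JSObject::cast(holder_obj);
    if (holder->HasHiddenPropertiesObject()) {
      Object* hidden = holder->GetHiddenPropertiesObject();
      // ... read or write state on the auxiliary hidden-properties object ...
    }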
Object* DeleteProperty(String* name, DeleteMode mode);
Object* DeleteElement(uint32_t index, DeleteMode mode);
Object* DeleteLazyProperty(LookupResult* result,
@@ -2238,6 +2009,7 @@ class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
// true if it is found, assigning the symbol to the given output
// parameter.
bool LookupSymbolIfExists(String* str, String** symbol);
+ bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol);
// Casting.
static inline SymbolTable* cast(Object* obj);
@@ -2864,7 +2636,7 @@ class Code: public HeapObject {
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
- void Relocate(int delta);
+ void Relocate(intptr_t delta);
// Migrate code described by desc.
void CopyFrom(const CodeDesc& desc);
@@ -2901,7 +2673,8 @@ class Code: public HeapObject {
void CodeVerify();
#endif
// Code entry points are aligned to 32 bytes.
- static const int kCodeAlignment = 32;
+ static const int kCodeAlignmentBits = 5;
+ static const int kCodeAlignment = 1 << kCodeAlignmentBits;
static const int kCodeAlignmentMask = kCodeAlignment - 1;
// Layout description.
@@ -3229,12 +3002,12 @@ class Script: public Struct {
// [compilation]: how the script was compiled.
DECL_ACCESSORS(compilation_type, Smi)
- // [line_ends]: array of line ends positions.
+ // [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
- // [eval_from_function]: for eval scripts the funcion from which eval was
- // called.
- DECL_ACCESSORS(eval_from_function, Object)
+ // [eval_from_shared]: for eval scripts the shared function info for the
+ // function from which eval was called.
+ DECL_ACCESSORS(eval_from_shared, Object)
// [eval_from_instructions_offset]: the instruction offset in the code of
// the function from which eval was called, at the point of the eval call.
@@ -3262,9 +3035,9 @@ class Script: public Struct {
static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize;
+ static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
- kEvalFromFunctionOffset + kPointerSize;
+ kEvalFromSharedOffset + kPointerSize;
static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
private:
@@ -3362,7 +3135,6 @@ class SharedFunctionInfo: public HeapObject {
// Add information on assignments of the form this.x = ...;
void SetThisPropertyAssignmentsInfo(
- bool has_only_this_property_assignments,
bool has_only_simple_this_property_assignments,
FixedArray* this_property_assignments);
@@ -3370,13 +3142,12 @@ class SharedFunctionInfo: public HeapObject {
void ClearThisPropertyAssignmentsInfo();
// Indicate that this function only consists of assignments of the form
- // this.x = ...;.
- inline bool has_only_this_property_assignments();
-
- // Indicate that this function only consists of assignments of the form
// this.x = y; where y is either a constant or refers to an argument.
inline bool has_only_simple_this_property_assignments();
+ inline bool try_fast_codegen();
+ inline void set_try_fast_codegen(bool flag);
+
// For functions which only contains this property assignments this provides
// access to the names for the properties assigned.
DECL_ACCESSORS(this_property_assignments, Object)
@@ -3455,8 +3226,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
- static const int kHasOnlyThisPropertyAssignments = 0;
- static const int kHasOnlySimpleThisPropertyAssignments = 1;
+ static const int kHasOnlySimpleThisPropertyAssignments = 0;
+ static const int kTryFastCodegen = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -3886,6 +3657,7 @@ class StringHasher {
bool is_array_index_;
bool is_first_char_;
bool is_valid_;
+ friend class TwoCharHashTableKey;
};
@@ -3908,7 +3680,6 @@ class StringShape BASE_EMBEDDED {
inline bool IsSequential();
inline bool IsExternal();
inline bool IsCons();
- inline bool IsSliced();
inline bool IsExternalAscii();
inline bool IsExternalTwoByte();
inline bool IsSequentialAscii();
@@ -3949,12 +3720,9 @@ class String: public HeapObject {
inline int length();
inline void set_length(int value);
- // Get and set the uninterpreted length field of the string. Notice
- // that the length field is also used to cache the hash value of
- // strings. In order to get or set the actual length of the string
- // use the length() and set_length methods.
- inline uint32_t length_field();
- inline void set_length_field(uint32_t value);
+ // Get and set the hash field of the string.
+ inline uint32_t hash_field();
+ inline void set_hash_field(uint32_t value);
inline bool IsAsciiRepresentation();
inline bool IsTwoByteRepresentation();
@@ -3966,9 +3734,8 @@ class String: public HeapObject {
inline uint16_t Get(int index);
// Try to flatten the top level ConsString that is hiding behind this
- // string. This is a no-op unless the string is a ConsString or a
- // SlicedString. Flatten mutates the ConsString and might return a
- // failure.
+ // string. This is a no-op unless the string is a ConsString. Flatten
+ // mutates the ConsString and might return a failure.
Object* TryFlatten();
// Try to flatten the string. Checks first inline to see if it is necessary.
@@ -3984,8 +3751,8 @@ class String: public HeapObject {
// ascii and two byte string types.
bool MarkAsUndetectable();
- // Slice the string and return a substring.
- Object* Slice(int from, int to);
+ // Return a substring.
+ Object* SubString(int from, int to);
// String equality operations.
inline bool Equals(String* other);
@@ -4026,8 +3793,8 @@ class String: public HeapObject {
// Returns a hash value used for the property table
inline uint32_t Hash();
- static uint32_t ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
- int length);
+ static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
+ int length);
static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
uint32_t* index,
@@ -4058,13 +3825,12 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kSize = kLengthOffset + kIntSize;
+ static const int kHashFieldOffset = kLengthOffset + kIntSize;
+ static const int kSize = kHashFieldOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.
- // Limits on sizes of different types of strings.
- static const int kMaxShortStringSize = 63;
- static const int kMaxMediumStringSize = 16383;
-
+ // Maximum number of characters to consider when trying to convert a string
+ // value into an array index.
static const int kMaxArrayIndexSize = 10;
// Max ascii char code.
@@ -4072,7 +3838,7 @@ class String: public HeapObject {
static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
static const int kMaxUC16CharCode = 0xffff;
- // Minimum length for a cons or sliced string.
+ // Minimum length for a cons string.
static const int kMinNonFlatLength = 13;
// Mask constant for checking if a string has a computed hash code
@@ -4084,18 +3850,30 @@ class String: public HeapObject {
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;
+ // Shift constant retrieving hash code from hash field.
+ static const int kHashShift = kNofLengthBitFields;
+
// Array index strings this short can keep their index in the hash
// field.
static const int kMaxCachedArrayIndexLength = 7;
- // Shift constants for retriving length and hash code from
- // length/hash field.
- static const int kHashShift = kNofLengthBitFields;
- static const int kShortLengthShift = kHashShift + kShortStringTag;
- static const int kMediumLengthShift = kHashShift + kMediumStringTag;
- static const int kLongLengthShift = kHashShift + kLongStringTag;
- // Maximal string length that can be stored in the hash/length field.
- static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
+ // For strings which are array indexes, the string length is mixed into
+ // the hash value, mainly to avoid a hash value of zero, which would be
+ // the case for the string '0'. 24 bits are used for the array index value.
+ static const int kArrayIndexHashLengthShift = 24 + kNofLengthBitFields;
+ static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
+ static const int kArrayIndexValueBits =
+ kArrayIndexHashLengthShift - kHashShift;
+
+ // Value of empty hash field indicating that the hash is not computed.
+ static const int kEmptyHashField = 0;
+
+ // Maximal string length.
+ static const int kMaxLength = (1 << (32 - 2)) - 1;
+
+ // Max length for computing hash. For strings longer than this limit the
+ // string length is used as the hash value.
+ static const int kMaxHashCalcLength = 16383;
// Limit for truncation in short printing.
static const int kMaxShortPrintLength = 1024;
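
Taken together, these constants imply the following layout for the 32-bit
hash field (a summary sketch, with kNofLengthBitFields = 2 as above):

    bit  0       hash-computed flag                     (kHashComputedMask)
    bit  1       is-array-index flag                    (kIsArrayIndexMask)
    bits 2..25   hash value, or the array index itself  (kArrayIndexValueBits = 24)
    bits 26..31  string length, mixed in for array indexes only

With the length now stored in its own field (kLengthOffset above), the hash
field no longer encodes size classes; kMaxLength = (1 << 30) - 1 presumably
keeps any length representable as a positive Smi on 32-bit targets.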
@@ -4141,12 +3919,6 @@ class String: public HeapObject {
unsigned remaining;
};
- // NOTE: If you call StringInputBuffer routines on strings that are
- // too deeply nested trees of cons and slice strings, then this
- // routine will overflow the stack. Strings that are merely deeply
- // nested trees of cons strings do not have a problem apart from
- // performance.
-
static inline const unibrow::byte* ReadBlock(String* input,
ReadBlockBuffer* buffer,
unsigned* offset,
@@ -4331,56 +4103,6 @@ class ConsString: public String {
};
-// The SlicedString class describes string values that are slices of
-// some other string. SlicedStrings consist of a reference to an
-// underlying heap-allocated string value, a start index, and the
-// length field common to all strings.
-class SlicedString: public String {
- public:
- // The underlying string buffer.
- inline String* buffer();
- inline void set_buffer(String* buffer);
-
- // The start index of the slice.
- inline int start();
- inline void set_start(int start);
-
- // Dispatched behavior.
- uint16_t SlicedStringGet(int index);
-
- // Casting.
- static inline SlicedString* cast(Object* obj);
-
- // Garbage collection support.
- void SlicedStringIterateBody(ObjectVisitor* v);
-
- // Layout description
-#if V8_HOST_ARCH_64_BIT
- // Optimizations expect buffer to be located at same offset as a ConsString's
- // first substring. In 64 bit mode we have room for the start offset before
- // the buffer.
- static const int kStartOffset = String::kSize;
- static const int kBufferOffset = kStartOffset + kIntSize;
- static const int kSize = kBufferOffset + kPointerSize;
-#else
- static const int kBufferOffset = String::kSize;
- static const int kStartOffset = kBufferOffset + kPointerSize;
- static const int kSize = kStartOffset + kIntSize;
-#endif
-
- // Support for StringInputBuffer.
- inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
-};
-
-
// The ExternalString class describes string values that are backed by
// a string resource that lies outside the V8 heap. ExternalStrings
// consist of the length field common to all strings, a pointer to the
@@ -4422,6 +4144,9 @@ class ExternalAsciiString: public ExternalString {
// Casting.
static inline ExternalAsciiString* cast(Object* obj);
+ // Garbage collection support.
+ void ExternalAsciiStringIterateBody(ObjectVisitor* v);
+
// Support for StringInputBuffer.
const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
unsigned* offset,
@@ -4430,9 +4155,6 @@ class ExternalAsciiString: public ExternalString {
unsigned* offset,
unsigned chars);
- // Identify the map for the external string/symbol with a particular length.
- static inline Map* StringMap(int length);
- static inline Map* SymbolMap(int length);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
};
@@ -4457,14 +4179,14 @@ class ExternalTwoByteString: public ExternalString {
// Casting.
static inline ExternalTwoByteString* cast(Object* obj);
+ // Garbage collection support.
+ void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
+
// Support for StringInputBuffer.
void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
unsigned chars);
- // Identify the map for the external string/symbol with a particular length.
- static inline Map* StringMap(int length);
- static inline Map* SymbolMap(int length);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
};
@@ -4708,6 +4430,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
+ DECL_ACCESSORS(load_stub_cache, Object)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -4733,7 +4456,8 @@ class AccessorInfo: public Struct {
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kNameOffset = kDataOffset + kPointerSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kLoadStubCacheOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kLoadStubCacheOffset + kPointerSize;
private:
// Bit positions in flag.
@@ -5086,6 +4810,12 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
+ // Visits the resource of an ASCII or two-byte string.
+ virtual void VisitExternalAsciiString(
+ v8::String::ExternalAsciiStringResource** resource) {}
+ virtual void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) {}
+
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
@@ -5105,6 +4835,8 @@ class ObjectVisitor BASE_EMBEDDED {
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
virtual void Synchronize(const char* tag) {}
+#else
+ inline void Synchronize(const char* tag) {}
#endif
};
diff --git a/src/parser.cc b/src/parser.cc
index 02fcfdc5..c37078ce 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -676,17 +676,12 @@ class TemporaryScope BASE_EMBEDDED {
int materialized_literal_count() { return materialized_literal_count_; }
void SetThisPropertyAssignmentInfo(
- bool only_this_property_assignments,
bool only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments) {
- only_this_property_assignments_ = only_this_property_assignments;
only_simple_this_property_assignments_ =
only_simple_this_property_assignments;
this_property_assignments_ = this_property_assignments;
}
- bool only_this_property_assignments() {
- return only_this_property_assignments_;
- }
bool only_simple_this_property_assignments() {
return only_simple_this_property_assignments_;
}
@@ -705,7 +700,6 @@ class TemporaryScope BASE_EMBEDDED {
// Properties count estimation.
int expected_property_count_;
- bool only_this_property_assignments_;
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
@@ -720,7 +714,6 @@ class TemporaryScope BASE_EMBEDDED {
TemporaryScope::TemporaryScope(Parser* parser)
: materialized_literal_count_(0),
expected_property_count_(0),
- only_this_property_assignments_(false),
only_simple_this_property_assignments_(false),
this_property_assignments_(Factory::empty_fixed_array()),
parser_(parser),
@@ -1227,7 +1220,6 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
body.elements(),
temp_scope.materialized_literal_count(),
temp_scope.expected_property_count(),
- temp_scope.only_this_property_assignments(),
temp_scope.only_simple_this_property_assignments(),
temp_scope.this_property_assignments(),
0,
@@ -1339,7 +1331,7 @@ class ParserFinder {
// An InitializationBlockFinder finds and marks sequences of statements of the
-// form x.y.z.a = ...; x.y.z.b = ...; etc.
+// form expr.a = ...; expr.b = ...; etc.
class InitializationBlockFinder : public ParserFinder {
public:
InitializationBlockFinder()
@@ -1367,7 +1359,7 @@ class InitializationBlockFinder : public ParserFinder {
private:
// Returns true if the expressions appear to denote the same object.
// In the context of initialization blocks, we only consider expressions
- // of the form 'x.y.z'.
+  // of the form 'expr.x' or 'expr["x"]'.
static bool SameObject(Expression* e1, Expression* e2) {
VariableProxy* v1 = e1->AsVariableProxy();
VariableProxy* v2 = e2->AsVariableProxy();
@@ -1441,16 +1433,15 @@ class InitializationBlockFinder : public ParserFinder {
class ThisNamedPropertyAssigmentFinder : public ParserFinder {
public:
ThisNamedPropertyAssigmentFinder()
- : only_this_property_assignments_(true),
- only_simple_this_property_assignments_(true),
+ : only_simple_this_property_assignments_(true),
names_(NULL),
assigned_arguments_(NULL),
assigned_constants_(NULL) {}
void Update(Scope* scope, Statement* stat) {
- // Bail out if function already has non this property assignment
- // statements.
- if (!only_this_property_assignments_) {
+    // Bail out if the function already has property assignments that are
+    // not simple this-property assignments.
+ if (!only_simple_this_property_assignments_) {
return;
}
@@ -1459,16 +1450,10 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
if (IsThisPropertyAssignment(assignment)) {
HandleThisPropertyAssignment(scope, assignment);
} else {
- only_this_property_assignments_ = false;
only_simple_this_property_assignments_ = false;
}
}
- // Returns whether only statements of the form this.x = ...; was encountered.
- bool only_this_property_assignments() {
- return only_this_property_assignments_;
- }
-
  // Returns whether only statements of the form this.x = y; where y is either
  // a constant or a function argument were encountered.
bool only_simple_this_property_assignments() {
@@ -1524,28 +1509,24 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
// Constant assigned.
Literal* literal = assignment->value()->AsLiteral();
AssignmentFromConstant(key, literal->handle());
+ return;
} else if (assignment->value()->AsVariableProxy() != NULL) {
// Variable assigned.
Handle<String> name =
assignment->value()->AsVariableProxy()->name();
// Check whether the variable assigned matches an argument name.
- int index = -1;
for (int i = 0; i < scope->num_parameters(); i++) {
if (*scope->parameter(i)->name() == *name) {
// Assigned from function argument.
- index = i;
- break;
+ AssignmentFromParameter(key, i);
+ return;
}
}
- if (index != -1) {
- AssignmentFromParameter(key, index);
- } else {
- AssignmentFromSomethingElse(key);
- }
- } else {
- AssignmentFromSomethingElse(key);
}
}
+ // It is not a simple "this.x = value;" assignment with a constant
+ // or parameter value.
+ AssignmentFromSomethingElse();
}
void AssignmentFromParameter(Handle<String> name, int index) {
@@ -1562,12 +1543,7 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
assigned_constants_->Add(value);
}
- void AssignmentFromSomethingElse(Handle<String> name) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(-1);
- assigned_constants_->Add(Factory::undefined_value());
-
+ void AssignmentFromSomethingElse() {
// The this assignment is not a simple one.
only_simple_this_property_assignments_ = false;
}
@@ -1582,7 +1558,6 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
}
}
- bool only_this_property_assignments_;
bool only_simple_this_property_assignments_;
ZoneStringList* names_;
ZoneList<int>* assigned_arguments_;
@@ -1623,11 +1598,11 @@ void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor,
// Propagate the collected information on this property assignments.
if (top_scope_->is_function_scope()) {
- if (this_property_assignment_finder.only_this_property_assignments()) {
+ bool only_simple_this_property_assignments =
+ this_property_assignment_finder.only_simple_this_property_assignments();
+ if (only_simple_this_property_assignments) {
temp_scope_->SetThisPropertyAssignmentInfo(
- this_property_assignment_finder.only_this_property_assignments(),
- this_property_assignment_finder.
- only_simple_this_property_assignments(),
+ only_simple_this_property_assignments,
this_property_assignment_finder.GetThisPropertyAssignments());
}
}
@@ -2567,6 +2542,12 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
Statement* body = ParseStatement(NULL, CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
+
+ if (loop != NULL) {
+ int position = scanner().location().beg_pos;
+ loop->set_condition_position(position);
+ }
+
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -3624,7 +3605,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
int materialized_literal_count;
int expected_property_count;
- bool only_this_property_assignments;
bool only_simple_this_property_assignments;
Handle<FixedArray> this_property_assignments;
if (is_lazily_compiled && pre_data() != NULL) {
@@ -3634,15 +3614,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
scanner_.SeekForward(end_pos);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
- only_this_property_assignments = false;
only_simple_this_property_assignments = false;
this_property_assignments = Factory::empty_fixed_array();
} else {
ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
- only_this_property_assignments =
- temp_scope.only_this_property_assignments();
only_simple_this_property_assignments =
temp_scope.only_simple_this_property_assignments();
this_property_assignments = temp_scope.this_property_assignments();
@@ -3664,7 +3641,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
body.elements(),
materialized_literal_count,
expected_property_count,
- only_this_property_assignments,
only_simple_this_property_assignments,
this_property_assignments,
num_parameters,
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 73d6eeb6..353d1654 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -84,8 +84,8 @@ void OS::Setup() {
}
-double OS::nan_value() {
- return NAN;
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // FreeBSD runs on anything.
}
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index fe4c31f5..bfcd8fba 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -84,9 +84,66 @@ void OS::Setup() {
}
-double OS::nan_value() {
- return NAN;
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
+ // Here gcc is telling us that we are on an ARM and gcc is assuming that we
+ // have VFP3 instructions. If gcc can assume it then so can we.
+ return 1u << VFP3;
+#else
+ return 0; // Linux runs on anything.
+#endif
+}
+
+
+#ifdef __arm__
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
+ // Simple detection of VFP at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to ARM (mid 2009), no similar
+ // facility is universally available on the ARM architectures,
+  // so it's up to individual OSes to provide it.
+ //
+  // This is written as a straightforward one-pass parser that does not use
+  // STL strings or ifstream because,
+ // on Linux, it's reading from a (non-mmap-able)
+ // character special device.
+ switch (feature) {
+ case VFP3:
+ search_string = "vfp";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ FILE* f = NULL;
+ const char* what = search_string;
+
+ if (NULL == (f = fopen(file_name, "r")))
+ return false;
+
+ int k;
+ while (EOF != (k = fgetc(f))) {
+ if (k == *what) {
+ ++what;
+ while ((*what != '\0') && (*what == fgetc(f))) {
+ ++what;
+ }
+ if (*what == '\0') {
+ fclose(f);
+ return true;
+ } else {
+ what = search_string;
+ }
+ }
+ }
+ fclose(f);
+
+ // Did not find string in the proc file.
+ return false;
}
+#endif // def __arm__
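
A usage sketch for the probe above, not part of the change: note that the detection is a plain substring scan over /proc/cpuinfo, so a line such as "Features : swp half thumb fastmult vfpv3 edsp" also satisfies the "vfp" search.

#ifdef __arm__
  // Assumed call site: combine the runtime probe with the compile-time hint
  // from CpuFeaturesImpliedByPlatform().
  bool can_use_vfp3 = OS::ArmCpuHasFeature(VFP3);
#endif
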
int OS::ActivationFrameAlignment() {
@@ -232,7 +289,7 @@ void OS::LogSharedLibraryAddresses() {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
- FILE *fp = fopen("/proc/self/maps", "r");
+ FILE* fp = fopen("/proc/self/maps", "r");
if (fp == NULL) return;
// Allocate enough room to be able to store a full file name.
@@ -603,7 +660,7 @@ typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
- struct ucontext *uc_link;
+ struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
__sigset_t uc_sigmask;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 0b236a5a..0d5be45e 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -244,8 +244,11 @@ void OS::LogSharedLibraryAddresses() {
}
-double OS::nan_value() {
- return NAN;
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+  // Mac OS X requires all of these features in order to install, so we can
+  // assume they are present.  These constants are defined by the CPUID
+  // instruction.
+ const uint64_t one = 1;
+ return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
}
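
A side note on the 64-bit `one` constant above, with a minimal sketch: shifting a plain int literal is a 32-bit shift, which becomes undefined behaviour once a CpuFeature bit position reaches 31, so the 64-bit constant keeps the pattern safe as the enum grows.

const uint64_t one = 1;
uint64_t ok_bit = one << 40;   // 64-bit shift: well defined
// int bad_bit = 1 << 40;      // 32-bit int shift: undefined behaviour
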
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 084880e3..656c317b 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -150,11 +150,22 @@ int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
}
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0;
+}
+
+
double OS::nan_value() {
UNIMPLEMENTED();
return 0;
}
+
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+ UNIMPLEMENTED();
+  return false;
+}
+
+
bool OS::IsOutsideAllocatedSpace(void* address) {
UNIMPLEMENTED();
return false;
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
new file mode 100644
index 00000000..6d273047
--- /dev/null
+++ b/src/platform-openbsd.cc
@@ -0,0 +1,597 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for OpenBSD goes here. For the POSIX-compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/fcntl.h> // open
+#include <unistd.h> // getpagesize
+#include <execinfo.h> // backtrace, backtrace_symbols
+#include <strings.h> // index
+#include <errno.h>
+#include <stdarg.h>
+#include <limits.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on OpenBSD since tids and pids share a
+// name space and pid 0 is used to kill the group (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+  // Same correction as on OS X: values in (-1, 0) must round to -0.
+ if (-1.0 < x && x < 0.0) {
+ return -0.0;
+ } else {
+ return ceil(x);
+ }
+}
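
An illustrative check of the branch above (a conforming C99 ceil() already behaves this way; the wrapper guards against libms that return +0.0 for these inputs, an assumption inferred from the matching OS X workaround).

double a = ceiling(-0.5);  // -0.0, sign preserved
double b = ceiling(0.5);   // 1.0
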
+
+
+void OS::Setup() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // OpenBSD runs on anything.
+}
+
+
+int OS::ActivationFrameAlignment() {
+ // 16 byte alignment on OpenBSD
+ return 16;
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification).  The estimate is conservative, i.e., not all addresses
+// in 'allocated' space are actually allocated to our heap.  The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ lowest_ever_allocated = Min(lowest_ever_allocated, address);
+ highest_ever_allocated =
+ Max(highest_ever_allocated,
+ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+ return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+ return getpagesize();
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) {
+ LOG(StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
+ return mbase;
+}
+
+
+void OS::Free(void* buf, const size_t length) {
+ int result = munmap(buf, length);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+ UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+ UNIMPLEMENTED();
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+ unsigned int ms = static_cast<unsigned int>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ abort();
+}
+
+
+void OS::DebugBreak() {
+#if defined(__arm__) || defined(__thumb__)
+ asm("bkpt 0");
+#else
+ asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+static unsigned StringToLong(char* buffer) {
+ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+}
+#endif
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ int result = read(fd, addr_buffer + 2, 8);
+ if (result < 8) break;
+ unsigned start = StringToLong(addr_buffer);
+ result = read(fd, addr_buffer + 2, 1);
+ if (result < 1) break;
+ if (addr_buffer[2] != '-') break;
+ result = read(fd, addr_buffer + 2, 8);
+ if (result < 8) break;
+ unsigned end = StringToLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ int bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read >= MAP_LENGTH - 1)
+ break;
+ result = read(fd, buffer + bytes_read, 1);
+ if (result < 1) break;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ char* start_of_path = index(buffer, '/');
+ // There may be no filename in this line. Skip to next.
+ if (start_of_path == NULL) continue;
+ buffer[bytes_read] = 0;
+ LOG(SharedLibraryEvent(start_of_path, start, end));
+ }
+ close(fd);
+#endif
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ UNIMPLEMENTED();
+ return 1;
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+ size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd, kMmapFdOffset)) {
+ return false;
+ }
+
+ UpdateAllocatedSpaceLimits(address, size);
+ return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
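
A hedged usage sketch of the reserve/commit split implemented above: the constructor takes address space only (PROT_NONE plus MAP_NORESERVE, so no memory or swap is consumed), and Commit() later maps accessible pages over a subrange. The MB constant is assumed from globals.h.

VirtualMemory reservation(1 * MB);  // reserve 1 MB of address space
if (reservation.IsReserved()) {
  // Back the first page with real, writable (non-executable) memory.
  reservation.Commit(reservation.address(), OS::AllocateAlignment(), false);
}
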
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(ThreadHandle::Kind kind) {
+ Initialize(kind);
+ }
+
+ void Initialize(ThreadHandle::Kind kind) {
+ switch (kind) {
+ case ThreadHandle::SELF: thread_ = pthread_self(); break;
+ case ThreadHandle::INVALID: thread_ = kNoThread; break;
+ }
+ }
+ pthread_t thread_; // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+ data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+ data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+ delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+ return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+ return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->thread_handle_data()->thread_ = pthread_self();
+ ASSERT(thread->IsValid());
+ thread->Run();
+ return NULL;
+}
+
+
+void Thread::Start() {
+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+ ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+ pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ USE(result);
+ ASSERT(result == 0);
+ return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ int result = pthread_key_delete(pthread_key);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class OpenBSDMutex : public Mutex {
+ public:
+
+ OpenBSDMutex() {
+ pthread_mutexattr_t attrs;
+ int result = pthread_mutexattr_init(&attrs);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attrs);
+ ASSERT(result == 0);
+ }
+
+ virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ return result;
+ }
+
+ virtual int Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ return result;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new OpenBSDMutex();
+}
+
+
+class OpenBSDSemaphore : public Semaphore {
+ public:
+ explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
+ virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
+
+ virtual void Wait();
+ virtual bool Wait(int timeout);
+ virtual void Signal() { sem_post(&sem_); }
+ private:
+ sem_t sem_;
+};
+
+
+void OpenBSDSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+bool OpenBSDSemaphore::Wait(int timeout) {
+ const long kOneSecondMicros = 1000000; // NOLINT
+
+  // Split timeout into second and microsecond parts.
+ struct timeval delta;
+ delta.tv_usec = timeout % kOneSecondMicros;
+ delta.tv_sec = timeout / kOneSecondMicros;
+
+ struct timeval current_time;
+ // Get the current time.
+ if (gettimeofday(&current_time, NULL) == -1) {
+ return false;
+ }
+
+ // Calculate time for end of timeout.
+ struct timeval end_time;
+ timeradd(&current_time, &delta, &end_time);
+
+  while (true) {
+    int result = sem_trywait(&sem_);
+    if (result == 0) return true;  // Successfully got semaphore.
+    // sem_trywait reports an unavailable semaphore with EAGAIN (it never
+    // sets ETIMEDOUT), so enforce the deadline computed above by hand.
+    CHECK(result == -1 && (errno == EAGAIN || errno == EINTR));
+    if (gettimeofday(&current_time, NULL) == -1) return false;
+    if (!timercmp(&current_time, &end_time, <)) return false;  // Timeout.
+    usleep(100);  // Back off briefly before polling again.
+  }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+ return new OpenBSDSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ if (active_sampler_ == NULL) return;
+
+ TickSample sample;
+
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
+ active_sampler_->Tick(&sample);
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() {
+ signal_handler_installed_ = false;
+ }
+
+ bool signal_handler_installed_;
+ struct sigaction old_signal_handler_;
+ struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+ : interval_(interval), profiling_(profiling), active_(false) {
+ data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+ delete data_;
+}
+
+
+void Sampler::Start() {
+  // There can only be one active sampler at a time on POSIX
+  // platforms.
+ if (active_sampler_ != NULL) return;
+
+ // Request profiling signals.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+ data_->signal_handler_installed_ = true;
+
+ // Set the itimer to generate a tick for each interval.
+ itimerval itimer;
+ itimer.it_interval.tv_sec = interval_ / 1000;
+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+ // Set this sampler as the active sampler.
+ active_sampler_ = this;
+ active_ = true;
+}
+
+
+void Sampler::Stop() {
+ // Restore old signal handler
+ if (data_->signal_handler_installed_) {
+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+ sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+ data_->signal_handler_installed_ = false;
+ }
+
+ // This sampler is no longer the active sampler.
+ active_sampler_ = NULL;
+ active_ = false;
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 1e1245c5..41e0e64f 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -27,7 +27,7 @@
// Platform specific code for POSIX goes here. This is not a platform on its
// own but contains the parts which are the same across POSIX platforms Linux,
-// Mac OS and FreeBSD.
+// Mac OS, FreeBSD and OpenBSD.
#include <unistd.h>
#include <errno.h>
@@ -61,6 +61,13 @@ double modulo(double x, double y) {
return fmod(x, y);
}
+
+double OS::nan_value() {
+ // NAN from math.h is defined in C99 and not in POSIX.
+ return NAN;
+}
+
+
// ----------------------------------------------------------------------------
// POSIX date/time support.
//
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 54d7b370..1be4b77f 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -48,10 +48,10 @@
#ifndef NOMCX
#define NOMCX
#endif
-// Require Windows 2000 or higher (this is required for the IsDebuggerPresent
+// Require Windows XP or higher (this is required for the RtlCaptureContext
// function to be present).
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x500
+#define _WIN32_WINNT 0x501
#endif
#include <windows.h>
@@ -839,7 +839,7 @@ void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
// VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, GetPageSize());
+ size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
  // Windows XP SP2 allows Data Execution Prevention (DEP).
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
@@ -852,7 +852,7 @@ void* OS::Allocate(const size_t requested,
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
+ UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
return mbase;
}
@@ -1208,22 +1208,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// Capture current context.
CONTEXT context;
- memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_CONTROL;
- context.ContextFlags = CONTEXT_CONTROL;
-#ifdef _WIN64
- // TODO(X64): Implement context capture.
-#else
- __asm call x
- __asm x: pop eax
- __asm mov context.Eip, eax
- __asm mov context.Ebp, ebp
- __asm mov context.Esp, esp
- // NOTE: At some point, we could use RtlCaptureContext(&context) to
- // capture the context instead of inline assembler. However it is
- // only available on XP, Vista, Server 2003 and Server 2008 which
- // might not be sufficient.
-#endif
+ RtlCaptureContext(&context);
// Initialize the stack walking
STACKFRAME64 stack_frame;
@@ -1331,9 +1316,16 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // Windows runs on anything.
+}
+
+
double OS::nan_value() {
#ifdef _MSC_VER
- static const __int64 nanval = 0xfff8000000000000;
+  // A positive quiet NaN with no payload (a.k.a. Indeterminate) has all
+  // mask bits set, so the value equals the mask.
+ static const __int64 nanval = kQuietNaNMask;
return *reinterpret_cast<const double*>(&nanval);
#else // _MSC_VER
return NAN;
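
For reference, a sketch of the assumed kQuietNaNMask layout (the definition lives in globals.h, outside this diff): the 11 exponent bits 52..62 all set plus the quiet bit 51, giving 0x7FF8000000000000.

const uint64_t kQuietNaNMaskSketch = static_cast<uint64_t>(0xfff) << 51;
// 0xfff << 51 covers bits 51..62: the exponent field and the quiet bit.
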
@@ -1374,7 +1366,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
return true;
}
@@ -1702,7 +1694,9 @@ bool Win32Socket::Connect(const char* host, const char* port) {
}
// Connect.
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
+ status = connect(socket_,
+ result->ai_addr,
+ static_cast<int>(result->ai_addrlen));
freeaddrinfo(result);
return status == 0;
}
diff --git a/src/platform.h b/src/platform.h
index fefe4b85..75e557cb 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -247,9 +247,20 @@ class OS {
// for.
static void LogSharedLibraryAddresses();
+ // The return value indicates the CPU features we are sure of because of the
+ // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
+ // instructions.
+  // This is a little messy because the interpretation depends on the
+  // combination of CPU and OS.  The bits in the answer correspond to the bit
+  // positions indicated by the members of the CpuFeature enum from globals.h.
+ static uint64_t CpuFeaturesImpliedByPlatform();
+
// Returns the double constant NAN
static double nan_value();
+ // Support runtime detection of VFP3 on ARM CPUs.
+ static bool ArmCpuHasFeature(CpuFeature feature);
+
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
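
A hypothetical sketch (not part of the change) of how a port combines the two probes declared above when deciding which instruction sets to emit: start from what the OS guarantees, then OR in anything detected at runtime.

uint64_t features = OS::CpuFeaturesImpliedByPlatform();
#ifdef __arm__
if (OS::ArmCpuHasFeature(VFP3)) {
  features |= static_cast<uint64_t>(1) << VFP3;  // bit position per globals.h
}
#endif
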
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 10c1ea87..87da0264 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -1339,9 +1339,6 @@ void JsonAstBuilder::VisitSlot(Slot* expr) {
case Slot::LOOKUP:
AddAttribute("type", "LOOKUP");
break;
- case Slot::GLOBAL:
- AddAttribute("type", "GLOBAL");
- break;
}
AddAttribute("index", expr->index());
}
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 0d00ceec..9ae19d72 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -30,13 +30,7 @@
#include "assembler.h"
#include "regexp-stack.h"
#include "regexp-macro-assembler.h"
-#if V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#elif V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
-#endif
+#include "simulator.h"
namespace v8 {
namespace internal {
@@ -130,11 +124,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
if (StringShape(subject_ptr).IsCons()) {
subject_ptr = ConsString::cast(subject_ptr)->first();
- } else if (StringShape(subject_ptr).IsSliced()) {
- SlicedString* slice = SlicedString::cast(subject_ptr);
- start_offset += slice->start();
- end_offset += slice->start();
- subject_ptr = slice->buffer();
}
// Ensure that an underlying string has the same ascii-ness.
ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 26aab2c7..aa01096d 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -215,22 +215,6 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
bool at_start);
};
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
- const char* GetName() { return "RegExpCEntryStub"; }
-};
-
#endif // V8_NATIVE_REGEXP
} } // namespace v8::internal
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
index 87a674db..7696279a 100644
--- a/src/regexp-stack.cc
+++ b/src/regexp-stack.cc
@@ -81,7 +81,7 @@ Address RegExpStack::EnsureCapacity(size_t size) {
if (size > kMaximumStackSize) return NULL;
if (size < kMinimumStackSize) size = kMinimumStackSize;
if (thread_local_.memory_size_ < size) {
- Address new_memory = NewArray<byte>(size);
+ Address new_memory = NewArray<byte>(static_cast<int>(size));
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
memcpy(reinterpret_cast<void*>(
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index 319ab289..fbaa6fbb 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -68,7 +68,9 @@ class RegExpStack {
static Address EnsureCapacity(size_t size);
// Thread local archiving.
- static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
+ static int ArchiveSpacePerThread() {
+ return static_cast<int>(sizeof(thread_local_));
+ }
static char* ArchiveStack(char* to);
static char* RestoreStack(char* from);
static void FreeThreadResources() { thread_local_.Free(); }
diff --git a/src/runtime.cc b/src/runtime.cc
index 8fd62c98..b07361aa 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -719,12 +719,15 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
if (*initial_value != NULL) {
if (index >= 0) {
// The variable or constant context slot should always be in
- // the function context; not in any outer context nor in the
- // arguments object.
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
+ // the function context or the arguments object.
+ if (holder->IsContext()) {
+ ASSERT(holder.is_identical_to(context));
+ if (((attributes & READ_ONLY) == 0) ||
+ context->get(index)->IsTheHole()) {
+ context->set(index, *initial_value);
+ }
+ } else {
+ Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
@@ -788,51 +791,72 @@ static Object* Runtime_InitializeVarGlobal(Arguments args) {
// case of callbacks in the prototype chain (this rules out using
// SetProperty). We have IgnoreAttributesAndSetLocalProperty for
// this.
+ // Note that objects can have hidden prototypes, so we need to traverse
+ // the whole chain of hidden prototypes to do a 'local' lookup.
+ JSObject* real_holder = global;
LookupResult lookup;
- global->LocalLookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- if (assign) {
- return global->IgnoreAttributesAndSetLocalProperty(*name,
- args[1],
- attributes);
+ while (true) {
+ real_holder->LocalLookup(*name, &lookup);
+ if (lookup.IsProperty()) {
+ // Determine if this is a redeclaration of something read-only.
+ if (lookup.IsReadOnly()) {
+        // If we found a read-only property on one of the hidden
+        // prototypes, just shadow it.
+ if (real_holder != Top::context()->global()) break;
+ return ThrowRedeclarationError("const", name);
+ }
+
+ // Determine if this is a redeclaration of an intercepted read-only
+ // property and figure out if the property exists at all.
+ bool found = true;
+ PropertyType type = lookup.type();
+ if (type == INTERCEPTOR) {
+ HandleScope handle_scope;
+ Handle<JSObject> holder(real_holder);
+ PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+ real_holder = *holder;
+ if (intercepted == ABSENT) {
+ // The interceptor claims the property isn't there. We need to
+ // make sure to introduce it.
+ found = false;
+ } else if ((intercepted & READ_ONLY) != 0) {
+ // The property is present, but read-only. Since we're trying to
+ // overwrite it with a variable declaration we must throw a
+          // re-declaration error. However, if we found a read-only property
+          // on one of the hidden prototypes, just shadow it.
+ if (real_holder != Top::context()->global()) break;
+ return ThrowRedeclarationError("const", name);
+ }
+ }
+
+ if (found && !assign) {
+ // The global property is there and we're not assigning any value
+ // to it. Just return.
+ return Heap::undefined_value();
+ }
+
+ // Assign the value (or undefined) to the property.
+ Object* value = (assign) ? args[1] : Heap::undefined_value();
+ return real_holder->SetProperty(&lookup, *name, value, attributes);
}
- return Heap::undefined_value();
- }
- // Determine if this is a redeclaration of something read-only.
- if (lookup.IsReadOnly()) {
- return ThrowRedeclarationError("const", name);
- }
+ Object* proto = real_holder->GetPrototype();
+ if (!proto->IsJSObject())
+ break;
- // Determine if this is a redeclaration of an intercepted read-only
- // property and figure out if the property exists at all.
- bool found = true;
- PropertyType type = lookup.type();
- if (type == INTERCEPTOR) {
- PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
- if (intercepted == ABSENT) {
- // The interceptor claims the property isn't there. We need to
- // make sure to introduce it.
- found = false;
- } else if ((intercepted & READ_ONLY) != 0) {
- // The property is present, but read-only. Since we're trying to
- // overwrite it with a variable declaration we must throw a
- // re-declaration error.
- return ThrowRedeclarationError("const", name);
- }
- // Restore global object from context (in case of GC).
- global = Top::context()->global();
- }
+ if (!JSObject::cast(proto)->map()->is_hidden_prototype())
+ break;
- if (found && !assign) {
- // The global property is there and we're not assigning any value
- // to it. Just return.
- return Heap::undefined_value();
+ real_holder = JSObject::cast(proto);
}
- // Assign the value (or undefined) to the property.
- Object* value = (assign) ? args[1] : Heap::undefined_value();
- return global->SetProperty(&lookup, *name, value, attributes);
+ global = Top::context()->global();
+ if (assign) {
+ return global->IgnoreAttributesAndSetLocalProperty(*name,
+ args[1],
+ attributes);
+ }
+ return Heap::undefined_value();
}
@@ -1274,7 +1298,9 @@ static Object* CharCodeAt(String* subject, Object* index) {
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
- subject->TryFlattenIfNotFlat();
+ Object* flat = subject->TryFlatten();
+ if (flat->IsFailure()) return flat;
+ subject = String::cast(flat);
if (i >= static_cast<uint32_t>(subject->length())) {
return Heap::nan_value();
}
@@ -1357,8 +1383,9 @@ class ReplacementStringBuilder {
StringBuilderSubstringPosition::encode(from);
AddElement(Smi::FromInt(encoded_slice));
} else {
- Handle<String> slice = Factory::NewStringSlice(subject_, from, to);
- AddElement(*slice);
+ // Otherwise encode as two smis.
+ AddElement(Smi::FromInt(-length));
+ AddElement(Smi::FromInt(from));
}
IncrementCharacterCount(length);
}
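
A sketch of the two slice encodings used above, with field widths assumed from the decode path later in this patch (shift by 11, mask 0x7ff):

// One smi, when position and length fit their bit fields:
//   encoded = (from << 11) | length;
// Two smis otherwise, with the sign of the first element as the marker:
int from = 3;
int length = 5000;    // does not fit in 11 bits
int first = -length;  // first array element
int second = from;    // second array element
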
@@ -1642,16 +1669,14 @@ void CompiledReplacement::Compile(Handle<String> replacement,
capture_count,
subject_length);
}
- // Find substrings of replacement string and create them as String objects..
+ // Find substrings of replacement string and create them as String objects.
int substring_index = 0;
for (int i = 0, n = parts_.length(); i < n; i++) {
int tag = parts_[i].tag;
if (tag <= 0) { // A replacement string slice.
int from = -tag;
int to = parts_[i].data;
- replacement_substrings_.Add(Factory::NewStringSlice(replacement,
- from,
- to));
+ replacement_substrings_.Add(Factory::NewSubString(replacement, from, to));
parts_[i].tag = REPLACEMENT_SUBSTRING;
parts_[i].data = substring_index;
substring_index++;
@@ -1749,9 +1774,10 @@ static Object* StringReplaceRegExpWithString(String* subject,
// Index of end of last match.
int prev = 0;
- // Number of parts added by compiled replacement plus preceeding string
- // and possibly suffix after last match.
- const int parts_added_per_loop = compiled_replacement.parts() + 2;
+  // Number of parts added by compiled replacement plus preceding
+ // string and possibly suffix after last match. It is possible for
+ // all components to use two elements when encoded as two smis.
+ const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
bool matched = true;
do {
ASSERT(last_match_info_handle->HasFastElements());
@@ -2223,8 +2249,8 @@ int Runtime::StringMatch(Handle<String> sub,
if (pos == NULL) {
return -1;
}
- return reinterpret_cast<const char*>(pos) - ascii_vector.start()
- + start_index;
+ return static_cast<int>(reinterpret_cast<const char*>(pos)
+ - ascii_vector.start() + start_index);
}
return SingleCharIndexOf(sub->ToUC16Vector(), pat->Get(0), start_index);
}
@@ -2349,21 +2375,29 @@ static Object* Runtime_StringLocaleCompare(Arguments args) {
}
-static Object* Runtime_StringSlice(Arguments args) {
+static Object* Runtime_SubString(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(String, value, args[0]);
- CONVERT_DOUBLE_CHECKED(from_number, args[1]);
- CONVERT_DOUBLE_CHECKED(to_number, args[2]);
-
- int start = FastD2I(from_number);
- int end = FastD2I(to_number);
-
+ Object* from = args[1];
+ Object* to = args[2];
+ int start, end;
+ // We have a fast integer-only case here to avoid a conversion to double in
+ // the common case where from and to are Smis.
+ if (from->IsSmi() && to->IsSmi()) {
+ start = Smi::cast(from)->value();
+ end = Smi::cast(to)->value();
+ } else {
+ CONVERT_DOUBLE_CHECKED(from_number, from);
+ CONVERT_DOUBLE_CHECKED(to_number, to);
+ start = FastD2I(from_number);
+ end = FastD2I(to_number);
+ }
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
- return value->Slice(start, end);
+ return value->SubString(start, end);
}
@@ -2410,7 +2444,7 @@ static Object* Runtime_StringMatch(Arguments args) {
for (int i = 0; i < matches ; i++) {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- elements->set(i, *Factory::NewStringSlice(subject, from, to));
+ elements->set(i, *Factory::NewSubString(subject, from, to));
}
Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
@@ -3385,8 +3419,7 @@ static Object* Runtime_StringParseInt(Arguments args) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- CONVERT_DOUBLE_CHECKED(n, args[1]);
- int radix = FastD2I(n);
+ CONVERT_SMI_CHECKED(radix, args[1]);
s->TryFlattenIfNotFlat();
@@ -3611,7 +3644,7 @@ static Object* Runtime_StringTrim(Arguments args) {
right--;
}
}
- return s->Slice(left, right);
+ return s->SubString(left, right);
}
bool Runtime::IsUpperCaseChar(uint16_t ch) {
@@ -3753,6 +3786,7 @@ static Object* Runtime_StringAdd(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]);
+ Counters::string_add_runtime.Increment();
return Heap::AllocateConsString(str1, str2);
}
@@ -3766,9 +3800,21 @@ static inline void StringBuilderConcatHelper(String* special,
for (int i = 0; i < array_length; i++) {
Object* element = fixed_array->get(i);
if (element->IsSmi()) {
+ // Smi encoding of position and length.
int encoded_slice = Smi::cast(element)->value();
- int pos = StringBuilderSubstringPosition::decode(encoded_slice);
- int len = StringBuilderSubstringLength::decode(encoded_slice);
+ int pos;
+ int len;
+ if (encoded_slice > 0) {
+ // Position and length encoded in one smi.
+ pos = StringBuilderSubstringPosition::decode(encoded_slice);
+ len = StringBuilderSubstringLength::decode(encoded_slice);
+ } else {
+ // Position and length encoded in two smis.
+ Object* obj = fixed_array->get(++i);
+ ASSERT(obj->IsSmi());
+ pos = Smi::cast(obj)->value();
+ len = -encoded_slice;
+ }
String::WriteToFlat(special,
sink + position,
pos,
@@ -3789,6 +3835,10 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(String, special, args[1]);
+
+ // This assumption is used by the slice encoding in one or two smis.
+ ASSERT(Smi::kMaxValue >= String::kMaxLength);
+
int special_length = special->length();
Object* smi_array_length = array->length();
if (!smi_array_length->IsSmi()) {
@@ -3816,13 +3866,29 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
+ // Smi encoding of position and length.
int len = Smi::cast(elt)->value();
- int pos = len >> 11;
- len &= 0x7ff;
- if (pos + len > special_length) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ if (len > 0) {
+ // Position and length encoded in one smi.
+ int pos = len >> 11;
+ len &= 0x7ff;
+ if (pos + len > special_length) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+ position += len;
+ } else {
+ // Position and length encoded in two smis.
+ position += (-len);
+ // Get the position and check that it is also a smi.
+ i++;
+ if (i >= array_length) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+ Object* pos = fixed_array->get(i);
+ if (!pos->IsSmi()) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
}
- position += len;
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
@@ -4336,8 +4402,6 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
Object* result = Heap::AllocateArgumentsObject(callee, length);
if (result->IsFailure()) return result;
- ASSERT(Heap::InNewSpace(result));
-
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
@@ -4350,8 +4414,7 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
- JSObject::cast(result)->set_elements(FixedArray::cast(obj),
- SKIP_WRITE_BARRIER);
+ JSObject::cast(result)->set_elements(FixedArray::cast(obj));
}
return result;
}
@@ -4797,6 +4860,12 @@ static Object* Runtime_ReThrow(Arguments args) {
}
+static Object* Runtime_PromoteScheduledException(Arguments args) {
+ ASSERT_EQ(0, args.length());
+ return Top::PromoteScheduledException();
+}
+
+
static Object* Runtime_ThrowReferenceError(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -4943,6 +5012,9 @@ static Object* Runtime_DebugPrint(Arguments args) {
PrintF("DebugPrint: ");
}
args[0]->Print();
+ if (args[0]->IsHeapObject()) {
+ HeapObject::cast(args[0])->map()->Print();
+ }
#else
// ShortPrint is available in release mode. Print is not.
args[0]->ShortPrint();
@@ -5964,14 +6036,33 @@ static Object* Runtime_DebugLocalPropertyNames(Arguments args) {
// Get the property names.
jsproto = obj;
+ int proto_with_hidden_properties = 0;
for (int i = 0; i < length; i++) {
jsproto->GetLocalPropertyNames(*names,
i == 0 ? 0 : local_property_count[i - 1]);
+ if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
+ proto_with_hidden_properties++;
+ }
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
+  // Filter out the name of the hidden properties object.
+ if (proto_with_hidden_properties > 0) {
+ Handle<FixedArray> old_names = names;
+ names = Factory::NewFixedArray(
+ names->length() - proto_with_hidden_properties);
+ int dest_pos = 0;
+ for (int i = 0; i < total_property_count; i++) {
+ Object* name = old_names->get(i);
+ if (name == Heap::hidden_symbol()) {
+ continue;
+ }
+ names->set(dest_pos++, name);
+ }
+ }
+
DeleteArray(local_property_count);
return *Factory::NewJSArrayWithElements(names);
}
@@ -6778,8 +6869,9 @@ static Object* Runtime_GetCFrames(Arguments args) {
// Get the stack walk text for this frame.
Handle<String> frame_text;
- if (strlen(frames[i].text) > 0) {
- Vector<const char> str(frames[i].text, strlen(frames[i].text));
+ int frame_text_length = StrLength(frames[i].text);
+ if (frame_text_length > 0) {
+ Vector<const char> str(frames[i].text, frame_text_length);
frame_text = Factory::NewStringFromAscii(str);
}
@@ -7246,7 +7338,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
// function(arguments,__source__) {return eval(__source__);}
static const char* source_str =
"(function(arguments,__source__){return eval(__source__);})";
- static const int source_str_length = strlen(source_str);
+ static const int source_str_length = StrLength(source_str);
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
source_str_length));
@@ -7603,8 +7695,31 @@ static Object* Runtime_FunctionGetInferredName(Arguments args) {
CONVERT_CHECKED(JSFunction, f, args[0]);
return f->shared()->inferred_name();
}
+
#endif // ENABLE_DEBUGGER_SUPPORT
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Object* Runtime_ProfilerResume(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(Smi, smi_modules, args[0]);
+ v8::V8::ResumeProfilerEx(smi_modules->value());
+ return Heap::undefined_value();
+}
+
+
+static Object* Runtime_ProfilerPause(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(Smi, smi_modules, args[0]);
+ v8::V8::PauseProfilerEx(smi_modules->value());
+ return Heap::undefined_value();
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
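
These two runtime entries simply forward to the embedder API; a hedged sketch of the corresponding direct calls (the signatures and the module flag are assumed from include/v8.h at this revision):

v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU);
v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU);
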
// Finds the script object from the script data. NOTE: This operation uses
// heap traversal to find the function generated for the source position
@@ -7711,7 +7826,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
Object* fun = frame->function();
Address pc = frame->pc();
Address start = frame->code()->address();
- Smi* offset = Smi::FromInt(pc - start);
+ Smi* offset = Smi::FromInt(static_cast<int>(pc - start));
FixedArray* elements = FixedArray::cast(result->elements());
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
@@ -7758,6 +7873,13 @@ static Object* Runtime_Abort(Arguments args) {
}
+static Object* Runtime_DeleteHandleScopeExtensions(Arguments args) {
+ ASSERT(args.length() == 0);
+ HandleScope::DeleteExtensions();
+ return Heap::undefined_value();
+}
+
+
#ifdef DEBUG
// ListNatives is ONLY used by the fuzz-natives.js in debug mode
// Exclude the code in release mode.
@@ -7770,7 +7892,8 @@ static Object* Runtime_ListNatives(Arguments args) {
{ \
HandleScope inner; \
Handle<String> name = \
- Factory::NewStringFromAscii(Vector<const char>(#Name, strlen(#Name))); \
+ Factory::NewStringFromAscii( \
+ Vector<const char>(#Name, StrLength(#Name))); \
Handle<JSArray> pair = Factory::NewJSArray(0); \
SetElement(pair, 0, name); \
SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \
diff --git a/src/runtime.h b/src/runtime.h
index 6b1ce480..85802331 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -149,7 +149,7 @@ namespace internal {
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
- F(StringSlice, 3, 1) \
+ F(SubString, 3, 1) \
F(StringReplaceRegExpWithString, 4, 1) \
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
@@ -234,6 +234,7 @@ namespace internal {
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(StackGuard, 1, 1) \
+ F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
F(NewContext, 1, 1) \
@@ -263,6 +264,8 @@ namespace internal {
F(Log, 2, 1) \
/* ES5 */ \
F(LocalKeys, 1, 1) \
+ /* Handle scopes */ \
+ F(DeleteHandleScopeExtensions, 0, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)
@@ -315,6 +318,14 @@ namespace internal {
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
+ F(ProfilerResume, 1, 1) \
+ F(ProfilerPause, 1, 1)
+#else
+#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
+#endif
+
#ifdef DEBUG
#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
/* Testing */ \
@@ -333,7 +344,8 @@ namespace internal {
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
+ RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+ RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
// ----------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
diff --git a/src/runtime.js b/src/runtime.js
index 789bfdb7..105749a7 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -128,7 +128,10 @@ function COMPARE(x, ncr) {
if (IS_STRING(a) && IS_STRING(b)) {
return %StringCompare(a, b);
} else {
- return %NumberCompare(%ToNumber(a), %ToNumber(b), ncr);
+ var a_number = %ToNumber(a);
+ var b_number = %ToNumber(b);
+ if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
+ return %NumberCompare(a_number, b_number, ncr);
}
}
@@ -143,16 +146,16 @@ function COMPARE(x, ncr) {
function ADD(x) {
// Fast case: Check for number operands and do the addition.
if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
- if (IS_STRING(this) && IS_STRING(x)) return %StringAdd(this, x);
+ if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
// Default implementation.
var a = %ToPrimitive(this, NO_HINT);
var b = %ToPrimitive(x, NO_HINT);
if (IS_STRING(a)) {
- return %StringAdd(a, %ToString(b));
+ return %_StringAdd(a, %ToString(b));
} else if (IS_STRING(b)) {
- return %StringAdd(%ToString(a), b);
+ return %_StringAdd(%ToString(a), b);
} else {
return %NumberAdd(%ToNumber(a), %ToNumber(b));
}
@@ -170,7 +173,7 @@ function STRING_ADD_LEFT(y) {
: %ToString(%ToPrimitive(y, NO_HINT));
}
}
- return %StringAdd(this, y);
+ return %_StringAdd(this, y);
}
@@ -186,7 +189,7 @@ function STRING_ADD_RIGHT(y) {
: %ToString(%ToPrimitive(x, NO_HINT));
}
}
- return %StringAdd(x, y);
+ return %_StringAdd(x, y);
}
diff --git a/src/scanner.cc b/src/scanner.cc
index 3dae414f..0d3b789f 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -49,17 +49,11 @@ StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_;
// ----------------------------------------------------------------------------
// UTF8Buffer
-UTF8Buffer::UTF8Buffer() {
- static const int kInitialCapacity = 1 * KB;
- data_ = NewArray<char>(kInitialCapacity);
- limit_ = ComputeLimit(data_, kInitialCapacity);
- Reset();
- ASSERT(Capacity() == kInitialCapacity && pos() == 0);
-}
+UTF8Buffer::UTF8Buffer() : data_(NULL), limit_(NULL) { }
UTF8Buffer::~UTF8Buffer() {
- DeleteArray(data_);
+ if (data_ != NULL) DeleteArray(data_);
}
@@ -69,7 +63,7 @@ void UTF8Buffer::AddCharSlow(uc32 c) {
int old_capacity = Capacity();
int old_position = pos();
int new_capacity =
- Min(old_capacity * 2, old_capacity + kCapacityGrowthLimit);
+ Min(old_capacity * 3, old_capacity + kCapacityGrowthLimit);
char* new_data = NewArray<char>(new_capacity);
memcpy(new_data, data_, old_position);
DeleteArray(data_);
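
Reviewer note: taken together with the scanner.h changes further down, UTF8Buffer now allocates lazily on the first Reset() and grows by tripling, capped per step. A simplified sketch of the same policy (plain byte buffer, no UTF-8 handling; the cap value is invented, V8's kCapacityGrowthLimit differs):

#include <cstddef>

class LazyBuffer {
 public:
  LazyBuffer() : data_(NULL), size_(0), capacity_(0) {}
  ~LazyBuffer() { delete[] data_; }

  void Reset() {
    if (data_ == NULL) {  // First use: allocate lazily.
      capacity_ = kInitialCapacity;
      data_ = new char[capacity_];
    }
    size_ = 0;
  }

  void Add(char c) {
    if (size_ == capacity_) Grow();
    data_[size_++] = c;
  }

 private:
  static const int kInitialCapacity = 256;
  static const int kGrowthLimit = 1024 * 1024;  // invented cap per step

  void Grow() {
    int tripled = capacity_ * 3;
    int capped = capacity_ + kGrowthLimit;
    int new_capacity = tripled < capped ? tripled : capped;
    char* new_data = new char[new_capacity];
    for (int i = 0; i < size_; i++) new_data[i] = data_[i];
    delete[] data_;
    data_ = new_data;
    capacity_ = new_capacity;
  }

  char* data_;
  int size_;
  int capacity_;
};
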
@@ -194,11 +188,142 @@ void TwoByteStringUTF16Buffer::SeekForward(int pos) {
// ----------------------------------------------------------------------------
+// Keyword Matcher
+KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
+ { "break", KEYWORD_PREFIX, Token::BREAK },
+ { NULL, C, Token::ILLEGAL },
+ { NULL, D, Token::ILLEGAL },
+ { "else", KEYWORD_PREFIX, Token::ELSE },
+ { NULL, F, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, I, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, N, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { "return", KEYWORD_PREFIX, Token::RETURN },
+ { "switch", KEYWORD_PREFIX, Token::SWITCH },
+ { NULL, T, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, V, Token::ILLEGAL },
+ { NULL, W, Token::ILLEGAL }
+};
+
+
+void KeywordMatcher::Step(uc32 input) {
+ switch (state_) {
+ case INITIAL: {
+ // Matching the first character is the only state with significant fanout.
+ // Match only lower-case letters in range 'b'..'w'.
+ unsigned int offset = input - kFirstCharRangeMin;
+ if (offset < kFirstCharRangeLength) {
+ state_ = first_states_[offset].state;
+ if (state_ == KEYWORD_PREFIX) {
+ keyword_ = first_states_[offset].keyword;
+ counter_ = 1;
+ keyword_token_ = first_states_[offset].token;
+ }
+ return;
+ }
+ break;
+ }
+ case KEYWORD_PREFIX:
+ if (keyword_[counter_] == input) {
+ ASSERT_NE(input, '\0');
+ counter_++;
+ if (keyword_[counter_] == '\0') {
+ state_ = KEYWORD_MATCHED;
+ token_ = keyword_token_;
+ }
+ return;
+ }
+ break;
+ case KEYWORD_MATCHED:
+ token_ = Token::IDENTIFIER;
+ break;
+ case C:
+ if (MatchState(input, 'a', CA)) return;
+ if (MatchState(input, 'o', CO)) return;
+ break;
+ case CA:
+ if (MatchKeywordStart(input, "case", 2, Token::CASE)) return;
+ if (MatchKeywordStart(input, "catch", 2, Token::CATCH)) return;
+ break;
+ case CO:
+ if (MatchState(input, 'n', CON)) return;
+ break;
+ case CON:
+ if (MatchKeywordStart(input, "const", 3, Token::CONST)) return;
+ if (MatchKeywordStart(input, "continue", 3, Token::CONTINUE)) return;
+ break;
+ case D:
+ if (MatchState(input, 'e', DE)) return;
+ if (MatchKeyword(input, 'o', KEYWORD_MATCHED, Token::DO)) return;
+ break;
+ case DE:
+ if (MatchKeywordStart(input, "debugger", 2, Token::DEBUGGER)) return;
+ if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
+ if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
+ break;
+ case F:
+ if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
+ if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
+ if (MatchKeywordStart(input, "for", 1, Token::FOR)) return;
+ if (MatchKeywordStart(input, "function", 1, Token::FUNCTION)) return;
+ break;
+ case I:
+ if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
+ if (MatchKeyword(input, 'n', IN, Token::IN)) return;
+ break;
+ case IN:
+ token_ = Token::IDENTIFIER;
+ if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) {
+ return;
+ }
+ break;
+ case N:
+ if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return;
+ if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
+ if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
+ break;
+ case T:
+ if (MatchState(input, 'h', TH)) return;
+ if (MatchState(input, 'r', TR)) return;
+ if (MatchKeywordStart(input, "typeof", 1, Token::TYPEOF)) return;
+ break;
+ case TH:
+ if (MatchKeywordStart(input, "this", 2, Token::THIS)) return;
+ if (MatchKeywordStart(input, "throw", 2, Token::THROW)) return;
+ break;
+ case TR:
+ if (MatchKeywordStart(input, "true", 2, Token::TRUE_LITERAL)) return;
+ if (MatchKeyword(input, 'y', KEYWORD_MATCHED, Token::TRY)) return;
+ break;
+ case V:
+ if (MatchKeywordStart(input, "var", 1, Token::VAR)) return;
+ if (MatchKeywordStart(input, "void", 1, Token::VOID)) return;
+ break;
+ case W:
+ if (MatchKeywordStart(input, "while", 1, Token::WHILE)) return;
+ if (MatchKeywordStart(input, "with", 1, Token::WITH)) return;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // On fallthrough, it's a failure.
+ state_ = UNMATCHABLE;
+}
+
+
+// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) {
- Token::Initialize();
-}
+Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) { }
void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
@@ -215,12 +340,11 @@ void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
position_ = position;
- // Reset literals buffer
- literals_.Reset();
-
// Set c0_ (one character ahead)
ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
+ // Initialize current_ to not refer to a literal buffer.
+ current_.literal_buffer = NULL;
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
@@ -253,17 +377,23 @@ Token::Value Scanner::Next() {
void Scanner::StartLiteral() {
- next_.literal_pos = literals_.pos();
+ // Use the first buffer unless it's currently in use by the current_ token.
+ // In most cases we won't have two literals/identifiers in a row, so
+ // the second buffer won't be used very often and is unlikely to grow much.
+ UTF8Buffer* free_buffer =
+ (current_.literal_buffer != &literal_buffer_1_) ? &literal_buffer_1_
+ : &literal_buffer_2_;
+ next_.literal_buffer = free_buffer;
+ free_buffer->Reset();
}
void Scanner::AddChar(uc32 c) {
- literals_.AddChar(c);
+ next_.literal_buffer->AddChar(c);
}
void Scanner::TerminateLiteral() {
- next_.literal_end = literals_.pos();
AddChar(0);
}
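
Reviewer note: StartLiteral() above implements double buffering — the literal of the current token must stay readable while the next token is scanned into the other buffer. A stripped-down sketch of the hand-off (simplified types, not the real Scanner):

// Whichever buffer the current token is NOT using becomes the scratch
// buffer for the next token, so literal_string() on the current token
// stays valid across one token of lookahead.
struct Buffer { char bytes[256]; };

struct TokenDesc { Buffer* literal_buffer; };

class TwoBufferScanner {
 public:
  TwoBufferScanner() {
    current_.literal_buffer = 0;
    next_.literal_buffer = 0;
  }

  Buffer* StartLiteral() {
    Buffer* free_buffer =
        (current_.literal_buffer != &buffer1_) ? &buffer1_ : &buffer2_;
    next_.literal_buffer = free_buffer;  // the next token writes here
    return free_buffer;
  }

  void Advance() { current_ = next_; }  // next token becomes current

 private:
  Buffer buffer1_;
  Buffer buffer2_;
  TokenDesc current_;
  TokenDesc next_;
};
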
@@ -383,6 +513,7 @@ Token::Value Scanner::ScanHtmlComment() {
void Scanner::Scan() {
+ next_.literal_buffer = NULL;
Token::Value token;
has_line_terminator_before_next_ = false;
do {
@@ -855,48 +986,40 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
Token::Value Scanner::ScanIdentifier() {
ASSERT(kIsIdentifierStart.get(c0_));
- bool has_escapes = false;
StartLiteral();
+ KeywordMatcher keyword_match;
+
// Scan identifier start character.
if (c0_ == '\\') {
- has_escapes = true;
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier start characters.
if (!kIsIdentifierStart.get(c)) return Token::ILLEGAL;
AddChar(c);
+ keyword_match.Fail();
} else {
AddChar(c0_);
+ keyword_match.AddChar(c0_);
Advance();
}
// Scan the rest of the identifier characters.
while (kIsIdentifierPart.get(c0_)) {
if (c0_ == '\\') {
- has_escapes = true;
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier part characters.
if (!kIsIdentifierPart.get(c)) return Token::ILLEGAL;
AddChar(c);
+ keyword_match.Fail();
} else {
AddChar(c0_);
+ keyword_match.AddChar(c0_);
Advance();
}
}
TerminateLiteral();
- // We don't have any 1-letter keywords (this is probably a common case).
- if ((next_.literal_end - next_.literal_pos) == 1) {
- return Token::IDENTIFIER;
- }
-
- // If the identifier contains unicode escapes, it must not be
- // resolved to a keyword.
- if (has_escapes) {
- return Token::IDENTIFIER;
- }
-
- return Token::Lookup(&literals_.data()[next_.literal_pos]);
+ return keyword_match.token();
}
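
Reviewer note: ScanIdentifier now feeds each character to the KeywordMatcher as it goes, so keyword recognition costs one table step per character instead of a lookup over the finished literal. A tiny two-keyword sketch of the same incremental idea (hypothetical TinyMatcher, far smaller than the real state machine above):

#include <cassert>

// Once the prefix uniquely determines a candidate keyword, compare the
// remaining characters one by one; any mismatch turns the token into an
// identifier for good.
class TinyMatcher {
 public:
  TinyMatcher() : keyword_(0), pos_(0), matched_(false), dead_(false) {}

  void AddChar(char c) {
    if (dead_) return;
    matched_ = false;
    if (keyword_ == 0) {                  // First character picks a candidate.
      if (c == 'i') keyword_ = "if";
      else if (c == 'v') keyword_ = "var";
      else { dead_ = true; return; }
      pos_ = 1;
    } else if (keyword_[pos_] == c) {     // Still on the unique candidate.
      pos_++;
    } else {
      dead_ = true;
      return;
    }
    matched_ = (keyword_[pos_] == '\0');  // Consumed the whole keyword?
  }

  bool IsKeyword() const { return matched_; }

 private:
  const char* keyword_;
  int pos_;
  bool matched_;
  bool dead_;
};

int main() {
  TinyMatcher m;
  m.AddChar('v'); m.AddChar('a'); m.AddChar('r');
  assert(m.IsKeyword());   // "var" is a keyword...
  m.AddChar('x');          // ...but "varx" is just an identifier.
  assert(!m.IsKeyword());
  return 0;
}
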
diff --git a/src/scanner.h b/src/scanner.h
index a201d0e9..9d7b34e7 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -41,6 +41,7 @@ class UTF8Buffer {
~UTF8Buffer();
void AddChar(uc32 c) {
+ ASSERT_NOT_NULL(data_);
if (cursor_ <= limit_ &&
static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
*cursor_++ = static_cast<char>(c);
@@ -49,17 +50,30 @@ class UTF8Buffer {
}
}
- void Reset() { cursor_ = data_; }
- int pos() const { return cursor_ - data_; }
+ void Reset() {
+ if (data_ == NULL) {
+ data_ = NewArray<char>(kInitialCapacity);
+ limit_ = ComputeLimit(data_, kInitialCapacity);
+ }
+ cursor_ = data_;
+ }
+
+ int pos() const {
+ ASSERT_NOT_NULL(data_);
+ return static_cast<int>(cursor_ - data_);
+ }
+
char* data() const { return data_; }
private:
+ static const int kInitialCapacity = 256;
char* data_;
char* cursor_;
char* limit_;
int Capacity() const {
- return (limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
+ ASSERT_NOT_NULL(data_);
+ return static_cast<int>(limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
}
static char* ComputeLimit(char* data, int capacity) {
@@ -123,6 +137,121 @@ class TwoByteStringUTF16Buffer: public UTF16Buffer {
};
+class KeywordMatcher {
+// Incrementally recognize keywords.
+//
+// Recognized keywords:
+// break case catch const* continue debugger* default delete do else
+// finally false for function if in instanceof native* new null
+// return switch this throw true try typeof var void while with
+//
+// *: Actually "future reserved keywords". These are the only ones we
+//    recognize; the rest are allowed as identifiers.
+ public:
+ KeywordMatcher() : state_(INITIAL), token_(Token::IDENTIFIER) {}
+
+ Token::Value token() { return token_; }
+
+ inline void AddChar(uc32 input) {
+ if (state_ != UNMATCHABLE) {
+ Step(input);
+ }
+ }
+
+ void Fail() {
+ token_ = Token::IDENTIFIER;
+ state_ = UNMATCHABLE;
+ }
+
+ private:
+ enum State {
+ UNMATCHABLE,
+ INITIAL,
+ KEYWORD_PREFIX,
+ KEYWORD_MATCHED,
+ C,
+ CA,
+ CO,
+ CON,
+ D,
+ DE,
+ F,
+ I,
+ IN,
+ N,
+ T,
+ TH,
+ TR,
+ V,
+ W
+ };
+
+ struct FirstState {
+ const char* keyword;
+ State state;
+ Token::Value token;
+ };
+
+ // Range of possible first characters of a keyword.
+ static const unsigned int kFirstCharRangeMin = 'b';
+ static const unsigned int kFirstCharRangeMax = 'w';
+ static const unsigned int kFirstCharRangeLength =
+ kFirstCharRangeMax - kFirstCharRangeMin + 1;
+ // State map for first keyword character range.
+ static FirstState first_states_[kFirstCharRangeLength];
+
+ // Current state.
+ State state_;
+ // Token for currently added characters.
+ Token::Value token_;
+
+ // Matching a specific keyword string (there is only one possible valid
+ // keyword with the current prefix).
+ const char* keyword_;
+ int counter_;
+ Token::Value keyword_token_;
+
+ // If input equals keyword's character at position, continue matching keyword
+ // from that position.
+ inline bool MatchKeywordStart(uc32 input,
+ const char* keyword,
+ int position,
+ Token::Value token_if_match) {
+ if (input == keyword[position]) {
+ state_ = KEYWORD_PREFIX;
+ this->keyword_ = keyword;
+ this->counter_ = position + 1;
+ this->keyword_token_ = token_if_match;
+ return true;
+ }
+ return false;
+ }
+
+ // If input equals match character, transition to new state and return true.
+ inline bool MatchState(uc32 input, char match, State new_state) {
+ if (input == match) {
+ state_ = new_state;
+ return true;
+ }
+ return false;
+ }
+
+ inline bool MatchKeyword(uc32 input,
+ char match,
+ State new_state,
+ Token::Value keyword_token) {
+ if (input == match) { // Matched a complete keyword, e.g. "do".
+ state_ = new_state;
+ token_ = keyword_token;
+ return true;
+ }
+ return false;
+ }
+
+ void Step(uc32 input);
+};
+
+
class Scanner {
public:
@@ -163,26 +292,30 @@ class Scanner {
// token returned by Next()). The string is 0-terminated and in
// UTF-8 format; it may contain 0-characters. Literal strings are
// collected for identifiers, strings, and numbers.
+ // These functions only give the correct result if the literal
+ // was scanned between calls to StartLiteral() and TerminateLiteral().
const char* literal_string() const {
- return &literals_.data()[current_.literal_pos];
+ return current_.literal_buffer->data();
}
int literal_length() const {
- return current_.literal_end - current_.literal_pos;
- }
-
- Vector<const char> next_literal() const {
- return Vector<const char>(next_literal_string(), next_literal_length());
+ // Excluding terminal '\0' added by TerminateLiteral().
+ return current_.literal_buffer->pos() - 1;
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
const char* next_literal_string() const {
- return &literals_.data()[next_.literal_pos];
+ return next_.literal_buffer->data();
}
// Returns the length of the next token (that would be returned if
// Next() were called).
int next_literal_length() const {
- return next_.literal_end - next_.literal_pos;
+ return next_.literal_buffer->pos() - 1;
+ }
+
+ Vector<const char> next_literal() const {
+ return Vector<const char>(next_literal_string(),
+ next_literal_length());
}
// Scans the input as a regular expression pattern, previous
@@ -224,7 +357,8 @@ class Scanner {
// Buffer to hold literal values (identifiers, strings, numbers)
// using 0-terminated UTF-8 encoding.
- UTF8Buffer literals_;
+ UTF8Buffer literal_buffer_1_;
+ UTF8Buffer literal_buffer_2_;
bool stack_overflow_;
static StaticResource<Utf8Decoder> utf8_decoder_;
@@ -236,7 +370,7 @@ class Scanner {
struct TokenDesc {
Token::Value token;
Location location;
- int literal_pos, literal_end;
+ UTF8Buffer* literal_buffer;
};
TokenDesc current_; // desc for current token (as returned by Next())
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 8a237fd0..8b989d7a 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -100,8 +100,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
break;
case Slot::LOOKUP:
- case Slot::GLOBAL:
- // these are currently not used
+ // This is currently not used.
UNREACHABLE();
break;
}
@@ -419,7 +418,7 @@ int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
Object** p0 = StackSlotEntriesAddr(code) + 1;
Object** p = p0;
while (*p != NULL) {
- if (*p == name) return p - p0;
+ if (*p == name) return static_cast<int>(p - p0);
p++;
}
}
@@ -450,7 +449,7 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
ReadInt(p + 1, &v);
Variable::Mode mode_value = static_cast<Variable::Mode>(v);
if (mode != NULL) *mode = mode_value;
- result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
ContextSlotCache::Update(code, name, mode_value, result);
return result;
}
@@ -482,7 +481,7 @@ int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
p = p0 + n;
while (p > p0) {
p--;
- if (*p == name) return p - p0;
+ if (*p == name) return static_cast<int>(p - p0);
}
}
return -1;
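
Reviewer note: the static_cast<int> additions above are 64-bit cleanliness fixes — a pointer difference has type ptrdiff_t, which is wider than int on LP64 targets. A hypothetical helper showing the pattern:

#include <cstddef>

// p - table has type std::ptrdiff_t (64 bits on LP64), so an int-returning
// index function needs an explicit narrowing cast.
int IndexOf(const char* const* table, int length, const char* name) {
  for (const char* const* p = table; p < table + length; p++) {
    if (*p == name) {                     // pointer identity, as in V8
      std::ptrdiff_t diff = p - table;    // 64-bit difference
      return static_cast<int>(diff);      // explicit, warning-free
    }
  }
  return -1;
}
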
diff --git a/src/scopes.cc b/src/scopes.cc
index 25873fac..7da06cdb 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -42,7 +42,7 @@ class ZoneAllocator: public Allocator {
/* nothing to do */
virtual ~ZoneAllocator() {}
- virtual void* New(size_t size) { return Zone::New(size); }
+ virtual void* New(size_t size) { return Zone::New(static_cast<int>(size)); }
/* ignored - Zone is freed in one fell swoop */
virtual void Delete(void* p) {}
@@ -540,11 +540,11 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
// Lookup a variable starting with this scope. The result is either
-// the statically resolved (local!) variable belonging to an outer scope,
-// or NULL. It may be NULL because a) we couldn't find a variable, or b)
-// because the variable is just a guess (and may be shadowed by another
-// variable that is introduced dynamically via an 'eval' call or a 'with'
-// statement).
+// the statically resolved variable belonging to an outer scope, or
+// NULL. It may be NULL because a) we couldn't find a variable, or b)
+// because the variable is just a guess (and may be shadowed by
+// another variable that is introduced dynamically via an 'eval' call
+// or a 'with' statement).
Variable* Scope::LookupRecursive(Handle<String> name,
bool inner_lookup,
Variable** invalidated_local) {
@@ -598,9 +598,11 @@ Variable* Scope::LookupRecursive(Handle<String> name,
if (inner_lookup)
var->is_accessed_from_inner_scope_ = true;
- // If the variable we have found is just a guess, invalidate the result.
+ // If the variable we have found is just a guess, invalidate the
+ // result. If the found variable is local, record that fact so we
+ // can generate fast code to get it if it is not shadowed by eval.
if (guess) {
- *invalidated_local = var;
+ if (!var->is_global()) *invalidated_local = var;
var = NULL;
}
diff --git a/src/serialize.cc b/src/serialize.cc
index 6ff1d7f5..899e2e7a 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -39,352 +39,73 @@
#include "stub-cache.h"
#include "v8threads.h"
#include "top.h"
+#include "bootstrapper.h"
namespace v8 {
namespace internal {
-// 32-bit encoding: a RelativeAddress must be able to fit in a
-// pointer: it is encoded as an Address with (from LS to MS bits):
-// - 2 bits identifying this as a HeapObject.
-// - 4 bits to encode the AllocationSpace (including special values for
-// code and fixed arrays in LO space)
-// - 27 bits identifying a word in the space, in one of three formats:
-// - paged spaces: 16 bits of page number, 11 bits of word offset in page
-// - NEW space: 27 bits of word offset
-// - LO space: 27 bits of page number
-
-const int kSpaceShift = kHeapObjectTagSize;
-const int kSpaceBits = 4;
-const int kSpaceMask = (1 << kSpaceBits) - 1;
-
-const int kOffsetShift = kSpaceShift + kSpaceBits;
-const int kOffsetBits = 11;
-const int kOffsetMask = (1 << kOffsetBits) - 1;
-
-const int kPageShift = kOffsetShift + kOffsetBits;
-const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
-const int kPageMask = (1 << kPageBits) - 1;
-
-const int kPageAndOffsetShift = kOffsetShift;
-const int kPageAndOffsetBits = kPageBits + kOffsetBits;
-const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
-
-// These values are special allocation space tags used for
-// serialization.
-// Mark the pages executable on platforms that support it.
-const int kLargeCode = LAST_SPACE + 1;
-// Allocate extra remembered-set bits.
-const int kLargeFixedArray = LAST_SPACE + 2;
-
-
-static inline AllocationSpace GetSpace(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
- if (space_number > LAST_SPACE) space_number = LO_SPACE;
- return static_cast<AllocationSpace>(space_number);
-}
-
-
-static inline bool IsLargeExecutableObject(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int space_number =
- (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
- return (space_number == kLargeCode);
-}
-
-
-static inline bool IsLargeFixedArray(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int space_number =
- (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
- return (space_number == kLargeFixedArray);
-}
-
-
-static inline int PageIndex(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- return static_cast<int>(encoded >> kPageShift) & kPageMask;
-}
-
-
-static inline int PageOffset(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask;
- return offset << kObjectAlignmentBits;
-}
-
-
-static inline int NewSpaceOffset(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int page_offset =
- static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
- return page_offset << kObjectAlignmentBits;
-}
-
-
-static inline int LargeObjectIndex(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
-}
-
-
-// A RelativeAddress encodes a heap address that is independent of
-// the actual memory addresses in real heap. The general case (for the
-// OLD, CODE and MAP spaces) is as a (space id, page number, page offset)
-// triple. The NEW space has page number == 0, because there are no
-// pages. The LARGE_OBJECT space has page offset = 0, since there is
-// exactly one object per page. RelativeAddresses are encodable as
-// Addresses, so that they can replace the map() pointers of
-// HeapObjects. The encoded Addresses are also encoded as HeapObjects
-// and allow for marking (is_marked() see mark(), clear_mark()...) as
-// used by the Mark-Compact collector.
-
-class RelativeAddress {
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class SerializationAddressMapper {
public:
- RelativeAddress(AllocationSpace space,
- int page_index,
- int page_offset)
- : space_(space), page_index_(page_index), page_offset_(page_offset) {
- // Assert that the space encoding (plus the two pseudo-spaces for
- // special large objects) fits in the available bits.
- ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
- ASSERT(space <= LAST_SPACE && space >= 0);
+ static bool IsMapped(HeapObject* obj) {
+ EnsureMapExists();
+ return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
}
- // Return the encoding of 'this' as an Address. Decode with constructor.
- Address Encode() const;
-
- AllocationSpace space() const {
- if (space_ > LAST_SPACE) return LO_SPACE;
- return static_cast<AllocationSpace>(space_);
- }
- int page_index() const { return page_index_; }
- int page_offset() const { return page_offset_; }
-
- bool in_paged_space() const {
- return space_ == CODE_SPACE ||
- space_ == OLD_POINTER_SPACE ||
- space_ == OLD_DATA_SPACE ||
- space_ == MAP_SPACE ||
- space_ == CELL_SPACE;
+ static int MappedTo(HeapObject* obj) {
+ ASSERT(IsMapped(obj));
+ return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
+ Hash(obj),
+ false)->value);
}
- void next_address(int offset) { page_offset_ += offset; }
- void next_page(int init_offset = 0) {
- page_index_++;
- page_offset_ = init_offset;
+ static void Map(HeapObject* obj, int to) {
+ EnsureMapExists();
+ ASSERT(!IsMapped(obj));
+ HashMap::Entry* entry =
+ serialization_map_->Lookup(Key(obj), Hash(obj), true);
+ entry->value = Value(to);
}
-#ifdef DEBUG
- void Verify();
-#endif
-
- void set_to_large_code_object() {
- ASSERT(space_ == LO_SPACE);
- space_ = kLargeCode;
- }
- void set_to_large_fixed_array() {
- ASSERT(space_ == LO_SPACE);
- space_ = kLargeFixedArray;
+ static void Zap() {
+ if (serialization_map_ != NULL) {
+ delete serialization_map_;
+ }
+ serialization_map_ = NULL;
}
-
private:
- int space_;
- int page_index_;
- int page_offset_;
-};
-
-
-Address RelativeAddress::Encode() const {
- ASSERT(page_index_ >= 0);
- int word_offset = 0;
- int result = 0;
- switch (space_) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE:
- ASSERT_EQ(0, page_index_ & ~kPageMask);
- word_offset = page_offset_ >> kObjectAlignmentBits;
- ASSERT_EQ(0, word_offset & ~kOffsetMask);
- result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
- break;
- case NEW_SPACE:
- ASSERT_EQ(0, page_index_);
- word_offset = page_offset_ >> kObjectAlignmentBits;
- ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
- result = word_offset << kPageAndOffsetShift;
- break;
- case LO_SPACE:
- case kLargeCode:
- case kLargeFixedArray:
- ASSERT_EQ(0, page_offset_);
- ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
- result = page_index_ << kPageAndOffsetShift;
- break;
+ static bool SerializationMatchFun(void* key1, void* key2) {
+ return key1 == key2;
}
- // OR in AllocationSpace and kHeapObjectTag
- ASSERT_EQ(0, space_ & ~kSpaceMask);
- result |= (space_ << kSpaceShift) | kHeapObjectTag;
- return reinterpret_cast<Address>(result);
-}
-
-#ifdef DEBUG
-void RelativeAddress::Verify() {
- ASSERT(page_offset_ >= 0 && page_index_ >= 0);
- switch (space_) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE:
- ASSERT(Page::kObjectStartOffset <= page_offset_ &&
- page_offset_ <= Page::kPageSize);
- break;
- case NEW_SPACE:
- ASSERT(page_index_ == 0);
- break;
- case LO_SPACE:
- case kLargeCode:
- case kLargeFixedArray:
- ASSERT(page_offset_ == 0);
- break;
+ static uint32_t Hash(HeapObject* obj) {
+ return reinterpret_cast<intptr_t>(obj->address());
}
-}
-#endif
-
-enum GCTreatment {
- DataObject, // Object that cannot contain a reference to new space.
- PointerObject, // Object that can contain a reference to new space.
- CodeObject // Object that contains executable code.
-};
-
-// A SimulatedHeapSpace simulates the allocation of objects in a page in
-// the heap. It uses linear allocation - that is, it doesn't simulate the
-// use of a free list. This simulated
-// allocation must exactly match that done by Heap.
-
-class SimulatedHeapSpace {
- public:
- // The default constructor initializes to an invalid state.
- SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}
- // Sets 'this' to the first address in 'space' that would be
- // returned by allocation in an empty heap.
- void InitEmptyHeap(AllocationSpace space);
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
- // Sets 'this' to the next address in 'space' that would be returned
- // by allocation in the current heap. Intended only for testing
- // serialization and deserialization in the current address space.
- void InitCurrentHeap(AllocationSpace space);
+ static void* Value(int v) {
+ return reinterpret_cast<void*>(v);
+ }
- // Returns the RelativeAddress where the next
- // object of 'size' bytes will be allocated, and updates 'this' to
- // point to the next free address beyond that object.
- RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);
+ static void EnsureMapExists() {
+ if (serialization_map_ == NULL) {
+ serialization_map_ = new HashMap(&SerializationMatchFun);
+ }
+ }
- private:
- RelativeAddress current_;
+ static HashMap* serialization_map_;
};
-void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
- switch (space) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE:
- current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
- break;
- case NEW_SPACE:
- case LO_SPACE:
- current_ = RelativeAddress(space, 0, 0);
- break;
- }
-}
-
+HashMap* SerializationAddressMapper::serialization_map_ = NULL;
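
Reviewer note: SerializationAddressMapper keys a hash map by each object's current address and stores the offset it will occupy after deserialization, so later references can be emitted as back references. A sketch of the same bookkeeping, with std::unordered_map standing in for V8's HashMap:

#include <cassert>
#include <unordered_map>

class AddressMapper {
 public:
  bool IsMapped(const void* obj) const { return map_.count(obj) != 0; }

  int MappedTo(const void* obj) const {
    assert(IsMapped(obj));
    return map_.at(obj);
  }

  void Map(const void* obj, int to) {
    assert(!IsMapped(obj));
    map_[obj] = to;
  }

 private:
  std::unordered_map<const void*, int> map_;
};

int main() {
  AddressMapper mapper;
  int dummy;                 // stand-in for a heap object
  mapper.Map(&dummy, 0x40);  // "will live at offset 0x40 after deserializing"
  assert(mapper.MappedTo(&dummy) == 0x40);
  return 0;
}
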
-void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
- switch (space) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE: {
- PagedSpace* ps;
- if (space == MAP_SPACE) {
- ps = Heap::map_space();
- } else if (space == CELL_SPACE) {
- ps = Heap::cell_space();
- } else if (space == OLD_POINTER_SPACE) {
- ps = Heap::old_pointer_space();
- } else if (space == OLD_DATA_SPACE) {
- ps = Heap::old_data_space();
- } else {
- ASSERT(space == CODE_SPACE);
- ps = Heap::code_space();
- }
- Address top = ps->top();
- Page* top_page = Page::FromAllocationTop(top);
- int page_index = 0;
- PageIterator it(ps, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- if (it.next() == top_page) break;
- page_index++;
- }
- current_ = RelativeAddress(space,
- page_index,
- top_page->Offset(top));
- break;
- }
- case NEW_SPACE:
- current_ = RelativeAddress(space,
- 0,
- Heap::NewSpaceTop() - Heap::NewSpaceStart());
- break;
- case LO_SPACE:
- int page_index = 0;
- for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
- page_index++;
- }
- current_ = RelativeAddress(space, page_index, 0);
- break;
- }
-}
-RelativeAddress SimulatedHeapSpace::Allocate(int size,
- GCTreatment special_gc_treatment) {
-#ifdef DEBUG
- current_.Verify();
-#endif
- int alloc_size = OBJECT_SIZE_ALIGN(size);
- if (current_.in_paged_space() &&
- current_.page_offset() + alloc_size > Page::kPageSize) {
- ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
- current_.next_page(Page::kObjectStartOffset);
- }
- RelativeAddress result = current_;
- if (current_.space() == LO_SPACE) {
- current_.next_page();
- if (special_gc_treatment == CodeObject) {
- result.set_to_large_code_object();
- } else if (special_gc_treatment == PointerObject) {
- result.set_to_large_fixed_array();
- }
- } else {
- current_.next_address(alloc_size);
- }
-#ifdef DEBUG
- current_.Verify();
- result.Verify();
-#endif
- return result;
-}
// -----------------------------------------------------------------------------
// Coding of external references.
@@ -489,12 +210,12 @@ void ExternalReferenceTable::Add(Address address,
TypeCode type,
uint16_t id,
const char* name) {
- CHECK_NE(NULL, address);
+ ASSERT_NE(NULL, address);
ExternalReferenceEntry entry;
entry.address = address;
entry.code = EncodeExternal(type, id);
entry.name = name;
- CHECK_NE(0, entry.code);
+ ASSERT_NE(0, entry.code);
refs_.Add(entry);
if (id > max_id_[type]) max_id_[type] = id;
}
@@ -575,7 +296,7 @@ void ExternalReferenceTable::PopulateTable() {
Debug::k_debug_break_return_address << kDebugIdShift,
"Debug::debug_break_return_address()");
const char* debug_register_format = "Debug::register_address(%i)";
- size_t dr_format_length = strlen(debug_register_format);
+ int dr_format_length = StrLength(debug_register_format);
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Vector<char> name = Vector<char>::New(dr_format_length + 1);
OS::SNPrintF(name, debug_register_format, i);
@@ -623,11 +344,11 @@ void ExternalReferenceTable::PopulateTable() {
#undef C
};
- size_t top_format_length = strlen(top_address_format) - 2;
+ int top_format_length = StrLength(top_address_format) - 2;
for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
const char* address_name = AddressNames[i];
Vector<char> name =
- Vector<char>::New(top_format_length + strlen(address_name) + 1);
+ Vector<char>::New(top_format_length + StrLength(address_name) + 1);
const char* chars = name.start();
OS::SNPrintF(name, top_address_format, address_name);
Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
@@ -688,76 +409,80 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
3,
"Heap::roots_address()");
- Add(ExternalReference::address_of_stack_guard_limit().address(),
+ Add(ExternalReference::address_of_stack_limit().address(),
UNCLASSIFIED,
4,
"StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_regexp_stack_limit().address(),
+ Add(ExternalReference::address_of_real_stack_limit().address(),
UNCLASSIFIED,
5,
+ "StackGuard::address_of_real_jslimit()");
+ Add(ExternalReference::address_of_regexp_stack_limit().address(),
+ UNCLASSIFIED,
+ 6,
"RegExpStack::limit_address()");
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
- 6,
+ 7,
"Heap::NewSpaceStart()");
Add(ExternalReference::heap_always_allocate_scope_depth().address(),
UNCLASSIFIED,
- 7,
+ 8,
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address().address(),
UNCLASSIFIED,
- 8,
+ 9,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address().address(),
UNCLASSIFIED,
- 9,
+ 10,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break().address(),
UNCLASSIFIED,
- 10,
+ 11,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
- 11,
+ 12,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
- 12,
+ 13,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
- 13,
+ 14,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
- 14,
+ 15,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
- 15,
+ 16,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
- 16,
+ 17,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
- 17,
+ 18,
"compare_doubles");
#ifdef V8_NATIVE_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
- 18,
+ 19,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state().address(),
UNCLASSIFIED,
- 19,
+ 20,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack().address(),
UNCLASSIFIED,
- 20,
+ 21,
"NativeRegExpMacroAssembler::GrowStack()");
#endif
}
@@ -823,264 +548,372 @@ ExternalReferenceDecoder::~ExternalReferenceDecoder() {
}
-//------------------------------------------------------------------------------
-// Implementation of Serializer
-
-
-// Helper class to write the bytes of the serialized heap.
-
-class SnapshotWriter {
- public:
- SnapshotWriter() {
- len_ = 0;
- max_ = 8 << 10; // 8K initial size
- str_ = NewArray<byte>(max_);
- }
-
- ~SnapshotWriter() {
- DeleteArray(str_);
- }
-
- void GetBytes(byte** str, int* len) {
- *str = NewArray<byte>(len_);
- memcpy(*str, str_, len_);
- *len = len_;
- }
-
- void Reserve(int bytes, int pos);
-
- void PutC(char c) {
- InsertC(c, len_);
- }
-
- void PutInt(int i) {
- InsertInt(i, len_);
- }
-
- void PutAddress(Address p) {
- PutBytes(reinterpret_cast<byte*>(&p), sizeof(p));
- }
-
- void PutBytes(const byte* a, int size) {
- InsertBytes(a, len_, size);
- }
-
- void PutString(const char* s) {
- InsertString(s, len_);
- }
-
- int InsertC(char c, int pos) {
- Reserve(1, pos);
- str_[pos] = c;
- len_++;
- return pos + 1;
- }
-
- int InsertInt(int i, int pos) {
- return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
- }
-
- int InsertBytes(const byte* a, int pos, int size) {
- Reserve(size, pos);
- memcpy(&str_[pos], a, size);
- len_ += size;
- return pos + size;
- }
-
- int InsertString(const char* s, int pos);
-
- int length() { return len_; }
-
- Address position() { return reinterpret_cast<Address>(&str_[len_]); }
-
- private:
- byte* str_; // the snapshot
- int len_; // the current length of str_
- int max_; // the allocated size of str_
-};
+bool Serializer::serialization_enabled_ = false;
+bool Serializer::too_late_to_enable_now_ = false;
-void SnapshotWriter::Reserve(int bytes, int pos) {
- CHECK(0 <= pos && pos <= len_);
- while (len_ + bytes >= max_) {
- max_ *= 2;
- byte* old = str_;
- str_ = NewArray<byte>(max_);
- memcpy(str_, old, len_);
- DeleteArray(old);
- }
- if (pos < len_) {
- byte* old = str_;
- str_ = NewArray<byte>(max_);
- memcpy(str_, old, pos);
- memcpy(str_ + pos + bytes, old + pos, len_ - pos);
- DeleteArray(old);
- }
+Deserializer::Deserializer(SnapshotByteSource* source)
+ : source_(source),
+ external_reference_decoder_(NULL) {
}
-int SnapshotWriter::InsertString(const char* s, int pos) {
- int size = strlen(s);
- pos = InsertC('[', pos);
- pos = InsertInt(size, pos);
- pos = InsertC(']', pos);
- return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
-}
-
-
-class ReferenceUpdater: public ObjectVisitor {
- public:
- ReferenceUpdater(HeapObject* obj, Serializer* serializer)
- : obj_address_(obj->address()),
- serializer_(serializer),
- reference_encoder_(serializer->reference_encoder_),
- offsets_(8),
- addresses_(8),
- offsets_32_bit_(0),
- data_32_bit_(0) {
- }
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; ++p) {
- if ((*p)->IsHeapObject()) {
- offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
- Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
- addresses_.Add(a);
- }
+// This routine allocates a new object and also keeps
+// track of where objects have been allocated so that we can
+// fix back references when deserializing.
+Address Deserializer::Allocate(int space_index, Space* space, int size) {
+ Address address;
+ if (!SpaceIsLarge(space_index)) {
+ ASSERT(!SpaceIsPaged(space_index) ||
+ size <= Page::kPageSize - Page::kObjectStartOffset);
+ Object* new_allocation;
+ if (space_index == NEW_SPACE) {
+ new_allocation = reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
+ } else {
+ new_allocation = reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
+ }
+ HeapObject* new_object = HeapObject::cast(new_allocation);
+ ASSERT(!new_object->IsFailure());
+ address = new_object->address();
+ high_water_[space_index] = address + size;
+ } else {
+ ASSERT(SpaceIsLarge(space_index));
+ ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
+ LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
+ Object* new_allocation;
+ if (space_index == kLargeData) {
+ new_allocation = lo_space->AllocateRaw(size);
+ } else if (space_index == kLargeFixedArray) {
+ new_allocation = lo_space->AllocateRawFixedArray(size);
+ } else {
+ ASSERT_EQ(kLargeCode, space_index);
+ new_allocation = lo_space->AllocateRawCode(size);
}
+ ASSERT(!new_allocation->IsFailure());
+ HeapObject* new_object = HeapObject::cast(new_allocation);
+ // Record all large objects in the same space.
+ address = new_object->address();
+ high_water_[LO_SPACE] = address + size;
}
+ last_object_address_ = address;
+ return address;
+}
- virtual void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Address encoded_target = serializer_->GetSavedAddress(target);
- // All calls and jumps are to code objects that encode into 32 bits.
- offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
- uint32_t small_target =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
- ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
- data_32_bit_.Add(small_target);
- }
+// This returns the address of an object that has been described in the
+// snapshot as being offset bytes back in a particular space.
+HeapObject* Deserializer::GetAddressFromEnd(int space) {
+ int offset = source_->GetInt();
+ ASSERT(!SpaceIsLarge(space));
+ offset <<= kObjectAlignmentBits;
+ return HeapObject::FromAddress(high_water_[space] - offset);
+}
- virtual void VisitExternalReferences(Address* start, Address* end) {
- for (Address* p = start; p < end; ++p) {
- uint32_t code = reference_encoder_->Encode(*p);
- CHECK(*p == NULL ? code == 0 : code != 0);
- offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
- addresses_.Add(reinterpret_cast<Address>(code));
- }
- }
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target = rinfo->target_address();
- uint32_t encoding = reference_encoder_->Encode(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- offsets_.Add(rinfo->target_address_address() - obj_address_);
- addresses_.Add(reinterpret_cast<Address>(encoding));
+// This returns the address of an object that has been described in the
+// snapshot as being offset bytes into a particular space.
+HeapObject* Deserializer::GetAddressFromStart(int space) {
+ int offset = source_->GetInt();
+ if (SpaceIsLarge(space)) {
+ // Large spaces have one object per 'page'.
+ return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
}
-
- void Update(Address start_address) {
- for (int i = 0; i < offsets_.length(); i++) {
- memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
- }
- for (int i = 0; i < offsets_32_bit_.length(); i++) {
- memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
- sizeof(uint32_t));
- }
+ offset <<= kObjectAlignmentBits;
+ if (space == NEW_SPACE) {
+ // New space has only one space - numbered 0.
+ return HeapObject::FromAddress(pages_[space][0] + offset);
}
-
- private:
- Address obj_address_;
- Serializer* serializer_;
- ExternalReferenceEncoder* reference_encoder_;
- List<int> offsets_;
- List<Address> addresses_;
- // Some updates are 32-bit even on a 64-bit platform.
- // We keep a separate list of them on 64-bit platforms.
- List<int> offsets_32_bit_;
- List<uint32_t> data_32_bit_;
-};
-
-
-// Helper functions for a map of encoded heap object addresses.
-static uint32_t HeapObjectHash(HeapObject* key) {
- uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
- return low32bits >> 2;
+ ASSERT(SpaceIsPaged(space));
+ int page_of_pointee = offset >> Page::kPageSizeBits;
+ Address object_address = pages_[space][page_of_pointee] +
+ (offset & Page::kPageAlignmentMask);
+ return HeapObject::FromAddress(object_address);
}
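
Reviewer note: GetAddressFromEnd and GetAddressFromStart decode object-aligned offsets — the low alignment bits are always zero, so they are shifted out when writing and restored when reading. A flat-space sketch (the alignment value is an assumption; the real code also dispatches on paged and large-object spaces):

const int kObjectAlignmentBits = 2;  // assumed 4-byte object alignment

char* AddressFromEnd(char* high_water, int encoded_offset) {
  int offset = encoded_offset << kObjectAlignmentBits;  // restore low bits
  return high_water - offset;   // count back from the allocation point
}

char* AddressFromStart(char* space_start, int encoded_offset) {
  int offset = encoded_offset << kObjectAlignmentBits;
  return space_start + offset;  // count forward from the space start
}
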
-static bool MatchHeapObject(void* key1, void* key2) {
- return key1 == key2;
+void Deserializer::Deserialize() {
+ // Don't GC while deserializing - just expand the heap.
+ AlwaysAllocateScope always_allocate;
+ // Don't use the free lists while deserializing.
+ LinearAllocationScope allocate_linearly;
+ // No active threads.
+ ASSERT_EQ(NULL, ThreadState::FirstInUse());
+ // No active handles.
+ ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ ASSERT_EQ(NULL, external_reference_decoder_);
+ external_reference_decoder_ = new ExternalReferenceDecoder();
+ Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ ASSERT(source_->AtEOF());
+ delete external_reference_decoder_;
+ external_reference_decoder_ = NULL;
}
-Serializer::Serializer()
- : global_handles_(4),
- saved_addresses_(MatchHeapObject) {
- root_ = true;
- roots_ = 0;
- objects_ = 0;
- reference_encoder_ = NULL;
- writer_ = new SnapshotWriter();
- for (int i = 0; i <= LAST_SPACE; i++) {
- allocator_[i] = new SimulatedHeapSpace();
+// This is called on the roots. It is the driver of the deserialization
+// process. It is also called on the body of each function.
+void Deserializer::VisitPointers(Object** start, Object** end) {
+ // The space must be new space. Any other space would cause ReadChunk to try
+ // to update the remembered set using NULL as the address.
+ ReadChunk(start, end, NEW_SPACE, NULL);
+}
+
+
+// This routine writes the new object into the pointer provided.
+// The reason for this strange interface is that otherwise the object is
+// written very late, which means the ByteArray map is not set up by the
+// time we need to use it to mark the space at the end of a page free (by
+// making it into a byte array).
+void Deserializer::ReadObject(int space_number,
+ Space* space,
+ Object** write_back) {
+ int size = source_->GetInt() << kObjectAlignmentBits;
+ Address address = Allocate(space_number, space, size);
+ *write_back = HeapObject::FromAddress(address);
+ Object** current = reinterpret_cast<Object**>(address);
+ Object** limit = current + (size >> kPointerSizeLog2);
+ ReadChunk(current, limit, space_number, address);
+}
+
+
+#define ONE_CASE_PER_SPACE(base_tag) \
+ case (base_tag) + NEW_SPACE: /* NOLINT */ \
+ case (base_tag) + OLD_POINTER_SPACE: /* NOLINT */ \
+ case (base_tag) + OLD_DATA_SPACE: /* NOLINT */ \
+ case (base_tag) + CODE_SPACE: /* NOLINT */ \
+ case (base_tag) + MAP_SPACE: /* NOLINT */ \
+ case (base_tag) + CELL_SPACE: /* NOLINT */ \
+ case (base_tag) + kLargeData: /* NOLINT */ \
+ case (base_tag) + kLargeCode: /* NOLINT */ \
+ case (base_tag) + kLargeFixedArray: /* NOLINT */
+
+
+void Deserializer::ReadChunk(Object** current,
+ Object** limit,
+ int space,
+ Address address) {
+ while (current < limit) {
+ int data = source_->Get();
+ switch (data) {
+#define RAW_CASE(index, size) \
+ case RAW_DATA_SERIALIZATION + index: { \
+ byte* raw_data_out = reinterpret_cast<byte*>(current); \
+ source_->CopyRaw(raw_data_out, size); \
+ current = reinterpret_cast<Object**>(raw_data_out + size); \
+ break; \
+ }
+ COMMON_RAW_LENGTHS(RAW_CASE)
+#undef RAW_CASE
+ case RAW_DATA_SERIALIZATION: {
+ int size = source_->GetInt();
+ byte* raw_data_out = reinterpret_cast<byte*>(current);
+ source_->CopyRaw(raw_data_out, size);
+ current = reinterpret_cast<Object**>(raw_data_out + size);
+ break;
+ }
+ case OBJECT_SERIALIZATION + NEW_SPACE: {
+ ReadObject(NEW_SPACE, Heap::new_space(), current);
+ if (space != NEW_SPACE) {
+ Heap::RecordWrite(address, static_cast<int>(
+ reinterpret_cast<Address>(current) - address));
+ }
+ current++;
+ break;
+ }
+ case OBJECT_SERIALIZATION + OLD_DATA_SPACE:
+ ReadObject(OLD_DATA_SPACE, Heap::old_data_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + OLD_POINTER_SPACE:
+ ReadObject(OLD_POINTER_SPACE, Heap::old_pointer_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + MAP_SPACE:
+ ReadObject(MAP_SPACE, Heap::map_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + CODE_SPACE:
+ ReadObject(CODE_SPACE, Heap::code_space(), current++);
+ LOG(LogCodeObject(current[-1]));
+ break;
+ case OBJECT_SERIALIZATION + CELL_SPACE:
+ ReadObject(CELL_SPACE, Heap::cell_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + kLargeData:
+ ReadObject(kLargeData, Heap::lo_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + kLargeCode:
+ ReadObject(kLargeCode, Heap::lo_space(), current++);
+ LOG(LogCodeObject(current[-1]));
+ break;
+ case OBJECT_SERIALIZATION + kLargeFixedArray:
+ ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
+ break;
+ case CODE_OBJECT_SERIALIZATION + kLargeCode: {
+ Object* new_code_object = NULL;
+ ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
+ Code* code_object = reinterpret_cast<Code*>(new_code_object);
+ LOG(LogCodeObject(code_object));
+ // Setting a branch/call to another code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ case CODE_OBJECT_SERIALIZATION + CODE_SPACE: {
+ Object* new_code_object = NULL;
+ ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
+ Code* code_object = reinterpret_cast<Code*>(new_code_object);
+ LOG(LogCodeObject(code_object));
+ // Setting a branch/call to another code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ ONE_CASE_PER_SPACE(BACKREF_SERIALIZATION) {
+ // Write a backreference to an object we unpacked earlier.
+ int backref_space = (data & kSpaceMask);
+ if (backref_space == NEW_SPACE && space != NEW_SPACE) {
+ Heap::RecordWrite(address, static_cast<int>(
+ reinterpret_cast<Address>(current) - address));
+ }
+ *current++ = GetAddressFromEnd(backref_space);
+ break;
+ }
+ ONE_CASE_PER_SPACE(REFERENCE_SERIALIZATION) {
+ // Write a reference to an object we unpacked earlier.
+ int reference_space = (data & kSpaceMask);
+ if (reference_space == NEW_SPACE && space != NEW_SPACE) {
+ Heap::RecordWrite(address, static_cast<int>(
+ reinterpret_cast<Address>(current) - address));
+ }
+ *current++ = GetAddressFromStart(reference_space);
+ break;
+ }
+#define COMMON_REFS_CASE(index, reference_space, address) \
+ case REFERENCE_SERIALIZATION + index: { \
+ ASSERT(SpaceIsPaged(reference_space)); \
+ Address object_address = \
+ pages_[reference_space][0] + (address << kObjectAlignmentBits); \
+ *current++ = HeapObject::FromAddress(object_address); \
+ break; \
+ }
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+ ONE_CASE_PER_SPACE(CODE_BACKREF_SERIALIZATION) {
+ int backref_space = (data & kSpaceMask);
+ // Can't use Code::cast because heap is not set up yet and assertions
+ // will fail.
+ Code* code_object =
+ reinterpret_cast<Code*>(GetAddressFromEnd(backref_space));
+ // Setting a branch/call to previously decoded code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ ONE_CASE_PER_SPACE(CODE_REFERENCE_SERIALIZATION) {
+ int backref_space = (data & kSpaceMask);
+ // Can't use Code::cast because heap is not set up yet and assertions
+ // will fail.
+ Code* code_object =
+ reinterpret_cast<Code*>(GetAddressFromStart(backref_space));
+ // Setting a branch/call to previously decoded code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ case EXTERNAL_REFERENCE_SERIALIZATION: {
+ int reference_id = source_->GetInt();
+ Address address = external_reference_decoder_->Decode(reference_id);
+ *current++ = reinterpret_cast<Object*>(address);
+ break;
+ }
+ case EXTERNAL_BRANCH_TARGET_SERIALIZATION: {
+ int reference_id = source_->GetInt();
+ Address address = external_reference_decoder_->Decode(reference_id);
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_external_target_at(location_of_branch_data, address);
+ location_of_branch_data += Assembler::kExternalTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ case START_NEW_PAGE_SERIALIZATION: {
+ int space = source_->Get();
+ pages_[space].Add(last_object_address_);
+ break;
+ }
+ case NATIVES_STRING_RESOURCE: {
+ int index = source_->Get();
+ Vector<const char> source_vector = Natives::GetScriptSource(index);
+ NativesExternalStringResource* resource =
+ new NativesExternalStringResource(source_vector.start());
+ *current++ = reinterpret_cast<Object*>(resource);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
+ ASSERT_EQ(current, limit);
}
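
Reviewer note: ReadChunk dispatches on single tag bytes where each serialization kind owns a contiguous block of codes, one per space, so a single byte encodes both kind and target space. A sketch of the encode/decode arithmetic (the base value here is hypothetical; the real tags are chosen so that masking with kSpaceMask recovers the space):

#include <cassert>

const int BACKREF_SERIALIZATION = 0x10;  // hypothetical, 16-aligned base
const int kSpaceMask = 0x0f;

int main() {
  const int space = 3;                      // some space index
  int tag = BACKREF_SERIALIZATION + space;  // encode: base + space
  assert((tag & kSpaceMask) == space);      // decode: mask off the base
  assert(tag - BACKREF_SERIALIZATION == space);
  return 0;
}
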
-Serializer::~Serializer() {
- for (int i = 0; i <= LAST_SPACE; i++) {
- delete allocator_[i];
+void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
+ const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
+ for (int shift = max_shift; shift > 0; shift -= 7) {
+ if (integer >= static_cast<uintptr_t>(1u) << shift) {
+ Put(((integer >> shift) & 0x7f) | 0x80, "IntPart");
+ }
}
- if (reference_encoder_) delete reference_encoder_;
- delete writer_;
+ PutSection(integer & 0x7f, "IntLastPart");
}
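
Reviewer note: PutInt writes integers seven bits per byte, most significant group first, with 0x80 marking every byte except the last. An encoder matching that output plus a hypothetical decoder (the real reader lives in SnapshotByteSource, whose API is not shown here):

#include <cassert>
#include <cstdint>
#include <vector>

void PutVarint(std::vector<uint8_t>* out, uintptr_t v) {
  const int max_shift = ((sizeof(uintptr_t) * 8) / 7) * 7;
  for (int shift = max_shift; shift > 0; shift -= 7) {
    if (v >= (static_cast<uintptr_t>(1) << shift)) {
      out->push_back(((v >> shift) & 0x7f) | 0x80);  // continuation bit set
    }
  }
  out->push_back(v & 0x7f);                          // last byte: bit clear
}

uintptr_t GetVarint(const uint8_t* p) {
  uintptr_t result = 0;
  while (*p & 0x80) result = (result << 7) | (*p++ & 0x7f);
  return (result << 7) | *p;
}

int main() {
  std::vector<uint8_t> bytes;
  PutVarint(&bytes, 300);         // two 7-bit groups: 0x82, 0x2c
  assert(bytes.size() == 2);
  assert(GetVarint(&bytes[0]) == 300);
  return 0;
}
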
-
-bool Serializer::serialization_enabled_ = false;
-
-
#ifdef DEBUG
-static const int kMaxTagLength = 32;
-void Serializer::Synchronize(const char* tag) {
+void Deserializer::Synchronize(const char* tag) {
+ int data = source_->Get();
+ // If this assert fails, the number of GC roots at serialization time does
+ // not match the number of GC roots at deserialization time.
+ ASSERT_EQ(SYNCHRONIZE, data);
+ do {
+ int character = source_->Get();
+ if (character == 0) break;
+ if (FLAG_debug_serialization) {
+ PrintF("%c", character);
+ }
+ } while (true);
if (FLAG_debug_serialization) {
- int length = strlen(tag);
- ASSERT(length <= kMaxTagLength);
- writer_->PutC('S');
- writer_->PutInt(length);
- writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
+ PrintF("\n");
}
}
-#endif
-void Serializer::InitializeAllocators() {
- for (int i = 0; i <= LAST_SPACE; i++) {
- allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i));
- }
-}
-
-
-bool Serializer::IsVisited(HeapObject* obj) {
- HashMap::Entry* entry =
- saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
- return entry != NULL;
-}
-
-
-Address Serializer::GetSavedAddress(HeapObject* obj) {
- HashMap::Entry* entry =
- saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
- ASSERT(entry != NULL);
- return reinterpret_cast<Address>(entry->value);
+void Serializer::Synchronize(const char* tag) {
+ sink_->Put(SYNCHRONIZE, tag);
+ int character;
+ do {
+ character = *tag++;
+ sink_->PutSection(character, "TagCharacter");
+ } while (character != 0);
}
+#endif
-void Serializer::SaveAddress(HeapObject* obj, Address addr) {
- HashMap::Entry* entry =
- saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
- entry->value = addr;
+Serializer::Serializer(SnapshotByteSink* sink)
+ : sink_(sink),
+ current_root_index_(0),
+ external_reference_encoder_(NULL) {
+ for (int i = 0; i <= LAST_SPACE; i++) {
+ fullness_[i] = 0;
+ }
}
@@ -1090,653 +923,299 @@ void Serializer::Serialize() {
// No active or weak handles.
CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
- // We need a counter function during serialization to resolve the
- // references to counters in the code on the heap.
- CHECK(StatsTable::HasCounterFunction());
- CHECK(enabled());
- InitializeAllocators();
- reference_encoder_ = new ExternalReferenceEncoder();
- PutHeader();
- Heap::IterateRoots(this);
- PutLog();
- PutContextStack();
- Disable();
-}
-
-
-void Serializer::Finalize(byte** str, int* len) {
- writer_->GetBytes(str, len);
+ CHECK_EQ(NULL, external_reference_encoder_);
+ // We don't support serializing installed extensions.
+ for (RegisteredExtension* ext = RegisteredExtension::first_extension();
+ ext != NULL;
+ ext = ext->next()) {
+ CHECK_NE(v8::INSTALLED, ext->state());
+ }
+ external_reference_encoder_ = new ExternalReferenceEncoder();
+ Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ delete external_reference_encoder_;
+ external_reference_encoder_ = NULL;
+ SerializationAddressMapper::Zap();
}
-// Serialize objects by writing them into the stream.
-
void Serializer::VisitPointers(Object** start, Object** end) {
- bool root = root_;
- root_ = false;
- for (Object** p = start; p < end; ++p) {
- bool serialized;
- Address a = Encode(*p, &serialized);
- if (root) {
- roots_++;
- // If the object was not just serialized,
- // write its encoded address instead.
- if (!serialized) PutEncodedAddress(a);
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsSmi()) {
+ sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+ sink_->PutInt(kPointerSize, "length");
+ for (int i = 0; i < kPointerSize; i++) {
+ sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
+ }
+ } else {
+ SerializeObject(*current, TAGGED_REPRESENTATION);
}
}
- root_ = root;
-}
-
-
-void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- bool serialized;
- Encode(target, &serialized);
}
-class GlobalHandlesRetriever: public ObjectVisitor {
- public:
- explicit GlobalHandlesRetriever(List<Object**>* handles)
- : global_handles_(handles) {}
-
- virtual void VisitPointers(Object** start, Object** end) {
- for (; start != end; ++start) {
- global_handles_->Add(start);
+void Serializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (SerializationAddressMapper::IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = SerializationAddressMapper::MappedTo(heap_object);
+ int offset = CurrentAllocationAddress(space) - address;
+ bool from_start = true;
+ if (SpaceIsPaged(space)) {
+ if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
+ (address >> Page::kPageSizeBits)) {
+ from_start = false;
+ address = offset;
+ }
+ } else if (space == NEW_SPACE) {
+ if (offset < address) {
+ from_start = false;
+ address = offset;
+ }
}
- }
-
- private:
- List<Object**>* global_handles_;
-};
-
-
-void Serializer::PutFlags() {
- writer_->PutC('F');
- List<const char*>* argv = FlagList::argv();
- writer_->PutInt(argv->length());
- writer_->PutC('[');
- for (int i = 0; i < argv->length(); i++) {
- if (i > 0) writer_->PutC('|');
- writer_->PutString((*argv)[i]);
- DeleteArray((*argv)[i]);
- }
- writer_->PutC(']');
- flags_end_ = writer_->length();
- delete argv;
-}
-
-
-void Serializer::PutHeader() {
- PutFlags();
- writer_->PutC('D');
-#ifdef DEBUG
- writer_->PutC(FLAG_debug_serialization ? '1' : '0');
-#else
- writer_->PutC('0');
-#endif
-#ifdef V8_NATIVE_REGEXP
- writer_->PutC('N');
-#else // Interpreted regexp
- writer_->PutC('I');
-#endif
- // Write sizes of paged memory spaces. Allocate extra space for the old
- // and code spaces, because objects in new space will be promoted to them.
- writer_->PutC('S');
- writer_->PutC('[');
- writer_->PutInt(Heap::old_pointer_space()->Size() +
- Heap::new_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::map_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::cell_space()->Size());
- writer_->PutC(']');
- // Write global handles.
- writer_->PutC('G');
- writer_->PutC('[');
- GlobalHandlesRetriever ghr(&global_handles_);
- GlobalHandles::IterateRoots(&ghr);
- for (int i = 0; i < global_handles_.length(); i++) {
- writer_->PutC('N');
- }
- writer_->PutC(']');
-}
-
-
-void Serializer::PutLog() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_code) {
- Logger::TearDown();
- int pos = writer_->InsertC('L', flags_end_);
- bool exists;
- Vector<const char> log = ReadFile(FLAG_logfile, &exists);
- writer_->InsertString(log.start(), pos);
- log.Dispose();
- }
-#endif
-}
-
-
-static int IndexOf(const List<Object**>& list, Object** element) {
- for (int i = 0; i < list.length(); i++) {
- if (list[i] == element) return i;
- }
- return -1;
-}
-
-
-void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
- writer_->PutC('[');
- writer_->PutInt(stack.length());
- for (int i = stack.length() - 1; i >= 0; i--) {
- writer_->PutC('|');
- int gh_index = IndexOf(global_handles_, stack[i].location());
- CHECK_GE(gh_index, 0);
- writer_->PutInt(gh_index);
- }
- writer_->PutC(']');
-}
-
-
-void Serializer::PutContextStack() {
- List<Context*> contexts(2);
- while (HandleScopeImplementer::instance()->HasSavedContexts()) {
- Context* context =
- HandleScopeImplementer::instance()->RestoreContext();
- contexts.Add(context);
- }
- for (int i = contexts.length() - 1; i >= 0; i--) {
- HandleScopeImplementer::instance()->SaveContext(contexts[i]);
- }
- writer_->PutC('C');
- writer_->PutC('[');
- writer_->PutInt(contexts.length());
- if (!contexts.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&contexts.first());
- VisitPointers(start, start + contexts.length());
- }
- writer_->PutC(']');
-}
-
-void Serializer::PutEncodedAddress(Address addr) {
- writer_->PutC('P');
- writer_->PutAddress(addr);
-}
-
-
-Address Serializer::Encode(Object* o, bool* serialized) {
- *serialized = false;
- if (o->IsSmi()) {
- return reinterpret_cast<Address>(o);
- } else {
- HeapObject* obj = HeapObject::cast(o);
- if (IsVisited(obj)) {
- return GetSavedAddress(obj);
+ // If we are actually dealing with real offsets (and not a numbering of
+ // all objects) then we should shift out the bits that are always 0.
+ if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+ if (reference_representation == CODE_TARGET_REPRESENTATION) {
+ if (from_start) {
+ sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+ sink_->PutInt(address, "address");
+ } else {
+ sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+ sink_->PutInt(address, "address");
+ }
} else {
- // First visit: serialize the object.
- *serialized = true;
- return PutObject(obj);
+ CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+ if (from_start) {
+#define COMMON_REFS_CASE(tag, common_space, common_offset) \
+ if (space == common_space && address == common_offset) { \
+ sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+ { /* NOLINT */
+ sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
+ sink_->PutInt(address, "address");
+ }
+ } else {
+ sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ sink_->PutInt(address, "address");
+ }
}
+ } else {
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer serializer(this,
+ heap_object,
+ sink_,
+ reference_representation);
+ serializer.Serialize();
}
}
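
The same-page back-reference case above is easiest to follow with concrete numbers. A minimal standalone sketch, assuming 13 page-size bits and 2 object-alignment bits (illustrative values, not the real build constants):

    #include <cstdio>

    int main() {
      const int kPageSizeBits = 13, kObjectAlignmentBits = 2;
      int current = 104;  // current allocation address in the space
      int mapped = 40;    // where the object was serialized earlier
      bool from_start = (current >> kPageSizeBits) != (mapped >> kPageSizeBits);
      int address = from_start ? mapped : current - mapped;
      address >>= kObjectAlignmentBits;  // shift out the always-zero bits
      std::printf("%s %d\n", from_start ? "REFERENCE" : "BACKREF", address);
      return 0;
    }

Here both addresses land in page 0, so the object is encoded as a back reference with word offset 16 rather than as an absolute address from the start of the space.
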
-Address Serializer::PutObject(HeapObject* obj) {
- Map* map = obj->map();
- InstanceType type = map->instance_type();
- int size = obj->SizeFromMap(map);
-
- // Simulate the allocation of obj to predict where it will be
- // allocated during deserialization.
- Address addr = Allocate(obj).Encode();
-
- SaveAddress(obj, addr);
-
- if (type == CODE_TYPE) {
- LOG(CodeMoveEvent(obj->address(), addr));
- }
- // Write out the object prologue: type, size, and simulated address of obj.
- writer_->PutC('[');
- CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
- writer_->PutInt(type);
- writer_->PutInt(size >> kObjectAlignmentBits);
- PutEncodedAddress(addr); // encodes AllocationSpace
-
- // Visit all the pointers in the object other than the map. This
- // will recursively serialize any as-yet-unvisited objects.
- obj->Iterate(this);
-
- // Mark end of recursively embedded objects, start of object body.
- writer_->PutC('|');
- // Write out the raw contents of the object. No compression, but
- // fast to deserialize.
- writer_->PutBytes(obj->address(), size);
- // Update pointers and external references in the written object.
- ReferenceUpdater updater(obj, this);
- obj->Iterate(&updater);
- updater.Update(writer_->position() - size);
+void Serializer::ObjectSerializer::Serialize() {
+ int space = Serializer::SpaceOfObject(object_);
+ int size = object_->Size();
-#ifdef DEBUG
- if (FLAG_debug_serialization) {
- // Write out the object epilogue to catch synchronization errors.
- PutEncodedAddress(addr);
- writer_->PutC(']');
+ if (reference_representation_ == TAGGED_REPRESENTATION) {
+ sink_->Put(OBJECT_SERIALIZATION + space, "ObjectSerialization");
+ } else {
+ CHECK_EQ(CODE_TARGET_REPRESENTATION, reference_representation_);
+ sink_->Put(CODE_OBJECT_SERIALIZATION + space, "ObjectSerialization");
}
-#endif
-
- objects_++;
- return addr;
-}
+ sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
-
-RelativeAddress Serializer::Allocate(HeapObject* obj) {
- // Find out which AllocationSpace 'obj' is in.
- AllocationSpace s;
- bool found = false;
- for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
- s = static_cast<AllocationSpace>(i);
- found = Heap::InSpace(obj, s);
- }
- CHECK(found);
- int size = obj->Size();
- if (s == NEW_SPACE) {
- if (size > Heap::MaxObjectSizeInPagedSpace()) {
- s = LO_SPACE;
- } else {
- OldSpace* space = Heap::TargetSpace(obj);
- ASSERT(space == Heap::old_pointer_space() ||
- space == Heap::old_data_space());
- s = (space == Heap::old_pointer_space()) ?
- OLD_POINTER_SPACE :
- OLD_DATA_SPACE;
- }
+ // Mark this object as already serialized.
+ bool start_new_page;
+ SerializationAddressMapper::Map(
+ object_,
+ serializer_->Allocate(space, size, &start_new_page));
+ if (start_new_page) {
+ sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
+ sink_->PutSection(space, "NewPageSpace");
}
- GCTreatment gc_treatment = DataObject;
- if (obj->IsFixedArray()) gc_treatment = PointerObject;
- else if (obj->IsCode()) gc_treatment = CodeObject;
- return allocator_[s]->Allocate(size, gc_treatment);
-}
-
-
-//------------------------------------------------------------------------------
-// Implementation of Deserializer
+ // Serialize the map (first word of the object).
+ serializer_->SerializeObject(object_->map(), TAGGED_REPRESENTATION);
-static const int kInitArraySize = 32;
-
-
-Deserializer::Deserializer(const byte* str, int len)
- : reader_(str, len),
- map_pages_(kInitArraySize),
- cell_pages_(kInitArraySize),
- old_pointer_pages_(kInitArraySize),
- old_data_pages_(kInitArraySize),
- code_pages_(kInitArraySize),
- large_objects_(kInitArraySize),
- global_handles_(4) {
- root_ = true;
- roots_ = 0;
- objects_ = 0;
- reference_decoder_ = NULL;
-#ifdef DEBUG
- expect_debug_information_ = false;
-#endif
+ // Serialize the rest of the object.
+ CHECK_EQ(0, bytes_processed_so_far_);
+ bytes_processed_so_far_ = kPointerSize;
+ object_->IterateBody(object_->map()->instance_type(), size, this);
+ OutputRawData(object_->address() + size);
}
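
Read back from the stream, the layout Serialize() produces for a single object looks like this (a reader's-eye sketch, not the real deserializer):

    // tag    : OBJECT_SERIALIZATION + space (or CODE_OBJECT_SERIALIZATION + space)
    // words  : PutInt(size >> kObjectAlignmentBits)
    // [page] : START_NEW_PAGE_SERIALIZATION + space section, only on a page boundary
    // map    : a nested object/reference/back-reference sequence
    // body   : raw data runs interleaved with pointer references
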
-Deserializer::~Deserializer() {
- if (reference_decoder_) delete reference_decoder_;
-}
-
+void Serializer::ObjectSerializer::VisitPointers(Object** start,
+ Object** end) {
+ Object** current = start;
+ while (current < end) {
+ while (current < end && (*current)->IsSmi()) current++;
+ if (current < end) OutputRawData(reinterpret_cast<Address>(current));
-void Deserializer::ExpectEncodedAddress(Address expected) {
- Address a = GetEncodedAddress();
- USE(a);
- ASSERT(a == expected);
-}
-
-
-#ifdef DEBUG
-void Deserializer::Synchronize(const char* tag) {
- if (expect_debug_information_) {
- char buf[kMaxTagLength];
- reader_.ExpectC('S');
- int length = reader_.GetInt();
- ASSERT(length <= kMaxTagLength);
- reader_.GetBytes(reinterpret_cast<Address>(buf), length);
- ASSERT_EQ(strlen(tag), length);
- ASSERT(strncmp(tag, buf, length) == 0);
- }
-}
-#endif
-
-
-void Deserializer::Deserialize() {
- // No active threads.
- ASSERT_EQ(NULL, ThreadState::FirstInUse());
- // No active handles.
- ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
- reference_decoder_ = new ExternalReferenceDecoder();
- // By setting linear allocation only, we forbid the use of free list
- // allocation which is not predicted by SimulatedAddress.
- GetHeader();
- Heap::IterateRoots(this);
- GetContextStack();
-}
-
-
-void Deserializer::VisitPointers(Object** start, Object** end) {
- bool root = root_;
- root_ = false;
- for (Object** p = start; p < end; ++p) {
- if (root) {
- roots_++;
- // Read the next object or pointer from the stream
- // pointer in the stream.
- int c = reader_.GetC();
- if (c == '[') {
- *p = GetObject(); // embedded object
- } else {
- ASSERT(c == 'P'); // pointer to previously serialized object
- *p = Resolve(reader_.GetAddress());
- }
- } else {
- // A pointer internal to a HeapObject that we've already
- // read: resolve it to a true address (or Smi)
- *p = Resolve(reinterpret_cast<Address>(*p));
+ while (current < end && !(*current)->IsSmi()) {
+ serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
+ bytes_processed_so_far_ += kPointerSize;
+ current++;
}
}
- root_ = root;
}
-void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- // On all platforms, the encoded code object address is only 32 bits.
- Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
- reinterpret_cast<Address>(rinfo->target_object_address())));
- Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
- rinfo->set_target_address(target_object->instruction_start());
-}
-
+void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
+ Address* end) {
+ Address references_start = reinterpret_cast<Address>(start);
+ OutputRawData(references_start);
-void Deserializer::VisitExternalReferences(Address* start, Address* end) {
- for (Address* p = start; p < end; ++p) {
- uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
- *p = reference_decoder_->Decode(code);
+ for (Address* current = start; current < end; current++) {
+ sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "ExternalReference");
+ int reference_id = serializer_->EncodeExternalReference(*current);
+ sink_->PutInt(reference_id, "reference id");
}
+ bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
}
-void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address());
- uint32_t encoding = *pc;
- Address target = reference_decoder_->Decode(encoding);
- rinfo->set_target_address(target);
+void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+ Address target_start = rinfo->target_address_address();
+ OutputRawData(target_start);
+ Address target = rinfo->target_address();
+ uint32_t encoding = serializer_->EncodeExternalReference(target);
+ CHECK(target == NULL ? encoding == 0 : encoding != 0);
+ sink_->Put(EXTERNAL_BRANCH_TARGET_SERIALIZATION, "ExternalReference");
+ sink_->PutInt(encoding, "reference id");
+ bytes_processed_so_far_ += Assembler::kExternalTargetSize;
}
-void Deserializer::GetFlags() {
- reader_.ExpectC('F');
- int argc = reader_.GetInt() + 1;
- char** argv = NewArray<char*>(argc);
- reader_.ExpectC('[');
- for (int i = 1; i < argc; i++) {
- if (i > 1) reader_.ExpectC('|');
- argv[i] = reader_.GetString();
- }
- reader_.ExpectC(']');
- has_log_ = false;
- for (int i = 1; i < argc; i++) {
- if (strcmp("--log_code", argv[i]) == 0) {
- has_log_ = true;
- } else if (strcmp("--nouse_ic", argv[i]) == 0) {
- FLAG_use_ic = false;
- } else if (strcmp("--debug_code", argv[i]) == 0) {
- FLAG_debug_code = true;
- } else if (strcmp("--nolazy", argv[i]) == 0) {
- FLAG_lazy = false;
+void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+ CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Address target_start = rinfo->target_address_address();
+ OutputRawData(target_start);
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
+ bytes_processed_so_far_ += Assembler::kCallTargetSize;
+}
+
+
+void Serializer::ObjectSerializer::VisitExternalAsciiString(
+ v8::String::ExternalAsciiStringResource** resource_pointer) {
+ Address references_start = reinterpret_cast<Address>(resource_pointer);
+ OutputRawData(references_start);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Object* source = Heap::natives_source_cache()->get(i);
+ if (!source->IsUndefined()) {
+ ExternalAsciiString* string = ExternalAsciiString::cast(source);
+ typedef v8::String::ExternalAsciiStringResource Resource;
+ Resource* resource = string->resource();
+ if (resource == *resource_pointer) {
+ sink_->Put(NATIVES_STRING_RESOURCE, "NativesStringResource");
+ sink_->PutSection(i, "NativesStringResourceEnd");
+ bytes_processed_so_far_ += sizeof(resource);
+ return;
+ }
}
- DeleteArray(argv[i]);
}
-
- DeleteArray(argv);
+ // One of the strings in the natives cache should match the resource. We
+ // can't serialize any other kinds of external strings.
+ UNREACHABLE();
}
-void Deserializer::GetLog() {
- if (has_log_) {
- reader_.ExpectC('L');
- char* snapshot_log = reader_.GetString();
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_code) {
- LOG(Preamble(snapshot_log));
+void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
+ Address object_start = object_->address();
+ int up_to_offset = static_cast<int>(up_to - object_start);
+ int skipped = up_to_offset - bytes_processed_so_far_;
+ // This assert will fail if the reloc info gives us the target_address_address
+ // locations in non-ascending order. Luckily that doesn't happen.
+ ASSERT(skipped >= 0);
+ if (skipped != 0) {
+ Address base = object_start + bytes_processed_so_far_;
+#define RAW_CASE(index, length) \
+ if (skipped == length) { \
+ sink_->PutSection(RAW_DATA_SERIALIZATION + index, "RawDataFixed"); \
+ } else /* NOLINT */
+ COMMON_RAW_LENGTHS(RAW_CASE)
+#undef RAW_CASE
+ { /* NOLINT */
+ sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+ sink_->PutInt(skipped, "length");
}
-#endif
- DeleteArray(snapshot_log);
- }
-}
-
-
-static void InitPagedSpace(PagedSpace* space,
- int capacity,
- List<Page*>* page_list) {
- if (!space->EnsureCapacity(capacity)) {
- V8::FatalProcessOutOfMemory("InitPagedSpace");
- }
- PageIterator it(space, PageIterator::ALL_PAGES);
- while (it.has_next()) page_list->Add(it.next());
-}
-
-
-void Deserializer::GetHeader() {
- reader_.ExpectC('D');
-#ifdef DEBUG
- expect_debug_information_ = reader_.GetC() == '1';
-#else
- // In release mode, don't attempt to read a snapshot containing
- // synchronization tags.
- if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
-#endif
-#ifdef V8_NATIVE_REGEXP
- reader_.ExpectC('N');
-#else // Interpreted regexp.
- reader_.ExpectC('I');
-#endif
- // Ensure sufficient capacity in paged memory spaces to avoid growth
- // during deserialization.
- reader_.ExpectC('S');
- reader_.ExpectC('[');
- InitPagedSpace(Heap::old_pointer_space(),
- reader_.GetInt(),
- &old_pointer_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
- reader_.ExpectC(']');
- // Create placeholders for global handles later to be fill during
- // IterateRoots.
- reader_.ExpectC('G');
- reader_.ExpectC('[');
- int c = reader_.GetC();
- while (c != ']') {
- ASSERT(c == 'N');
- global_handles_.Add(GlobalHandles::Create(NULL).location());
- c = reader_.GetC();
- }
-}
-
-
-void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
- reader_.ExpectC('[');
- int length = reader_.GetInt();
- for (int i = 0; i < length; i++) {
- reader_.ExpectC('|');
- int gh_index = reader_.GetInt();
- stack->Add(global_handles_[gh_index]);
- }
- reader_.ExpectC(']');
-}
-
-
-void Deserializer::GetContextStack() {
- reader_.ExpectC('C');
- CHECK_EQ(reader_.GetC(), '[');
- int count = reader_.GetInt();
- List<Context*> entered_contexts(count);
- if (count > 0) {
- Object** start = reinterpret_cast<Object**>(&entered_contexts.first());
- VisitPointers(start, start + count);
- }
- reader_.ExpectC(']');
- for (int i = 0; i < count; i++) {
- HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]);
- }
-}
-
-
-Address Deserializer::GetEncodedAddress() {
- reader_.ExpectC('P');
- return reader_.GetAddress();
-}
-
-
-Object* Deserializer::GetObject() {
- // Read the prologue: type, size and encoded address.
- InstanceType type = static_cast<InstanceType>(reader_.GetInt());
- int size = reader_.GetInt() << kObjectAlignmentBits;
- Address a = GetEncodedAddress();
-
- // Get a raw object of the right size in the right space.
- AllocationSpace space = GetSpace(a);
- Object* o;
- if (IsLargeExecutableObject(a)) {
- o = Heap::lo_space()->AllocateRawCode(size);
- } else if (IsLargeFixedArray(a)) {
- o = Heap::lo_space()->AllocateRawFixedArray(size);
- } else {
- AllocationSpace retry_space = (space == NEW_SPACE)
- ? Heap::TargetSpaceId(type)
- : space;
- o = Heap::AllocateRaw(size, space, retry_space);
- }
- ASSERT(!o->IsFailure());
- // Check that the simulation of heap allocation was correct.
- ASSERT(o == Resolve(a));
-
- // Read any recursively embedded objects.
- int c = reader_.GetC();
- while (c == '[') {
- GetObject();
- c = reader_.GetC();
- }
- ASSERT(c == '|');
-
- HeapObject* obj = reinterpret_cast<HeapObject*>(o);
- // Read the uninterpreted contents of the object after the map
- reader_.GetBytes(obj->address(), size);
-#ifdef DEBUG
- if (expect_debug_information_) {
- // Read in the epilogue to check that we're still synchronized
- ExpectEncodedAddress(a);
- reader_.ExpectC(']');
- }
-#endif
-
- // Resolve the encoded pointers we just read in.
- // Same as obj->Iterate(this), but doesn't rely on the map pointer being set.
- VisitPointer(reinterpret_cast<Object**>(obj->address()));
- obj->IterateBody(type, size, this);
-
- if (type == CODE_TYPE) {
- LOG(CodeMoveEvent(a, obj->address()));
+ for (int i = 0; i < skipped; i++) {
+ unsigned int data = base[i];
+ sink_->PutSection(data, "Byte");
+ }
+ bytes_processed_so_far_ += skipped;
}
- objects_++;
- return o;
-}
-
-
-static inline Object* ResolvePaged(int page_index,
- int page_offset,
- PagedSpace* space,
- List<Page*>* page_list) {
- ASSERT(page_index < page_list->length());
- Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
- return HeapObject::FromAddress(address);
}
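
OutputRawData picks between a one-byte tag (for the lengths in the COMMON_RAW_LENGTHS table further down in serialize.h) and the generic tag followed by a varint length. A standalone sketch of that choice, assuming RAW_DATA_SERIALIZATION == 0 as declared in the SerDes enum below:

    #include <cstdio>

    int RawDataTag(int skipped, bool* explicit_length) {
      static const int kCommon[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8,
                                      12, 16, 20, 24, 28, 32, 36};
      for (int index = 1; index <= 15; index++) {
        if (kCommon[index] == skipped) {
          *explicit_length = false;  // one-byte tag, length implied
          return 0 /* RAW_DATA_SERIALIZATION */ + index;
        }
      }
      *explicit_length = true;       // generic tag, varint length follows
      return 0;
    }

    int main() {
      bool explicit_length;
      std::printf("%d\n", RawDataTag(12, &explicit_length));  // prints 9
      return 0;
    }
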
-template<typename T>
-void ConcatReversed(List<T>* target, const List<T>& source) {
- for (int i = source.length() - 1; i >= 0; i--) {
- target->Add(source[i]);
+int Serializer::SpaceOfObject(HeapObject* object) {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+ AllocationSpace s = static_cast<AllocationSpace>(i);
+ if (Heap::InSpace(object, s)) {
+ if (i == LO_SPACE) {
+ if (object->IsCode()) {
+ return kLargeCode;
+ } else if (object->IsFixedArray()) {
+ return kLargeFixedArray;
+ } else {
+ return kLargeData;
+ }
+ }
+ return i;
+ }
}
+ UNREACHABLE();
+ return 0;
}
-Object* Deserializer::Resolve(Address encoded) {
- Object* o = reinterpret_cast<Object*>(encoded);
- if (o->IsSmi()) return o;
-
- // Encoded addresses of HeapObjects always have 'HeapObject' tags.
- ASSERT(o->IsHeapObject());
- switch (GetSpace(encoded)) {
- // For Map space and Old space, we cache the known Pages in map_pages,
- // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
- // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
- // and that appears not to update the page list.
- case MAP_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::map_space(), &map_pages_);
- case CELL_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::cell_space(), &cell_pages_);
- case OLD_POINTER_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::old_pointer_space(), &old_pointer_pages_);
- case OLD_DATA_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::old_data_space(), &old_data_pages_);
- case CODE_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::code_space(), &code_pages_);
- case NEW_SPACE:
- return HeapObject::FromAddress(Heap::NewSpaceStart() +
- NewSpaceOffset(encoded));
- case LO_SPACE:
- // Cache the known large_objects, allocated one per 'page'
- int index = LargeObjectIndex(encoded);
- if (index >= large_objects_.length()) {
- int new_object_count =
- Heap::lo_space()->PageCount() - large_objects_.length();
- List<Object*> new_objects(new_object_count);
- LargeObjectIterator it(Heap::lo_space());
- for (int i = 0; i < new_object_count; i++) {
- new_objects.Add(it.next());
- }
-#ifdef DEBUG
- for (int i = large_objects_.length() - 1; i >= 0; i--) {
- ASSERT(it.next() == large_objects_[i]);
- }
-#endif
- ConcatReversed(&large_objects_, new_objects);
- ASSERT(index < large_objects_.length());
- }
- return large_objects_[index]; // s.page_offset() is ignored.
+int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+ AllocationSpace s = static_cast<AllocationSpace>(i);
+ if (Heap::InSpace(object, s)) {
+ return i;
+ }
}
UNREACHABLE();
- return NULL;
+ return 0;
+}
+
+
+int Serializer::Allocate(int space, int size, bool* new_page) {
+ CHECK(space >= 0 && space < kNumberOfSpaces);
+ if (SpaceIsLarge(space)) {
+ // In large object space we merely number the objects instead of trying to
+ // determine some sort of address.
+ *new_page = true;
+ return fullness_[LO_SPACE]++;
+ }
+ *new_page = false;
+ if (fullness_[space] == 0) {
+ *new_page = true;
+ }
+ if (SpaceIsPaged(space)) {
+ // Paged spaces are a little special. We encode their addresses as if the
+ // pages were all contiguous and each page were filled up in the range
+ // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
+ // and allocation does not start at offset 0 in the page, but this scheme
+ // means the deserializer can get the page number quickly by shifting the
+ // serialized address.
+ CHECK(IsPowerOf2(Page::kPageSize));
+ int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
+ CHECK(size <= Page::kObjectAreaSize);
+ if (used_in_this_page + size > Page::kObjectAreaSize) {
+ *new_page = true;
+ fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
+ }
+ }
+ int allocation_address = fullness_[space];
+ fullness_[space] = allocation_address + size;
+ return allocation_address;
}
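
Because Allocate hands out addresses as if every page were contiguous and fully packed, a deserializer can recover the page number and offset with plain shifts and masks. A standalone sketch, assuming 13 page-size bits (an illustrative value):

    #include <cstdio>

    int main() {
      const int kPageSizeBits = 13;
      const int kPageSize = 1 << kPageSizeBits;
      int encoded = 3 * kPageSize + 200;            // offset 200 in the fourth page
      int page_index = encoded >> kPageSizeBits;    // 3
      int page_offset = encoded & (kPageSize - 1);  // 200
      std::printf("page %d, offset %d\n", page_index, page_offset);
      return 0;
    }
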
diff --git a/src/serialize.h b/src/serialize.h
index c901480f..96bd751d 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -108,235 +108,288 @@ class ExternalReferenceDecoder {
};
-// A Serializer recursively visits objects to construct a serialized
-// representation of the Heap stored in a string. Serialization is
-// destructive. We use a similar mechanism to the GC to ensure that
-// each object is visited once, namely, we modify the map pointer of
-// each visited object to contain the relative address in the
-// appropriate space where that object will be allocated when the heap
-// is deserialized.
-
-
-// Helper classes defined in serialize.cc.
-class RelativeAddress;
-class SimulatedHeapSpace;
-class SnapshotWriter;
-class ReferenceUpdater;
-
-
-class Serializer: public ObjectVisitor {
+class SnapshotByteSource {
public:
- Serializer();
-
- virtual ~Serializer();
-
- // Serialize the current state of the heap. This operation destroys the
- // heap contents and the contents of the roots into the heap.
- void Serialize();
-
- // Returns the serialized buffer. Ownership is transferred to the
- // caller. Only the destructor and getters may be called after this call.
- void Finalize(byte** str, int* len);
-
- int roots() { return roots_; }
- int objects() { return objects_; }
-
-#ifdef DEBUG
- // insert "tag" into the serialized stream
- virtual void Synchronize(const char* tag);
-#endif
-
- static bool enabled() { return serialization_enabled_; }
-
- static void Enable() { serialization_enabled_ = true; }
- static void Disable() { serialization_enabled_ = false; }
+ SnapshotByteSource(const byte* array, int length)
+ : data_(array), length_(length), position_(0) { }
- private:
- friend class ReferenceUpdater;
-
- virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitCodeTarget(RelocInfo* rinfo);
- bool IsVisited(HeapObject* obj);
-
- Address GetSavedAddress(HeapObject* obj);
-
- void SaveAddress(HeapObject* obj, Address addr);
-
- void PutEncodedAddress(Address addr);
- // Write the global flags into the file.
- void PutFlags();
- // Write global information into the header of the file.
- void PutHeader();
- // Write the contents of the log into the file.
- void PutLog();
- // Serialize 'obj', and return its encoded RelativeAddress.
- Address PutObject(HeapObject* obj);
- // Write a stack of handles to the file bottom first.
- void PutGlobalHandleStack(const List<Handle<Object> >& stack);
- // Write the context stack into the file.
- void PutContextStack();
-
- // Return the encoded RelativeAddress where this object will be
- // allocated on deserialization. On the first visit of 'o',
- // serialize its contents. On return, *serialized will be true iff
- // 'o' has just been serialized.
- Address Encode(Object* o, bool* serialized);
-
- // Simulate the allocation of 'obj', returning the address where it will
- // be allocated on deserialization
- RelativeAddress Allocate(HeapObject* obj);
-
- void InitializeAllocators();
-
- SnapshotWriter* writer_;
- bool root_; // serializing a root?
- int roots_; // number of roots visited
- int objects_; // number of objects serialized
-
- static bool serialization_enabled_;
-
- int flags_end_; // The position right after the flags.
-
- // An array of per-space SimulatedHeapSpaces used as memory allocators.
- SimulatedHeapSpace* allocator_[LAST_SPACE+1];
- // A list of global handles at serialization time.
- List<Object**> global_handles_;
-
- ExternalReferenceEncoder* reference_encoder_;
-
- HashMap saved_addresses_;
-
- DISALLOW_COPY_AND_ASSIGN(Serializer);
-};
-
-// Helper class to read the bytes of the serialized heap.
-
-class SnapshotReader {
- public:
- SnapshotReader(const byte* str, int len): str_(str), end_(str + len) {}
+ bool HasMore() { return position_ < length_; }
- void ExpectC(char expected) {
- int c = GetC();
- USE(c);
- ASSERT(c == expected);
+ int Get() {
+ ASSERT(position_ < length_);
+ return data_[position_++];
}
- int GetC() {
- if (str_ >= end_) return EOF;
- return *str_++;
+ void CopyRaw(byte* to, int number_of_bytes) {
+ memcpy(to, data_ + position_, number_of_bytes);
+ position_ += number_of_bytes;
}
int GetInt() {
- int result;
- GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
- return result;
+ // A little loop unrolling to catch the really small ints.
+ int snapshot_byte = Get();
+ if ((snapshot_byte & 0x80) == 0) {
+ return snapshot_byte;
+ }
+ int accumulator = (snapshot_byte & 0x7f) << 7;
+ while (true) {
+ snapshot_byte = Get();
+ if ((snapshot_byte & 0x80) == 0) {
+ return accumulator | snapshot_byte;
+ }
+ accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
+ }
+ UNREACHABLE();
+ return accumulator;
}
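
GetInt reads 7-bit groups, most significant group first, with the high bit of every byte except the last acting as a continuation flag. The following is a matching encoder sketch; it is an assumed mirror of SnapshotByteSink::PutInt, whose definition is not part of this hunk:

    #include <cstdio>
    #include <vector>

    void PutIntSketch(std::vector<unsigned char>* out, unsigned int value) {
      unsigned char groups[8];
      int count = 0;
      do {                                   // split into 7-bit groups, LSB first
        groups[count++] = value & 0x7f;
        value >>= 7;
      } while (value != 0);
      for (int i = count - 1; i > 0; i--) {  // emit MSB first, high bit set
        out->push_back(groups[i] | 0x80);
      }
      out->push_back(groups[0]);             // last byte has the high bit clear
    }

    int main() {
      std::vector<unsigned char> bytes;
      PutIntSketch(&bytes, 300);  // 300 encodes as 0x82 0x2c
      std::printf("%02x %02x\n", bytes[0], bytes[1]);
      return 0;
    }

Feeding 0x82 0x2c back through GetInt yields (2 << 7) | 44 == 300, round-tripping the value.
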
- Address GetAddress() {
- Address result;
- GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
- return result;
- }
-
- void GetBytes(Address a, int size) {
- ASSERT(str_ + size <= end_);
- memcpy(a, str_, size);
- str_ += size;
- }
-
- char* GetString() {
- ExpectC('[');
- int size = GetInt();
- ExpectC(']');
- char* s = NewArray<char>(size + 1);
- GetBytes(reinterpret_cast<Address>(s), size);
- s[size] = 0;
- return s;
+ bool AtEOF() {
+ return position_ == length_;
}
private:
- const byte* str_;
- const byte* end_;
+ const byte* data_;
+ int length_;
+ int position_;
};
-// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+// It is very common to have a reference to the object at word 10 in space 2,
+// the object at word 5 in space 2 and the object at word 28 in space 4. This
+// only works for objects in the first page of a space.
+#define COMMON_REFERENCE_PATTERNS(f) \
+ f(kNumberOfSpaces, 2, 10) \
+ f(kNumberOfSpaces + 1, 2, 5) \
+ f(kNumberOfSpaces + 2, 4, 28) \
+ f(kNumberOfSpaces + 3, 2, 21) \
+ f(kNumberOfSpaces + 4, 2, 98) \
+ f(kNumberOfSpaces + 5, 2, 67) \
+ f(kNumberOfSpaces + 6, 4, 132)
+
+#define COMMON_RAW_LENGTHS(f) \
+ f(1, 1) \
+ f(2, 2) \
+ f(3, 3) \
+ f(4, 4) \
+ f(5, 5) \
+ f(6, 6) \
+ f(7, 7) \
+ f(8, 8) \
+ f(9, 12) \
+ f(10, 16) \
+ f(11, 20) \
+ f(12, 24) \
+ f(13, 28) \
+ f(14, 32) \
+ f(15, 36)
+
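Both tables are X-macros: each use site supplies an f(...) that is expanded once per row. For example, COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE) in SerializeObject unrolls into an if/else chain like this (first row and fallback shown):

    // if (space == 2 && address == 10) {
    //   sink_->PutSection(kNumberOfSpaces + REFERENCE_SERIALIZATION, "RefSer");
    // } else
    // ...one clause per remaining row...
    // {  // fallback for uncommon references
    //   sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
    //   sink_->PutInt(address, "address");
    // }
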
+// The SerDes class is a common superclass for Serializer and Deserializer
+// which is used to store common constants and methods used by both.
+class SerDes: public ObjectVisitor {
+ protected:
+ enum DataType {
+ RAW_DATA_SERIALIZATION = 0,
+ // And 15 common raw lengths.
+ OBJECT_SERIALIZATION = 16,
+ // One variant per space.
+ CODE_OBJECT_SERIALIZATION = 25,
+ // One per space (only code spaces in use).
+ EXTERNAL_REFERENCE_SERIALIZATION = 34,
+ EXTERNAL_BRANCH_TARGET_SERIALIZATION = 35,
+ SYNCHRONIZE = 36,
+ START_NEW_PAGE_SERIALIZATION = 37,
+ NATIVES_STRING_RESOURCE = 38,
+ // Free: 39-47.
+ BACKREF_SERIALIZATION = 48,
+ // One per space, must be kSpaceMask aligned.
+ // Free: 57-63.
+ REFERENCE_SERIALIZATION = 64,
+ // One per space and common references. Must be kSpaceMask aligned.
+ CODE_BACKREF_SERIALIZATION = 80,
+ // One per space, must be kSpaceMask aligned.
+ // Free: 89-95.
+ CODE_REFERENCE_SERIALIZATION = 96
+ // One per space, must be kSpaceMask aligned.
+ // Free: 105-255.
+ };
+ static const int kLargeData = LAST_SPACE;
+ static const int kLargeCode = kLargeData + 1;
+ static const int kLargeFixedArray = kLargeCode + 1;
+ static const int kNumberOfSpaces = kLargeFixedArray + 1;
+
+ // A bitmask for getting the space out of an instruction.
+ static const int kSpaceMask = 15;
+
+ static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+ static inline bool SpaceIsPaged(int space) {
+ return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+ }
+};
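
For the opcodes marked "must be kSpaceMask aligned", the low four bits of an instruction byte carry the space and the remaining bits the opcode, so a decoder can split them with a mask. A standalone sketch:

    #include <cstdio>

    int main() {
      const int kSpaceMask = 15;
      const int BACKREF_SERIALIZATION = 48;
      int instruction = BACKREF_SERIALIZATION + 2;  // back reference into space 2
      int opcode = instruction & ~kSpaceMask;       // 48
      int space = instruction & kSpaceMask;         // 2
      std::printf("opcode %d, space %d\n", opcode, space);
      return 0;
    }
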
-class Deserializer: public ObjectVisitor {
- public:
- // Create a deserializer. The snapshot is held in str and has size len.
- Deserializer(const byte* str, int len);
- virtual ~Deserializer();
- // Read the flags from the header of the file, and set those that
- // should be inherited from the snapshot.
- void GetFlags();
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+class Deserializer: public SerDes {
+ public:
+ // Create a deserializer from a snapshot byte source.
+ explicit Deserializer(SnapshotByteSource* source);
- // Read saved profiling information from the file and log it if required.
- void GetLog();
+ virtual ~Deserializer() { }
// Deserialize the snapshot into an empty heap.
void Deserialize();
-
- int roots() { return roots_; }
- int objects() { return objects_; }
-
#ifdef DEBUG
- // Check for the presence of "tag" in the serialized stream
virtual void Synchronize(const char* tag);
#endif
private:
virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitCodeTarget(RelocInfo* rinfo);
- virtual void VisitExternalReferences(Address* start, Address* end);
- virtual void VisitRuntimeEntry(RelocInfo* rinfo);
- Address GetEncodedAddress();
+ virtual void VisitExternalReferences(Address* start, Address* end) {
+ UNREACHABLE();
+ }
- // Read other global information (except flags) from the header of the file.
- void GetHeader();
- // Read a stack of handles from the file bottom first.
- void GetGlobalHandleStack(List<Handle<Object> >* stack);
- // Read the context stack from the file.
- void GetContextStack();
+ virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
+ UNREACHABLE();
+ }
- Object* GetObject();
+ void ReadChunk(Object** start, Object** end, int space, Address address);
+ HeapObject* GetAddressFromStart(int space);
+ inline HeapObject* GetAddressFromEnd(int space);
+ Address Allocate(int space_number, Space* space, int size);
+ void ReadObject(int space_number, Space* space, Object** write_back);
+
+ // Keep track of the pages in the paged spaces.
+ // (In large object space we are keeping track of individual objects
+ // rather than pages.) In new space we just need the address of the
+ // first object and the others will flow from that.
+ List<Address> pages_[SerDes::kNumberOfSpaces];
+
+ SnapshotByteSource* source_;
+ ExternalReferenceDecoder* external_reference_decoder_;
+ // This is the address of the next object that will be allocated in each
+ // space. It is used to calculate the addresses of back-references.
+ Address high_water_[LAST_SPACE + 1];
+ // This is the address of the most recent object that was allocated. It
+ // is used to set the location of the new page when we encounter a
+ // START_NEW_PAGE_SERIALIZATION tag.
+ Address last_object_address_;
- // Get the encoded address. In debug mode we make sure
- // it matches the given expectations.
- void ExpectEncodedAddress(Address expected);
+ DISALLOW_COPY_AND_ASSIGN(Deserializer);
+};
- // Given an encoded address (the result of
- // RelativeAddress::Encode), return the object to which it points,
- // which will be either an Smi or a HeapObject in the current heap.
- Object* Resolve(Address encoded_address);
- SnapshotReader reader_;
- bool root_; // Deserializing a root?
- int roots_; // number of roots visited
- int objects_; // number of objects serialized
+class SnapshotByteSink {
+ public:
+ virtual ~SnapshotByteSink() { }
+ virtual void Put(int byte, const char* description) = 0;
+ virtual void PutSection(int byte, const char* description) {
+ Put(byte, description);
+ }
+ void PutInt(uintptr_t integer, const char* description);
+};
- bool has_log_; // The file has log information.
- // Resolve caches the following:
- List<Page*> map_pages_; // All pages in the map space.
- List<Page*> cell_pages_; // All pages in the cell space.
- List<Page*> old_pointer_pages_; // All pages in the old pointer space.
- List<Page*> old_data_pages_; // All pages in the old data space.
- List<Page*> code_pages_; // All pages in the code space.
- List<Object*> large_objects_; // All known large objects.
- // A list of global handles at deserialization time.
- List<Object**> global_handles_;
+class Serializer : public SerDes {
+ public:
+ explicit Serializer(SnapshotByteSink* sink);
+ // Serialize the current state of the heap. This operation destroys the
+ // heap contents.
+ void Serialize();
+ void VisitPointers(Object** start, Object** end);
- ExternalReferenceDecoder* reference_decoder_;
+ static void Enable() {
+ if (!serialization_enabled_) {
+ ASSERT(!too_late_to_enable_now_);
+ }
+ serialization_enabled_ = true;
+ }
+ static void Disable() { serialization_enabled_ = false; }
+ // Call this when you have made use of the fact that there is no serialization
+ // going on.
+ static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
+ static bool enabled() { return serialization_enabled_; }
#ifdef DEBUG
- bool expect_debug_information_;
+ virtual void Synchronize(const char* tag);
#endif
- DISALLOW_COPY_AND_ASSIGN(Deserializer);
+ private:
+ enum ReferenceRepresentation {
+ TAGGED_REPRESENTATION, // A tagged object reference.
+ CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
+ };
+ class ObjectSerializer : public ObjectVisitor {
+ public:
+ ObjectSerializer(Serializer* serializer,
+ Object* o,
+ SnapshotByteSink* sink,
+ ReferenceRepresentation representation)
+ : serializer_(serializer),
+ object_(HeapObject::cast(o)),
+ sink_(sink),
+ reference_representation_(representation),
+ bytes_processed_so_far_(0) { }
+ void Serialize();
+ void VisitPointers(Object** start, Object** end);
+ void VisitExternalReferences(Address* start, Address* end);
+ void VisitCodeTarget(RelocInfo* target);
+ void VisitRuntimeEntry(RelocInfo* reloc);
+ // Used for serializing the external strings that hold the natives source.
+ void VisitExternalAsciiString(
+ v8::String::ExternalAsciiStringResource** resource);
+ // We can't serialize a heap with external two byte strings.
+ void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) {
+ UNREACHABLE();
+ }
+
+ private:
+ void OutputRawData(Address up_to);
+
+ Serializer* serializer_;
+ HeapObject* object_;
+ SnapshotByteSink* sink_;
+ ReferenceRepresentation reference_representation_;
+ int bytes_processed_so_far_;
+ };
+
+ void SerializeObject(Object* o, ReferenceRepresentation representation);
+ void InitializeAllocators();
+ // This will return the space for an object. If the object is in large
+ // object space it may return kLargeCode or kLargeFixedArray in order
+ // to indicate to the deserializer what kind of large object allocation
+ // to make.
+ static int SpaceOfObject(HeapObject* object);
+ // This just returns the space of the object. It will return LO_SPACE
+ // for all large objects since you can't check the type of the object
+ // once the map has been used for the serialization address.
+ static int SpaceOfAlreadySerializedObject(HeapObject* object);
+ int Allocate(int space, int size, bool* new_page_started);
+ int CurrentAllocationAddress(int space) {
+ if (SpaceIsLarge(space)) space = LO_SPACE;
+ return fullness_[space];
+ }
+ int EncodeExternalReference(Address addr) {
+ return external_reference_encoder_->Encode(addr);
+ }
+
+ // Keep track of the fullness of each space in order to generate
+ // relative addresses for back references. Large objects are
+ // just numbered sequentially since relative addresses make no
+ // sense in large object space.
+ int fullness_[LAST_SPACE + 1];
+ SnapshotByteSink* sink_;
+ int current_root_index_;
+ ExternalReferenceEncoder* external_reference_encoder_;
+ static bool serialization_enabled_;
+ // Did we already make use of the fact that serialization was not enabled?
+ static bool too_late_to_enable_now_;
+
+ friend class ObjectSerializer;
+ friend class Deserializer;
+
+ DISALLOW_COPY_AND_ASSIGN(Serializer);
};
} } // namespace v8::internal
diff --git a/src/simulator.h b/src/simulator.h
new file mode 100644
index 00000000..6f8cd5a2
--- /dev/null
+++ b/src/simulator.h
@@ -0,0 +1,41 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SIMULATOR_H_
+#define V8_SIMULATOR_H_
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif // V8_SIMULATOR_H_
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 9c66a503..c01baad7 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -32,14 +32,15 @@
#include "api.h"
#include "serialize.h"
#include "snapshot.h"
+#include "platform.h"
namespace v8 {
namespace internal {
bool Snapshot::Deserialize(const byte* content, int len) {
- Deserializer des(content, len);
- des.GetFlags();
- return V8::Initialize(&des);
+ SnapshotByteSource source(content, len);
+ Deserializer deserializer(&source);
+ return V8::Initialize(&deserializer);
}
@@ -48,28 +49,49 @@ bool Snapshot::Initialize(const char* snapshot_file) {
int len;
byte* str = ReadBytes(snapshot_file, &len);
if (!str) return false;
- bool result = Deserialize(str, len);
+ Deserialize(str, len);
DeleteArray(str);
- return result;
+ return true;
} else if (size_ > 0) {
- return Deserialize(data_, size_);
+ Deserialize(data_, size_);
+ return true;
}
return false;
}
-bool Snapshot::WriteToFile(const char* snapshot_file) {
- Serializer ser;
- ser.Serialize();
- byte* str;
- int len;
- ser.Finalize(&str, &len);
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
- int written = WriteBytes(snapshot_file, str, len);
+ private:
+ FILE* fp_;
+};
- DeleteArray(str);
- return written == len;
+
+bool Snapshot::WriteToFile(const char* snapshot_file) {
+ FileByteSink file(snapshot_file);
+ Serializer ser(&file);
+ ser.Serialize();
+ return true;
}
+
} } // namespace v8::internal
diff --git a/src/spaces.cc b/src/spaces.cc
index bd58742e..f3b6b9f6 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -354,7 +354,7 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
} else {
mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
}
- int alloced = *allocated;
+ int alloced = static_cast<int>(*allocated);
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
return mem;
@@ -367,8 +367,8 @@ void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
} else {
OS::Free(mem, length);
}
- Counters::memory_allocated.Decrement(length);
- size_ -= length;
+ Counters::memory_allocated.Decrement(static_cast<int>(length));
+ size_ -= static_cast<int>(length);
ASSERT(size_ >= 0);
}
@@ -387,7 +387,7 @@ void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(initial_chunk_->size() == requested);
LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
- size_ += requested;
+ size_ += static_cast<int>(requested);
return initial_chunk_->address();
}
@@ -397,8 +397,8 @@ static int PagesInChunk(Address start, size_t size) {
// and the last page ends on the last page-aligned address before
// start+size. Page::kPageSize is a power of two so we can divide by
// shifting.
- return (RoundDown(start + size, Page::kPageSize)
- - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
+ return static_cast<int>((RoundDown(start + size, Page::kPageSize)
+ - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
}
@@ -412,7 +412,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
if (size_ + static_cast<int>(chunk_size) > capacity_) {
// Request as many pages as we can.
chunk_size = capacity_ - size_;
- requested_pages = chunk_size >> Page::kPageSizeBits;
+ requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}
@@ -445,7 +445,7 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
- Counters::memory_allocated.Increment(size);
+ Counters::memory_allocated.Increment(static_cast<int>(size));
// So long as we correctly overestimated the number of chunks we should not
// run out of chunk ids.
@@ -466,7 +466,7 @@ bool MemoryAllocator::CommitBlock(Address start,
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Commit(start, size, executable)) return false;
- Counters::memory_allocated.Increment(size);
+ Counters::memory_allocated.Increment(static_cast<int>(size));
return true;
}
@@ -478,7 +478,7 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Uncommit(start, size)) return false;
- Counters::memory_allocated.Decrement(size);
+ Counters::memory_allocated.Decrement(static_cast<int>(size));
return true;
}
@@ -558,7 +558,7 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
// TODO(1240712): VirtualMemory::Uncommit has a return value which
// is ignored here.
initial_chunk_->Uncommit(c.address(), c.size());
- Counters::memory_allocated.Decrement(c.size());
+ Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
} else {
LOG(DeleteEvent("PagedChunk", c.address()));
FreeRawMemory(c.address(), c.size());
@@ -1096,7 +1096,8 @@ void NewSpace::Grow() {
void NewSpace::Shrink() {
int new_capacity = Max(InitialCapacity(), 2 * Size());
- int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
+ int rounded_new_capacity =
+ RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
if (rounded_new_capacity < Capacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from space if we managed to shrink to space.
@@ -1234,7 +1235,7 @@ void SemiSpace::TearDown() {
bool SemiSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
int maximum_extra = maximum_capacity_ - capacity_;
- int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()),
+ int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
return false;
@@ -1527,7 +1528,9 @@ void FreeListNode::set_size(int size_in_bytes) {
// correct size.
if (size_in_bytes > ByteArray::kAlignedSize) {
set_map(Heap::raw_unchecked_byte_array_map());
- ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+ // Can't use ByteArray::cast because it fails during deserialization.
+ ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+ this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
set_map(Heap::raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
@@ -1535,7 +1538,8 @@ void FreeListNode::set_size(int size_in_bytes) {
} else {
UNREACHABLE();
}
- ASSERT(Size() == size_in_bytes);
+ // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
+ // deserialization because the byte array map is not done yet.
}
@@ -1794,12 +1798,14 @@ void OldSpace::MCCommitRelocationInfo() {
while (it.has_next()) {
Page* p = it.next();
// Space below the relocation pointer is allocated.
- computed_size += p->mc_relocation_top - p->ObjectAreaStart();
+ computed_size +=
+ static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
if (it.has_next()) {
// Free the space at the top of the page. We cannot use
// p->mc_relocation_top after the call to Free (because Free will clear
// remembered set bits).
- int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
+ int extra_size =
+ static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
if (extra_size > 0) {
int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
// The bytes we have just "freed" to add to the free list were
@@ -1828,13 +1834,16 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
return AllocateInNextPage(current_page, size_in_bytes);
}
- // There is no next page in this space. Try free list allocation.
- int wasted_bytes;
- Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- if (!result->IsFailure()) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::cast(result);
+ // There is no next page in this space. Try free list allocation unless that
+ // is currently forbidden.
+ if (!Heap::linear_allocation()) {
+ int wasted_bytes;
+ Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ if (!result->IsFailure()) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return HeapObject::cast(result);
+ }
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -1862,7 +1871,8 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
// Add the block at the top of this page to the free list.
- int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
+ int free_size =
+ static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
@@ -1962,7 +1972,7 @@ static void CollectCommentStatistics(RelocIterator* it) {
if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
const char* const txt =
reinterpret_cast<const char*>(it->rinfo()->data());
- flat_delta += it->rinfo()->pc() - prev_pc;
+ flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
if (txt[0] == ']') break; // End of nested comment
// A new comment
CollectCommentStatistics(it);
@@ -1990,7 +2000,7 @@ void PagedSpace::CollectCodeStatistics() {
const byte* prev_pc = code->instruction_start();
while (!it.done()) {
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
- delta += it.rinfo()->pc() - prev_pc;
+ delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
CollectCommentStatistics(&it);
prev_pc = it.rinfo()->pc();
}
@@ -1999,7 +2009,7 @@ void PagedSpace::CollectCodeStatistics() {
ASSERT(code->instruction_start() <= prev_pc &&
prev_pc <= code->relocation_start());
- delta += code->relocation_start() - prev_pc;
+ delta += static_cast<int>(code->relocation_start() - prev_pc);
EnterComment("NoComment", delta);
}
}
@@ -2028,7 +2038,8 @@ void OldSpace::ReportStatistics() {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address() - Page::kRSetOffset;
+ int intoff =
+ static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2205,9 +2216,10 @@ void FixedSpace::MCCommitRelocationInfo() {
while (it.has_next()) {
Page* page = it.next();
Address page_top = page->AllocationTop();
- computed_size += page_top - page->ObjectAreaStart();
+ computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
if (it.has_next()) {
- accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
+ accounting_stats_.WasteBytes(
+ static_cast<int>(page->ObjectAreaEnd() - page_top));
}
}
@@ -2230,10 +2242,10 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
return AllocateInNextPage(current_page, size_in_bytes);
}
- // There is no next page in this space. Try free list allocation.
- // The fixed space free list implicitly assumes that all free blocks
- // are of the fixed size.
- if (size_in_bytes == object_size_in_bytes_) {
+ // There is no next page in this space. Try free list allocation unless
+ // that is currently forbidden. The fixed space free list implicitly assumes
+ // that all free blocks are of the fixed size.
+ if (!Heap::linear_allocation()) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
@@ -2293,7 +2305,8 @@ void FixedSpace::ReportStatistics() {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address() - Page::kRSetOffset;
+ int intoff =
+ static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2414,7 +2427,7 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
- int os_alignment = OS::AllocateAlignment();
+ int os_alignment = static_cast<int>(OS::AllocateAlignment());
if (os_alignment < Page::kPageSize)
size_in_bytes += (Page::kPageSize - os_alignment);
return size_in_bytes + Page::kObjectStartOffset;
@@ -2493,7 +2506,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
return Failure::RetryAfterGC(requested_size, identity());
}
- size_ += chunk_size;
+ size_ += static_cast<int>(chunk_size);
page_count_++;
chunk->set_next(first_chunk_);
chunk->set_size(chunk_size);
@@ -2644,7 +2657,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
if (object->IsCode()) {
LOG(CodeDeleteEvent(object->address()));
}
- size_ -= chunk_size;
+ size_ -= static_cast<int>(chunk_size);
page_count_--;
MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
LOG(DeleteEvent("LargeObjectChunk", chunk_address));
diff --git a/src/spaces.h b/src/spaces.h
index 9e1d873c..75b992ff 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -172,7 +172,7 @@ class Page {
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
- int offset = a - address();
+ int offset = static_cast<int>(a - address());
ASSERT_PAGE_OFFSET(offset);
return offset;
}
@@ -1116,7 +1116,9 @@ class SemiSpace : public Space {
}
// The offset of an address from the beginning of the space.
- int SpaceOffsetForAddress(Address addr) { return addr - low(); }
+ int SpaceOffsetForAddress(Address addr) {
+ return static_cast<int>(addr - low());
+ }
// If we don't have this here then SemiSpace will be abstract. However
// it should never be called.
@@ -1255,7 +1257,7 @@ class NewSpace : public Space {
}
// Return the allocated bytes in the active semispace.
- virtual int Size() { return top() - bottom(); }
+ virtual int Size() { return static_cast<int>(top() - bottom()); }
// Return the current capacity of a semispace.
int Capacity() {
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 8c62a45f..d1859a20 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -188,7 +188,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
void StringStream::PrintObject(Object* o) {
o->ShortPrint(this);
if (o->IsString()) {
- if (String::cast(o)->length() <= String::kMaxMediumStringSize) {
+ if (String::cast(o)->length() <= String::kMaxShortPrintLength) {
return;
}
} else if (o->IsNumber() || o->IsOddball()) {
diff --git a/src/string.js b/src/string.js
index d2d6e969..4f9957a6 100644
--- a/src/string.js
+++ b/src/string.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -180,7 +180,7 @@ function SubString(string, start, end) {
}
return %CharFromCode(char_code);
}
- return %StringSlice(string, start, end);
+ return %SubString(string, start, end);
}
@@ -380,12 +380,19 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// Unfortunately, that means this code is nearly duplicated, here and in
// jsregexp.cc.
if (regexp.global) {
+ var numberOfCaptures = NUMBER_OF_CAPTURES(matchInfo) >> 1;
var previous = 0;
do {
- result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
var startOfMatch = matchInfo[CAPTURE0];
+ result.addSpecialSlice(previous, startOfMatch);
previous = matchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ if (numberOfCaptures == 1) {
+ var match = SubString(subject, startOfMatch, previous);
+ // Don't call directly to avoid exposing the built-in global object.
+ result.add(replace.call(null, match, startOfMatch, subject));
+ } else {
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ }
// Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
@@ -810,10 +817,13 @@ ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
var len = end - start;
if (len == 0) return;
var elements = this.elements;
- if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) {
+ if (start < 0x80000 && len < 0x800) {
elements[elements.length] = (start << 11) + len;
} else {
- elements[elements.length] = SubString(this.special_string, start, end);
+ // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
+ // so -len is a smi.
+ elements[elements.length] = -len;
+ elements[elements.length] = start;
}
}
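
The fast case above packs a slice into a single smi as (start << 11) + len, valid because start < 0x80000 and len < 0x800, so the packed value stays below 2^30 and fits a 31-bit smi. A minimal standalone C++ sketch of that arithmetic (helper names are illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Pack (start, len) into one 30-bit value: 19 bits of start, 11 bits of len.
    // Only valid under the bounds checked above: start < 0x80000, 0 < len < 0x800.
    int32_t PackSlice(int32_t start, int32_t len) {
      assert(start >= 0 && start < 0x80000);
      assert(len > 0 && len < 0x800);
      return (start << 11) + len;
    }

    void UnpackSlice(int32_t packed, int32_t* start, int32_t* len) {
      *len = packed & 0x7FF;   // low 11 bits
      *start = packed >> 11;   // high 19 bits
    }

The two-element fallback covers everything else: a negative length (always a smi, per the comment above) followed by the start index.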
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index e10dc61b..51d9ddb8 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -735,16 +735,24 @@ Handle<Code> ComputeCallMiss(int argc) {
Object* LoadCallbackProperty(Arguments args) {
+ ASSERT(args[0]->IsJSObject());
+ ASSERT(args[1]->IsJSObject());
AccessorInfo* callback = AccessorInfo::cast(args[2]);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
- v8::AccessorInfo info(args.arguments());
+ CustomArguments custom_args(callback->data(),
+ JSObject::cast(args[0]),
+ JSObject::cast(args[1]));
+ v8::AccessorInfo info(custom_args.end());
HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(getter_address);
+#endif
result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
@@ -768,6 +776,9 @@ Object* StoreCallbackProperty(Arguments args) {
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(setter_address);
+#endif
fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
diff --git a/src/stub-cache.h b/src/stub-cache.h
index e2689202..788c5324 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -226,9 +226,9 @@ class StubCache : public AllStatic {
// hash code would effectively throw away two bits of the hash
// code.
ASSERT(kHeapObjectTagSize == String::kHashShift);
- // Compute the hash of the name (use entire length field).
+ // Compute the hash of the name (use entire hash field).
ASSERT(name->HasHashCode());
- uint32_t field = name->length_field();
+ uint32_t field = name->hash_field();
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
diff --git a/src/third_party/valgrind/valgrind.h b/src/third_party/valgrind/valgrind.h
index 47f369b1..a94dc58b 100644
--- a/src/third_party/valgrind/valgrind.h
+++ b/src/third_party/valgrind/valgrind.h
@@ -74,6 +74,7 @@
#define __VALGRIND_H
#include <stdarg.h>
+#include <stdint.h>
/* Nb: this file might be included in a file compiled with -ansi. So
we can't use C++ style "//" comments nor the "asm" keyword (instead
@@ -232,7 +233,7 @@ typedef
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
+ uint64_t nraddr; /* where's the code? */
}
OrigFn;
@@ -243,14 +244,14 @@ typedef
#define VALGRIND_DO_CLIENT_REQUEST( \
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned long long int _zzq_args[6]; \
- volatile unsigned long long int _zzq_result; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ { volatile uint64_t _zzq_args[6]; \
+ volatile uint64_t _zzq_result; \
+ _zzq_args[0] = (uint64_t)(_zzq_request); \
+ _zzq_args[1] = (uint64_t)(_zzq_arg1); \
+ _zzq_args[2] = (uint64_t)(_zzq_arg2); \
+ _zzq_args[3] = (uint64_t)(_zzq_arg3); \
+ _zzq_args[4] = (uint64_t)(_zzq_arg4); \
+ _zzq_args[5] = (uint64_t)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
@@ -263,7 +264,7 @@ typedef
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long long int __addr; \
+ volatile uint64_t __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
@@ -346,8 +347,8 @@ typedef
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
+ uint64_t nraddr; /* where's the code? */
+ uint64_t r2; /* what tocptr do we need? */
}
OrigFn;
@@ -359,15 +360,15 @@ typedef
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned long long int _zzq_args[6]; \
- register unsigned long long int _zzq_result __asm__("r3"); \
- register unsigned long long int* _zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ { uint64_t _zzq_args[6]; \
+ register uint64_t _zzq_result __asm__("r3"); \
+ register uint64_t* _zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (uint64_t)(_zzq_request); \
+ _zzq_args[1] = (uint64_t)(_zzq_arg1); \
+ _zzq_args[2] = (uint64_t)(_zzq_arg2); \
+ _zzq_args[3] = (uint64_t)(_zzq_arg3); \
+ _zzq_args[4] = (uint64_t)(_zzq_arg4); \
+ _zzq_args[5] = (uint64_t)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
@@ -380,7 +381,7 @@ typedef
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned long long int __addr __asm__("r3"); \
+ register uint64_t __addr __asm__("r3"); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2" \
@@ -484,8 +485,8 @@ typedef
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
+ uint64_t nraddr; /* where's the code? */
+ uint64_t r2; /* what tocptr do we need? */
}
OrigFn;
@@ -497,9 +498,9 @@ typedef
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned long long int _zzq_args[7]; \
- register unsigned long long int _zzq_result; \
- register unsigned long long int* _zzq_ptr; \
+ { uint64_t _zzq_args[7]; \
+ register uint64_t _zzq_result; \
+ register uint64_t* _zzq_ptr; \
_zzq_args[0] = (unsigned int long long)(_zzq_request); \
_zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
_zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
@@ -522,7 +523,7 @@ typedef
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned long long int __addr; \
+ register uint64_t __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
diff --git a/src/token.cc b/src/token.cc
index bb42cead..0a4ad4c1 100644
--- a/src/token.cc
+++ b/src/token.cc
@@ -55,109 +55,4 @@ int8_t Token::precedence_[NUM_TOKENS] = {
#undef T
-// A perfect (0 collision) hash table of keyword token values.
-
-// larger N will reduce the number of collisions (power of 2 for fast %)
-const unsigned int N = 128;
-// make this small since we have <= 256 tokens
-static uint8_t Hashtable[N];
-static bool IsInitialized = false;
-
-
-static unsigned int Hash(const char* s) {
- // The following constants have been found using trial-and-error. If the
- // keyword set changes, they may have to be recomputed (make them flags
- // and play with the flag values). Increasing N is the simplest way to
- // reduce the number of collisions.
-
- // we must use at least 4 or more chars ('const' and 'continue' share
- // 'con')
- const unsigned int L = 5;
- // smaller S tend to reduce the number of collisions
- const unsigned int S = 4;
- // make this a prime, or at least an odd number
- const unsigned int M = 3;
-
- unsigned int h = 0;
- for (unsigned int i = 0; s[i] != '\0' && i < L; i++) {
- h += (h << S) + s[i];
- }
- // unsigned int % by a power of 2 (otherwise this will not be a bit mask)
- return h * M % N;
-}
-
-
-Token::Value Token::Lookup(const char* str) {
- ASSERT(IsInitialized);
- Value k = static_cast<Value>(Hashtable[Hash(str)]);
- const char* s = string_[k];
- ASSERT(s != NULL || k == IDENTIFIER);
- if (s == NULL || strcmp(s, str) == 0) {
- return k;
- }
- return IDENTIFIER;
-}
-
-
-#ifdef DEBUG
-// We need this function because C++ doesn't allow the expression
-// NULL == NULL, which is a result of macro expansion below. What
-// the hell?
-static bool IsNull(const char* s) {
- return s == NULL;
-}
-#endif
-
-
-void Token::Initialize() {
- if (IsInitialized) return;
-
- // A list of all keywords, terminated by ILLEGAL.
-#define T(name, string, precedence) name,
- static Value keyword[] = {
- TOKEN_LIST(IGNORE_TOKEN, T, IGNORE_TOKEN)
- ILLEGAL
- };
-#undef T
-
- // Assert that the keyword array contains the 25 keywords, 3 future
- // reserved words (const, debugger, and native), and the 3 named literals
- // defined by ECMA-262 standard.
- ASSERT(ARRAY_SIZE(keyword) == 25 + 3 + 3 + 1); // +1 for ILLEGAL sentinel
-
- // Initialize Hashtable.
- ASSERT(NUM_TOKENS <= 256); // Hashtable contains uint8_t elements
- for (unsigned int i = 0; i < N; i++) {
- Hashtable[i] = IDENTIFIER;
- }
-
- // Insert all keywords into Hashtable.
- int collisions = 0;
- for (int i = 0; keyword[i] != ILLEGAL; i++) {
- Value k = keyword[i];
- unsigned int h = Hash(string_[k]);
- if (Hashtable[h] != IDENTIFIER) collisions++;
- Hashtable[h] = k;
- }
-
- if (collisions > 0) {
- PrintF("%d collisions in keyword hashtable\n", collisions);
- FATAL("Fix keyword lookup!");
- }
-
- IsInitialized = true;
-
- // Verify hash table.
-#define T(name, string, precedence) \
- ASSERT(IsNull(string) || Lookup(string) == IDENTIFIER);
-
-#define K(name, string, precedence) \
- ASSERT(Lookup(string) == name);
-
- TOKEN_LIST(T, K, IGNORE_TOKEN)
-
-#undef K
-#undef T
-}
-
} } // namespace v8::internal
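
The deleted keyword lookup used a trial-and-error perfect hash over at most the first five characters of a candidate identifier. A standalone C++ replica of the removed Hash() with its L=5, S=4, M=3, N=128 constants, for anyone curious how the removed table was keyed:

    #include <cstdio>

    // Mirrors the deleted Token::Hash: mix at most the first 5 chars,
    // then fold into the 128-entry table with an odd multiplier.
    static unsigned int Hash(const char* s) {
      const unsigned int L = 5, S = 4, M = 3, N = 128;
      unsigned int h = 0;
      for (unsigned int i = 0; s[i] != '\0' && i < L; i++) {
        h += (h << S) + s[i];
      }
      return h * M % N;  // N is a power of 2, so % compiles to a mask
    }

    int main() {
      // 'const' and 'continue' share the prefix 'con', which is why the
      // deleted code insisted on hashing at least 4-5 characters.
      printf("%u %u\n", Hash("const"), Hash("continue"));
      return 0;
    }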
diff --git a/src/token.h b/src/token.h
index 4d4df634..a60704cd 100644
--- a/src/token.h
+++ b/src/token.h
@@ -260,15 +260,6 @@ class Token {
return precedence_[tok];
}
- // Returns the keyword value if str is a keyword;
- // returns IDENTIFIER otherwise. The class must
- // have been initialized.
- static Value Lookup(const char* str);
-
- // Must be called once to initialize the class.
- // Multiple calls are ignored.
- static void Initialize();
-
private:
#ifdef DEBUG
static const char* name_[NUM_TOKENS];
diff --git a/src/top.cc b/src/top.cc
index bb2dea4d..02748385 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -31,8 +31,9 @@
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
-#include "string-stream.h"
#include "platform.h"
+#include "simulator.h"
+#include "string-stream.h"
namespace v8 {
namespace internal {
@@ -50,6 +51,30 @@ Address top_addresses[] = {
NULL
};
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+ return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
+void ThreadLocalTop::Initialize() {
+ c_entry_fp_ = 0;
+ handler_ = 0;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ js_entry_sp_ = 0;
+#endif
+ stack_is_cooked_ = false;
+ try_catch_handler_address_ = NULL;
+ context_ = NULL;
+ int id = ThreadManager::CurrentId();
+ thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
+ external_caught_exception_ = false;
+ failed_access_check_callback_ = NULL;
+ save_context_ = NULL;
+ catcher_ = NULL;
+}
+
+
Address Top::get_address_from_id(Top::AddressId id) {
return top_addresses[id];
}
@@ -70,9 +95,9 @@ void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
v->VisitPointer(bit_cast<Object**, Context**>(&(thread->context_)));
v->VisitPointer(&(thread->scheduled_exception_));
- for (v8::TryCatch* block = thread->try_catch_handler_;
+ for (v8::TryCatch* block = thread->TryCatchHandler();
block != NULL;
- block = block->next_) {
+ block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
v->VisitPointer(bit_cast<Object**, void**>(&(block->exception_)));
v->VisitPointer(bit_cast<Object**, void**>(&(block->message_)));
}
@@ -91,23 +116,10 @@ void Top::Iterate(ObjectVisitor* v) {
void Top::InitializeThreadLocal() {
- thread_local_.c_entry_fp_ = 0;
- thread_local_.handler_ = 0;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- thread_local_.js_entry_sp_ = 0;
-#endif
- thread_local_.stack_is_cooked_ = false;
- thread_local_.try_catch_handler_ = NULL;
- thread_local_.context_ = NULL;
- int id = ThreadManager::CurrentId();
- thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
- thread_local_.external_caught_exception_ = false;
- thread_local_.failed_access_check_callback_ = NULL;
+ thread_local_.Initialize();
clear_pending_exception();
clear_pending_message();
clear_scheduled_exception();
- thread_local_.save_context_ = NULL;
- thread_local_.catcher_ = NULL;
}
@@ -254,46 +266,24 @@ void Top::TearDown() {
}
-// There are cases where the C stack is separated from JS stack (ARM simulator).
-// To figure out the order of top-most JS try-catch handler and the top-most C
-// try-catch handler, the C try-catch handler keeps a reference to the top-most
-// JS try_catch handler when it was created.
-//
-// Here is a picture to explain the idea:
-// Top::thread_local_.handler_ Top::thread_local_.try_catch_handler_
-//
-// | |
-// v v
-//
-// | JS handler | | C try_catch handler |
-// | next |--+ +-------- | js_handler_ |
-// | | | next_ |--+
-// | | |
-// | JS handler |--+ <---------+ |
-// | next |
-//
-// If the top-most JS try-catch handler is not equal to
-// Top::thread_local_.try_catch_handler_.js_handler_, it means the JS handler
-// is on the top. Otherwise, it means the C try-catch handler is on the top.
-//
void Top::RegisterTryCatchHandler(v8::TryCatch* that) {
- StackHandler* handler =
- reinterpret_cast<StackHandler*>(thread_local_.handler_);
-
- // Find the top-most try-catch handler.
- while (handler != NULL && !handler->is_try_catch()) {
- handler = handler->next();
- }
-
- that->js_handler_ = handler; // casted to void*
- thread_local_.try_catch_handler_ = that;
+ // The ARM simulator has a separate JS stack. We therefore register
+ // the C++ try catch handler with the simulator and get back an
+ // address that can be used for comparisons with addresses into the
+ // JS stack. When running without the simulator, the address
+ // returned will be the address of the C++ try catch handler itself.
+ Address address = reinterpret_cast<Address>(
+ SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
+ thread_local_.set_try_catch_handler_address(address);
}
void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_.try_catch_handler_ == that);
- thread_local_.try_catch_handler_ = that->next_;
+ ASSERT(thread_local_.TryCatchHandler() == that);
+ thread_local_.set_try_catch_handler_address(
+ reinterpret_cast<Address>(that->next_));
thread_local_.catcher_ = NULL;
+ SimulatorStack::UnregisterCTryCatch();
}
@@ -725,20 +715,18 @@ bool Top::ShouldReturnException(bool* is_caught_externally,
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
- v8::TryCatch* try_catch = thread_local_.try_catch_handler_;
+ Address external_handler_address = thread_local_.try_catch_handler_address();
// The exception has been externally caught if and only if there is
// an external handler which is on top of the top-most try-catch
// handler.
- //
- // See comments in RegisterTryCatchHandler for details.
- *is_caught_externally = try_catch != NULL &&
- (handler == NULL || handler == try_catch->js_handler_ ||
+ *is_caught_externally = external_handler_address != NULL &&
+ (handler == NULL || handler->address() > external_handler_address ||
!catchable_by_javascript);
if (*is_caught_externally) {
// Only report the exception if the external handler is verbose.
- return thread_local_.try_catch_handler_->is_verbose_;
+ return thread_local_.TryCatchHandler()->is_verbose_;
} else {
// Report the exception if it isn't caught by JavaScript code.
return handler == NULL;
@@ -775,7 +763,7 @@ void Top::DoThrow(Object* exception,
MessageLocation potential_computed_location;
bool try_catch_needs_message =
is_caught_externally &&
- thread_local_.try_catch_handler_->capture_message_;
+ thread_local_.TryCatchHandler()->capture_message_;
if (report_exception || try_catch_needs_message) {
if (location == NULL) {
// If no location was specified we use a computed one instead
@@ -806,7 +794,7 @@ void Top::DoThrow(Object* exception,
}
if (is_caught_externally) {
- thread_local_.catcher_ = thread_local_.try_catch_handler_;
+ thread_local_.catcher_ = thread_local_.TryCatchHandler();
}
// NOTE: Notifying the debugger or generating the message
@@ -830,15 +818,15 @@ void Top::ReportPendingMessages() {
} else if (thread_local_.pending_exception_ ==
Heap::termination_exception()) {
if (external_caught) {
- thread_local_.try_catch_handler_->can_continue_ = false;
- thread_local_.try_catch_handler_->exception_ = Heap::null_value();
+ thread_local_.TryCatchHandler()->can_continue_ = false;
+ thread_local_.TryCatchHandler()->exception_ = Heap::null_value();
}
} else {
Handle<Object> exception(pending_exception());
thread_local_.external_caught_exception_ = false;
if (external_caught) {
- thread_local_.try_catch_handler_->can_continue_ = true;
- thread_local_.try_catch_handler_->exception_ =
+ thread_local_.TryCatchHandler()->can_continue_ = true;
+ thread_local_.TryCatchHandler()->exception_ =
thread_local_.pending_exception_;
if (!thread_local_.pending_message_obj_->IsTheHole()) {
try_catch_handler()->message_ = thread_local_.pending_message_obj_;
@@ -892,9 +880,9 @@ bool Top::OptionalRescheduleException(bool is_bottom_call) {
// If the exception is externally caught, clear it if there are no
// JavaScript frames on the way to the C++ frame that has the
// external handler.
- ASSERT(thread_local_.try_catch_handler_ != NULL);
+ ASSERT(thread_local_.try_catch_handler_address() != NULL);
Address external_handler_address =
- reinterpret_cast<Address>(thread_local_.try_catch_handler_);
+ thread_local_.try_catch_handler_address();
JavaScriptFrameIterator it;
if (it.done() || (it.frame()->sp() > external_handler_address)) {
clear_exception = true;
@@ -941,6 +929,19 @@ Handle<Context> Top::global_context() {
Handle<Context> Top::GetCallingGlobalContext() {
JavaScriptFrameIterator it;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (Debug::InDebugger()) {
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ if (context->global_context() == *Debug::debug_context()) {
+ it.Advance();
+ } else {
+ break;
+ }
+ }
+ }
+#endif // ENABLE_DEBUGGER_SUPPORT
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
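
The address-based scheme above works because the stack grows downward: a handler at a higher address was entered earlier, so the exception is caught externally exactly when no JS handler sits below the C++ one. A hedged sketch of the predicate used later in ShouldReturnException (names are illustrative, not V8's API):

    #include <cstddef>

    typedef unsigned char* Address;

    // True when the C++ try-catch sits closer to the top of a downward-
    // growing stack than the innermost JS handler (higher address ==
    // entered earlier, i.e. deeper in the stack).
    bool CaughtExternally(Address js_handler, Address external_handler,
                          bool catchable_by_javascript) {
      if (external_handler == NULL) return false;
      return js_handler == NULL ||
             js_handler > external_handler ||
             !catchable_by_javascript;
    }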
diff --git a/src/top.h b/src/top.h
index ae94f08e..8780844b 100644
--- a/src/top.h
+++ b/src/top.h
@@ -43,6 +43,41 @@ class SaveContext; // Forward declaration.
class ThreadLocalTop BASE_EMBEDDED {
public:
+ // Initialize the thread data.
+ void Initialize();
+
+ // Get the top C++ try catch handler or NULL if none are registered.
+ //
+  // This method is not guaranteed to return an address that can be
+ // used for comparison with addresses into the JS stack. If such an
+ // address is needed, use try_catch_handler_address.
+ v8::TryCatch* TryCatchHandler();
+
+ // Get the address of the top C++ try catch handler or NULL if
+ // none are registered.
+ //
+ // This method always returns an address that can be compared to
+ // pointers into the JavaScript stack. When running on actual
+ // hardware, try_catch_handler_address and TryCatchHandler return
+ // the same pointer. When running on a simulator with a separate JS
+ // stack, try_catch_handler_address returns a JS stack address that
+ // corresponds to the place on the JS stack where the C++ handler
+ // would have been if the stack were not separate.
+ inline Address try_catch_handler_address() {
+ return try_catch_handler_address_;
+ }
+
+ // Set the address of the top C++ try catch handler.
+ inline void set_try_catch_handler_address(Address address) {
+ try_catch_handler_address_ = address;
+ }
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_address_ == NULL);
+ }
+
// The context where the current execution method is created and for variable
// lookups.
Context* context_;
@@ -59,7 +94,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// unify them later.
Object* scheduled_exception_;
bool external_caught_exception_;
- v8::TryCatch* try_catch_handler_;
SaveContext* save_context_;
v8::TryCatch* catcher_;
@@ -79,14 +113,11 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_ == NULL);
- }
+ private:
+ Address try_catch_handler_address_;
};
-#define TOP_ADDRESS_LIST(C) \
+#define TOP_ADDRESS_LIST(C) \
C(handler_address) \
C(c_entry_fp_address) \
C(context_address) \
@@ -157,7 +188,10 @@ class Top {
thread_local_.pending_message_script_ = NULL;
}
static v8::TryCatch* try_catch_handler() {
- return thread_local_.try_catch_handler_;
+ return thread_local_.TryCatchHandler();
+ }
+ static Address try_catch_handler_address() {
+ return thread_local_.try_catch_handler_address();
}
// This method is called by the api after operations that may throw
// exceptions. If an exception was thrown and not handled by an external
@@ -170,6 +204,10 @@ class Top {
return &thread_local_.external_caught_exception_;
}
+ static Object** scheduled_exception_address() {
+ return &thread_local_.scheduled_exception_;
+ }
+
static Object* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_.scheduled_exception_;
@@ -185,7 +223,7 @@ class Top {
thread_local_.external_caught_exception_ =
has_pending_exception() &&
(thread_local_.catcher_ != NULL) &&
- (thread_local_.try_catch_handler_ == thread_local_.catcher_);
+ (try_catch_handler() == thread_local_.catcher_);
}
// Tells whether the current context has experienced an out of memory
diff --git a/src/utils.cc b/src/utils.cc
index 3c684b81..08ee16ff 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -129,7 +129,7 @@ char* ReadLine(const char* prompt) {
}
return NULL;
}
- int len = strlen(line_buf);
+ int len = StrLength(line_buf);
if (len > 1 &&
line_buf[len - 2] == '\\' &&
line_buf[len - 1] == '\n') {
@@ -184,7 +184,7 @@ char* ReadCharsFromFile(const char* filename,
char* result = NewArray<char>(*size + extra_space);
for (int i = 0; i < *size;) {
- int read = fread(&result[i], 1, *size - i, file);
+ int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
if (read <= 0) {
fclose(file);
DeleteArray(result);
@@ -221,7 +221,7 @@ Vector<const char> ReadFile(const char* filename,
int WriteCharsToFile(const char* str, int size, FILE* f) {
int total = 0;
while (total < size) {
- int write = fwrite(str, 1, size - total, f);
+ int write = static_cast<int>(fwrite(str, 1, size - total, f));
if (write == 0) {
return total;
}
@@ -265,7 +265,7 @@ StringBuilder::StringBuilder(int size) {
void StringBuilder::AddString(const char* s) {
- AddSubstring(s, strlen(s));
+ AddSubstring(s, StrLength(s));
}
@@ -309,4 +309,13 @@ char* StringBuilder::Finalize() {
return buffer_.start();
}
+
+int TenToThe(int exponent) {
+ ASSERT(exponent <= 9);
+ ASSERT(exponent >= 1);
+ int answer = 10;
+ for (int i = 1; i < exponent; i++) answer *= 10;
+ return answer;
+}
+
} } // namespace v8::internal
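
TenToThe is a bounded power-of-ten helper computed by repeated multiplication, so TenToThe(3) is 1000 and the largest allowed exponent, 9, still fits a 32-bit int. A quick standalone check (assert-based sketch, not part of the patch):

    #include <cassert>

    int TenToThe(int exponent) {  // same shape as the helper above
      int answer = 10;
      for (int i = 1; i < exponent; i++) answer *= 10;
      return answer;
    }

    int main() {
      assert(TenToThe(1) == 10);
      assert(TenToThe(3) == 1000);
      assert(TenToThe(9) == 1000000000);  // largest exponent the ASSERTs allow
      return 0;
    }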
diff --git a/src/utils.h b/src/utils.h
index f4a0598c..0fd24ec9 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -66,7 +66,7 @@ static inline intptr_t OffsetFrom(T x) {
// integral types.
template <typename T>
static inline T AddressFrom(intptr_t x) {
- return static_cast<T>(0) + x;
+ return static_cast<T>(static_cast<T>(0) + x);
}
@@ -137,6 +137,13 @@ static T Min(T a, T b) {
}
+inline int StrLength(const char* string) {
+ size_t length = strlen(string);
+ ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+ return static_cast<int>(length);
+}
+
+
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
@@ -449,15 +456,15 @@ class ScopedVector : public Vector<T> {
inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, static_cast<int>(strlen(data)));
+ return Vector<const char>(data, StrLength(data));
}
inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, static_cast<int>(strlen(data)));
+ return Vector<char>(data, StrLength(data));
}
inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = static_cast<int>(strlen(data));
+ int length = StrLength(data);
return Vector<char>(data, (length < max) ? length : max);
}
@@ -577,6 +584,9 @@ static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
+// Calculate 10^exponent.
+int TenToThe(int exponent);
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
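
StrLength exists so size_t-returning strlen can feed the int-based Vector APIs without silent narrowing; the ASSERT verifies the value survives the int round-trip. The same check in a standalone sketch:

    #include <cassert>
    #include <cstring>

    // Narrow a size_t length to int, asserting nothing was lost.
    inline int StrLength(const char* string) {
      size_t length = strlen(string);
      assert(length == static_cast<size_t>(static_cast<int>(length)));
      return static_cast<int>(length);
    }

    int main() {
      assert(StrLength("hello") == 5);
      return 0;
    }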
diff --git a/src/v8-counters.h b/src/v8-counters.h
index b3f29f53..d6f53fab 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -153,8 +153,9 @@ namespace internal {
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)
-
+ SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
+ SC(string_add_runtime, V8.StringAddRuntime) \
+ SC(string_add_native, V8.StringAddNative)
// This file contains all the v8 counters that are in use.
class Counters : AllStatic {
diff --git a/src/v8.cc b/src/v8.cc
index 3c70ee96..3bec827a 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -30,12 +30,10 @@
#include "bootstrapper.h"
#include "debug.h"
#include "serialize.h"
+#include "simulator.h"
#include "stub-cache.h"
#include "oprofile-agent.h"
-
-#if V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#endif
+#include "log.h"
namespace v8 {
namespace internal {
@@ -61,7 +59,6 @@ bool V8::Initialize(Deserializer *des) {
// Enable logging before setting up the heap
Logger::Setup();
- if (des) des->GetLog();
// Setup the platform OS support.
OS::Setup();
@@ -108,7 +105,7 @@ bool V8::Initialize(Deserializer *des) {
// Deserializing may put strange things in the root array's copy of the
// stack guard.
- Heap::SetStackLimit(StackGuard::jslimit());
+ Heap::SetStackLimits();
// Setup the CPU support. Must be done after heap setup and after
// any deserialization because we have to have the initial heap
@@ -117,6 +114,11 @@ bool V8::Initialize(Deserializer *des) {
OProfileAgent::Initialize();
+ if (FLAG_log_code) {
+ HandleScope scope;
+ LOG(LogCompiledFunctions());
+ }
+
return true;
}
diff --git a/src/v8.h b/src/v8.h
index 106ae612..b3624c5d 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -72,6 +72,8 @@
namespace v8 {
namespace internal {
+class Deserializer;
+
class V8 : public AllStatic {
public:
// Global actions.
diff --git a/src/v8natives.js b/src/v8natives.js
index 2fecee80..8f9adcbb 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -95,8 +95,8 @@ function GlobalParseInt(string, radix) {
// they make parseInt on a string 1.4% slower (274ns vs 270ns).
if (%_IsSmi(string)) return string;
if (IS_NUMBER(string) &&
- ((string < -0.01 && -1e9 < string) ||
- (0.01 < string && string < 1e9))) {
+ ((0.01 < string && string < 1e9) ||
+ (-1e9 < string && string < -0.01))) {
// Truncate number.
return string | 0;
}
@@ -196,10 +196,7 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- var c = %_ClassOf(this);
- // Hide Arguments from the outside.
- if (c === 'Arguments') c = 'Object';
- return "[object " + c + "]";
+ return "[object " + %_ClassOf(this) + "]";
}
diff --git a/src/version.cc b/src/version.cc
index 54e688b0..3611d448 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -32,9 +32,9 @@
// These macros define the version number for the current version.
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
-#define MAJOR_VERSION 1
-#define MINOR_VERSION 3
-#define BUILD_NUMBER 17
+#define MAJOR_VERSION 2
+#define MINOR_VERSION 0
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8f078ff2..9c7f9b61 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -176,7 +176,7 @@ Address Assembler::target_address_at(Address pc) {
void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::int32_at(pc) = target - pc - 4;
+ Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
CPU::FlushICache(pc, sizeof(int32_t));
}
@@ -191,13 +191,13 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += delta;
+ Memory::Address_at(pc_) += static_cast<int32_t>(delta);
} else if (IsCodeTarget(rmode_)) {
- Memory::int32_at(pc_) -= delta;
+ Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
- Memory::int32_at(pc_ + 1) -= delta; // relocate entry
+ Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta); // relocate entry
}
}
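
The new casts narrow pointer-sized deltas into the 32-bit pc-relative fields x64 branches actually store; the sign difference between the two branches of apply() falls out of how a relative displacement is defined. A hedged sketch of the arithmetic (free functions here stand in for the Memory accessors):

    #include <stdint.h>

    // A 32-bit pc-relative field encodes: target = pc + 4 + disp. If the
    // code object (and hence pc) moves by delta while the target stays put,
    // the stored displacement must shrink by delta.
    void RelocatePcRelative(int32_t* disp, intptr_t delta) {
      *disp -= static_cast<int32_t>(delta);
    }

    // An absolute pointer into the moved object moves with it instead.
    void RelocateAbsolute(uintptr_t* slot, intptr_t delta) {
      *slot += static_cast<uintptr_t>(delta);
    }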
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 61e87536..2d524eaf 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -80,11 +80,15 @@ XMMRegister xmm15 = { 15 };
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
uint64_t CpuFeatures::enabled_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
ASSERT(supported_ == kDefaultCpuFeatures);
- if (Serializer::enabled()) return; // No features if we might serialize.
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
Assembler assm(NULL, 0);
Label cpuid, done;
@@ -160,6 +164,11 @@ void CpuFeatures::Probe() {
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
+ found_by_runtime_probing_ = supported_;
+ found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
+ uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= os_guarantees;
+ found_by_runtime_probing_ &= ~os_guarantees;
// SSE2 and CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
ASSERT(IsSupported(SSE2));
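
found_by_runtime_probing_ ends up holding exactly the features that are neither compiled-in defaults nor OS-guaranteed, which is what the serializer must refuse to depend on. A worked bitmask example of that set algebra (feature bit positions here are illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kDefault = uint64_t(1) << 0;  // e.g. the SSE2/CMOV bundle
      uint64_t probed = (uint64_t(1) << 0) | (uint64_t(1) << 1)
                      | (uint64_t(1) << 2);        // CPU reports all three
      uint64_t os_guarantees = uint64_t(1) << 1;   // platform promises this one

      // Same order of operations as Probe() above.
      uint64_t supported = probed;
      uint64_t found_by_runtime_probing = supported & ~kDefault;
      supported |= os_guarantees;
      found_by_runtime_probing &= ~os_guarantees;

      // Only bit 2 remains probe-only: neither default nor OS-guaranteed,
      // so serialized snapshots must not rely on it.
      assert(found_by_runtime_probing == (uint64_t(1) << 2));
      return 0;
    }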
@@ -337,7 +346,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->reloc_size =
+ static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
Counters::reloc_info_size.Increment(desc->reloc_size);
@@ -400,7 +410,8 @@ void Assembler::GrowBuffer() {
// setup new buffer
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+ desc.reloc_size =
+ static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
// Clear the buffer in debug mode. Use 'int3' instructions to make
// sure to get into problems if we ever run uninitialized code.
@@ -887,7 +898,7 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
@@ -2045,7 +2056,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 4f514f2a..fa7d33b1 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -37,6 +37,8 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
+#include "serialize.h"
+
namespace v8 {
namespace internal {
@@ -362,20 +364,11 @@ class Operand BASE_EMBEDDED {
// }
class CpuFeatures : public AllStatic {
public:
- // Feature flags bit positions. They are mostly based on the CPUID spec.
- // (We assign CPUID itself to one of the currently reserved bits --
- // feel free to change this if needed.)
- enum Feature { SSE3 = 32,
- SSE2 = 26,
- CMOV = 15,
- RDTSC = 4,
- CPUID = 10,
- SAHF = 0};
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(Feature f) {
+ static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -384,33 +377,35 @@ class CpuFeatures : public AllStatic {
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(Feature f) {
+ static bool IsEnabled(CpuFeature f) {
return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(Feature f) {
+ explicit Scope(CpuFeature f) {
+ uint64_t mask = (V8_UINT64_C(1) << f);
ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
+ CpuFeatures::enabled_ |= mask;
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
#else
public:
- explicit Scope(Feature f) {}
+ explicit Scope(CpuFeature f) {}
#endif
};
private:
// Safe defaults include SSE2 and CMOV for X64. They are always available, if
// anyone checks, but they shouldn't need to check.
- static const uint64_t kDefaultCpuFeatures =
- (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
+ static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
static uint64_t supported_;
static uint64_t enabled_;
+ static uint64_t found_by_runtime_probing_;
};
@@ -458,7 +453,25 @@ class Assembler : public Malloced {
// the relative displacements stored in the code.
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
+
+ // This sets the branch destination (which is in the instruction on x64).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ // This sets the branch destination (which is a load instruction on x64).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address instruction_payload,
+ Address target) {
+ *reinterpret_cast<Address*>(instruction_payload) = target;
+ }
+
inline Handle<Object> code_target_object_handle_at(Address pc);
+ // Number of bytes taken up by the branch target in the code.
+ static const int kCallTargetSize = 4; // Use 32-bit displacement.
+ static const int kExternalTargetSize = 8; // Use 64-bit absolute.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
@@ -469,6 +482,12 @@ class Assembler : public Malloced {
static const int kPatchReturnSequenceAddressOffset = 13 - 4;
// TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2;
+
+ // The x64 JS return sequence is padded with int3 to make it large
+ // enough to hold a call instruction when the debugger patches it.
+ static const int kCallInstructionLength = 13;
+ static const int kJSReturnSequenceLength = 13;
+
// ---------------------------------------------------------------------------
// Code generation
//
@@ -829,12 +848,12 @@ class Assembler : public Malloced {
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar(Register dst) {
+ void sar_cl(Register dst) {
shift(dst, 0x7);
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl(Register dst) {
+ void sarl_cl(Register dst) {
shift_32(dst, 0x7);
}
@@ -842,11 +861,11 @@ class Assembler : public Malloced {
shift(dst, shift_amount, 0x4);
}
- void shl(Register dst) {
+ void shl_cl(Register dst) {
shift(dst, 0x4);
}
- void shll(Register dst) {
+ void shll_cl(Register dst) {
shift_32(dst, 0x4);
}
@@ -858,11 +877,11 @@ class Assembler : public Malloced {
shift(dst, shift_amount, 0x5);
}
- void shr(Register dst) {
+ void shr_cl(Register dst) {
shift(dst, 0x5);
}
- void shrl(Register dst) {
+ void shrl_cl(Register dst) {
shift_32(dst, 0x5);
}
@@ -920,7 +939,11 @@ class Assembler : public Malloced {
void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
- arithmetic_op(0x33, dst, src);
+ if (dst.code() == src.code()) {
+ arithmetic_op_32(0x33, dst, src);
+ } else {
+ arithmetic_op(0x33, dst, src);
+ }
}
void xorl(Register dst, Register src) {
@@ -1109,7 +1132,7 @@ class Assembler : public Malloced {
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- int pc_offset() const { return pc_ - buffer_; }
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
@@ -1121,7 +1144,9 @@ class Assembler : public Malloced {
}
// Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+ inline int available_space() const {
+ return static_cast<int>(reloc_info_writer.pos() - pc_);
+ }
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
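
CpuFeatures::Scope is a debug-only RAII guard: it ORs the feature bit into the enabled mask and restores the previous mask on scope exit, and the new ASSERT refuses probe-only features while serializing. A stripped-down sketch of the pattern (not V8's class; SSE3's bit position is taken from the enum deleted above):

    #include <stdint.h>

    struct Features {
      static uint64_t enabled_;
      class Scope {
       public:
        explicit Scope(int f) : old_enabled_(enabled_) {
          enabled_ |= static_cast<uint64_t>(1) << f;  // turn the feature on...
        }
        ~Scope() { enabled_ = old_enabled_; }  // ...restore the mask on exit
       private:
        uint64_t old_enabled_;
      };
    };
    uint64_t Features::enabled_ = 0;

    void EmitSse3Sequence() {
      Features::Scope enable(32);  // SSE3 == bit 32 in the old enum
      // ... emit instructions guarded by IsEnabled-style checks ...
    }  // previous mask restored here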
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 01992ce4..f444d2cf 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -246,6 +246,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -318,47 +320,28 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- if (FLAG_check_stack) {
- // We need to catch preemptions right here, otherwise an unlucky preemption
- // could show up as a failed apply.
- Label retry_preemption;
- Label no_preemption;
- __ bind(&retry_preemption);
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ movq(kScratchRegister, stack_guard_limit);
- __ movq(rcx, rsp);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // rcx contains the difference between the stack limit and the stack top.
- // We use it below to check that there is enough room for the arguments.
- __ j(above, &no_preemption);
-
- // Preemption!
- // Because runtime functions always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack.
- __ push(rax);
- __ Push(Smi::FromInt(0));
-
- // Do call to runtime routine.
- __ CallRuntime(Runtime::kStackGuard, 1);
- __ pop(rax);
- __ jmp(&retry_preemption);
-
- __ bind(&no_preemption);
-
- Label okay;
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- __ cmpq(rcx, rdx);
- __ j(greater, &okay);
-
- // Too bad: Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- }
+  // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movq(rcx, rsp);
+  // Make rcx the space we have left. The stack might already have
+  // overflowed here, which will make rcx negative.
+ __ subq(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
// Push current index and limit.
const int kLimitOffset =
@@ -400,6 +383,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
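
The replacement reduces the old preemption dance to one signed comparison against the real stack limit: space left versus space the unrolled arguments need, with an already-blown stack yielding a negative left-hand side. The same check in scalar form (a sketch of the arithmetic, not the emitted code):

    #include <stdint.h>

    const int kPointerSize = 8;  // x64

    // Mirrors the emitted check: rcx = rsp - real_limit, rdx = argc * 8,
    // overflow unless rcx > rdx. The comparison is signed, so a stack that
    // has already overflowed (negative space_left) also takes the slow path.
    bool ArgumentsFit(intptr_t rsp, intptr_t real_stack_limit, intptr_t argc) {
      intptr_t space_left = rsp - real_stack_limit;  // may be negative
      intptr_t space_needed = argc * kPointerSize;
      return space_left > space_needed;
    }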
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0029b747..36f0e635 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
@@ -74,7 +75,6 @@ void DeferredCode::RestoreRegisters() {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
@@ -82,10 +82,8 @@ CodeGenState::CodeGenState(CodeGenerator* owner)
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
ControlDestination* destination)
: owner_(owner),
- typeof_state_(typeof_state),
destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
@@ -507,13 +505,13 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Add padding that will be overwritten by a debugger breakpoint.
// frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
// with length 7 (3 + 1 + 3).
- const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
// Check that the size of the code used for returning matches what is
// expected by the debugger.
- ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
DeleteFrame();
@@ -643,27 +641,6 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
void CodeGenerator::CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
@@ -676,7 +653,7 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
@@ -852,12 +829,10 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() {
- if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- deferred->Branch(below);
- deferred->BindExit();
- }
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ deferred->Branch(below);
+ deferred->BindExit();
}
@@ -1003,7 +978,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
@@ -1030,7 +1005,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -1050,7 +1025,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.true_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -1072,7 +1047,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
// or control flow effect). LoadCondition is called without
// forcing control flow.
ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->condition(), &dest, false);
if (!dest.is_used()) {
// We got a value on the frame rather than (or in addition to)
// control flow.
@@ -1343,8 +1318,10 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
node->continue_target()->Bind();
}
if (has_valid_frame()) {
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -1401,7 +1378,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -1448,7 +1425,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
// The break target is the fall-through (body is a backward
// jump from here and thus an invalid fall-through).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// If we have chosen not to recompile the test at the
@@ -1540,7 +1517,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -1610,7 +1587,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// The break target is the fall-through (body is a backward
// jump from here).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// Otherwise, jump back to the test at the top.
@@ -1685,8 +1662,54 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
// rax: value to be iterated over
- frame_->EmitPush(rax); // push the object being iterated over (slot 4)
+ frame_->EmitPush(rax); // Push the object being iterated over.
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ movq(rcx, rax);
+ loop.Bind();
+ // Check that there are no elements.
+ __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ call_runtime.Branch(not_equal);
+ // Check that instance descriptors are not empty so that we can
+  // check for an enum cache. Leave the map in rbx for the subsequent
+ // prototype load.
+ __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+ __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
+ call_runtime.Branch(equal);
+  // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+ is_smi = masm_->CheckSmi(rdx);
+ call_runtime.Branch(is_smi);
+ // For all objects but the receiver, check that the cache is empty.
+ __ cmpq(rcx, rax);
+ check_prototype.Branch(equal);
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ call_runtime.Branch(not_equal);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ CompareRoot(rcx, Heap::kNullValueRootIndex);
+ loop.Branch(not_equal);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
@@ -1699,8 +1722,11 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ CompareRoot(rcx, Heap::kMetaMapRootIndex);
fixed_array.Branch(not_equal);
+ use_cache.Bind();
// Get enum cache
- // rax: map (result from call to Runtime::kGetPropertyNamesFast)
+  // rax: map (either the result of a call to
+  // Runtime::kGetPropertyNamesFast or fetched directly from
+  // the object)
__ movq(rcx, rax);
__ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
// Get the bridge array held in the enumeration index field.
@@ -2190,7 +2216,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
InstantiateBoilerplate(boilerplate);
@@ -2210,25 +2237,25 @@ void CodeGenerator::VisitConditional(Conditional* node) {
JumpTarget else_;
JumpTarget exit;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
if (then.is_linked()) {
exit.Jump();
then.Bind();
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
}
} else {
// The then target was bound, so we compile the then part first.
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
if (else_.is_linked()) {
exit.Jump();
else_.Bind();
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
}
}
@@ -2238,7 +2265,7 @@ void CodeGenerator::VisitConditional(Conditional* node) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
}
@@ -2251,7 +2278,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValue(typeof_state());
+ ref.GetValue();
}
}
@@ -2642,9 +2669,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// the target, with an implicit promise that it will be written to again
// before it is read.
if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
} else {
- target.GetValue(NOT_INSIDE_TYPEOF);
+ target.GetValue();
}
Load(node->value());
GenericBinaryOperation(node->binary_op(),
@@ -2692,7 +2719,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
- property.GetValue(typeof_state());
+ property.GetValue();
}
@@ -2878,7 +2905,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -2984,9 +3011,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- // Note that because of NOT and an optimization in comparison of a typeof
- // expression to a literal string, this function can fail to leave a value
- // on top of the frame or in the cc register.
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -2995,7 +3019,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
// Swap the true and false targets but keep the same actual label
// as the fall through.
destination()->Invert();
- LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ LoadCondition(node->expression(), destination(), true);
// Swap the labels back.
destination()->Invert();
@@ -3235,7 +3259,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
Result new_value = frame_->Pop();
new_value.ToRegister();
@@ -3293,9 +3317,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// TODO(X64): This code was copied verbatim from codegen-ia32.
// Either find a reason to change it or move it to a shared location.
- // Note that due to an optimization in comparison operations (typeof
- // compared to a string literal), we can evaluate a binary expression such
- // as AND or OR and not leave a value on the frame or in the cc register.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -3311,7 +3332,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (op == Token::AND) {
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.false_was_fall_through()) {
// The current false target was used as the fall-through. If
@@ -3330,7 +3351,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
is_true.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have actually just jumped to or bound the current false
// target but the current control destination is not marked as
@@ -3341,7 +3362,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_true
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -3374,7 +3395,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (op == Token::OR) {
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.true_was_fall_through()) {
// The current true target was used as the fall-through. If
@@ -3393,7 +3414,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
is_false.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have just jumped to or bound the current true target but
// the current control destination is not marked as used.
@@ -3403,7 +3424,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_false
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -3525,6 +3546,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->false_target()->Branch(is_smi);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ destination()->true_target()->Branch(equal);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
@@ -3534,9 +3558,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
destination()->true_target()->Branch(equal);
+ // Regular expressions are typeof == 'function', not 'object'.
+ __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ destination()->false_target()->Branch(equal);
+
// It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
@@ -3639,6 +3665,48 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+
+ __ Move(kScratchRegister, Factory::null_value());
+ __ cmpq(obj.reg(), kScratchRegister);
+ destination()->true_target()->Branch(equal);
+
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(less);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ obj.Unuse();
+ destination()->Split(less_equal);
+}
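
The fast path above encodes the typeof-object semantics directly: smis and undetectable objects fail, null succeeds, and anything else passes only if its instance type lies in the JS-object range. A minimal sketch of the same predicate, with the map-derived inputs passed as plain parameters (the range bounds stand in for V8's FIRST_JS_OBJECT_TYPE/LAST_JS_OBJECT_TYPE; this is an illustration, not V8 API):

```cpp
// Sketch of the predicate GenerateIsObject emits. A real caller would read
// is_undetectable and instance_type from the value's map.
bool FastIsObject(bool is_smi, bool is_null, bool is_undetectable,
                  int instance_type, int first_js_object_type,
                  int last_js_object_type) {
  if (is_smi) return false;
  if (is_null) return true;           // typeof null == 'object'.
  if (is_undetectable) return false;  // Behaves like undefined.
  return instance_type >= first_js_object_type &&
         instance_type <= last_js_object_type;
}
```
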
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ obj.Unuse();
+ destination()->Split(equal);
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -3681,7 +3749,6 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Label slow_case;
Label end;
Label not_a_flat_string;
- Label a_cons_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
@@ -3749,30 +3816,19 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ testb(rcx, Immediate(kIsNotStringMask));
__ j(not_zero, &slow_case);
- // Here we make assumptions about the tag values and the shifts needed.
- // See the comment in objects.h.
- ASSERT(kLongStringTag == 0);
- ASSERT(kMediumStringTag + String::kLongLengthShift ==
- String::kMediumLengthShift);
- ASSERT(kShortStringTag + String::kLongLengthShift ==
- String::kShortLengthShift);
- __ and_(rcx, Immediate(kStringSizeMask));
- __ addq(rcx, Immediate(String::kLongLengthShift));
- // Fetch the length field into the temporary register.
- __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
- __ shrl(temp.reg()); // The shift amount in ecx is implicit operand.
// Check for index out of range.
- __ cmpl(index.reg(), temp.reg());
+ __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
__ j(greater_equal, &slow_case);
  // Reload the instance type (into the temp register this time).
__ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
__ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
// We need special handling for non-flat strings.
- ASSERT(kSeqStringTag == 0);
+ ASSERT_EQ(0, kSeqStringTag);
__ testb(temp.reg(), Immediate(kStringRepresentationMask));
__ j(not_zero, &not_a_flat_string);
// Check for 1-byte or 2-byte string.
+ ASSERT_EQ(0, kTwoByteStringTag);
__ testb(temp.reg(), Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
@@ -3799,21 +3855,16 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ bind(&not_a_flat_string);
__ and_(temp.reg(), Immediate(kStringRepresentationMask));
__ cmpb(temp.reg(), Immediate(kConsStringTag));
- __ j(equal, &a_cons_string);
- __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
__ j(not_equal, &slow_case);
- // SlicedString.
- // Add the offset to the index and trigger the slow case on overflow.
- __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
- __ j(overflow, &slow_case);
- // Getting the underlying string is done by running the cons string code.
-
// ConsString.
- __ bind(&a_cons_string);
- // Get the first of the two strings. Both sliced and cons strings
- // store their source string at the same offset.
- ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+  // Check that the right hand side is the empty string (i.e., that this is
+  // really a flat string in a cons string). If that is not the case, we would
+  // rather go to the runtime system now, to flatten the string.
+ __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
+ __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &slow_case);
+ // Get the first of the two strings.
__ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
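
The rewritten cons-string case only stays on the fast path when the cons cell is degenerate. A toy model of that test (ToyConsString is illustrative; V8's heap layout stores tagged pointers, not char*):

```cpp
#include <cstring>

// A cons string whose second half is empty is really a flat string wrapped
// in a cons cell, so its first half can be indexed directly; anything else
// is sent to the runtime, which flattens the string first.
struct ToyConsString {
  const char* first;
  const char* second;
};

bool CanIndexFirstDirectly(const ToyConsString& s) {
  return std::strlen(s.second) == 0;
}
```
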
@@ -3994,6 +4045,17 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
JumpTarget leave, null, function, non_function_constructor;
@@ -4124,18 +4186,17 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
// -----------------------------------------------------------------------------
// CodeGenerator implementation of Expressions
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
+void CodeGenerator::LoadAndSpill(Expression* expression) {
// TODO(x64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
set_in_spilled_code(false);
- Load(expression, typeof_state);
+ Load(expression);
frame_->SpillAll();
set_in_spilled_code(true);
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
@@ -4143,7 +4204,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(x, typeof_state, &dest, false);
+ LoadCondition(expr, &dest, false);
if (dest.false_was_fall_through()) {
// The false target was just bound.
@@ -4203,13 +4264,12 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, dest);
+ { CodeGenState new_state(this, dest);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -4837,23 +4897,25 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
- Variable* variable = x->AsVariableProxy()->AsVariable();
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- Load(&property);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+    // For a variable that rewrites to a slot, we signal that it is the
+    // immediate subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
} else {
- Load(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ Load(expr);
}
}
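
LoadTypeofExpression now makes the case split explicit. In outline (the enum and helper below are illustrative, not code from this patch):

```cpp
// Outline of LoadTypeofExpression's dispatch.
enum TypeofLoadPath {
  GLOBAL_PROPERTY_LOAD,     // Load <global>.<name>; never throws.
  SLOT_LOAD_INSIDE_TYPEOF,  // Slot load flagged INSIDE_TYPEOF.
  PLAIN_LOAD                // Everything else loads normally.
};

TypeofLoadPath ChooseTypeofLoad(bool is_variable, bool is_this,
                                bool is_global, bool has_slot) {
  if (is_variable && !is_this && is_global) return GLOBAL_PROPERTY_LOAD;
  if (is_variable && has_slot) return SLOT_LOAD_INSIDE_TYPEOF;
  return PLAIN_LOAD;
}
```
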
@@ -5057,10 +5119,8 @@ class DeferredInlineBinaryOperation: public DeferredCode {
void DeferredInlineBinaryOperation::Generate() {
- __ push(left_);
- __ push(right_);
- GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
- __ CallStub(&stub);
+ GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
@@ -5089,16 +5149,16 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// Bit operations always assume they likely operate on Smis. Still only
// generate the inline Smi check code if this operation is part of a loop.
flags = (loop_nesting() > 0)
- ? SMI_CODE_INLINED
- : SMI_CODE_IN_STUB;
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
break;
default:
// By default only inline the Smi check code for likely smis if this
// operation is part of a loop.
flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? SMI_CODE_INLINED
- : SMI_CODE_IN_STUB;
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
break;
}
@@ -5157,7 +5217,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
return;
}
- if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+ if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
frame_->Push(&left);
@@ -5166,7 +5226,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// that does not check for the fast smi case.
    // The same stub is used whether or not the fast smi case was inlined.
if (generate_no_smi_code) {
- flags = SMI_CODE_INLINED;
+ flags = NO_SMI_CODE_IN_STUB;
}
GenericBinaryOpStub stub(op, overwrite_mode, flags);
Result answer = frame_->CallStub(&stub, 2);
@@ -5221,41 +5281,33 @@ void DeferredReferenceGetNamedValue::Generate() {
void DeferredInlineSmiAdd::Generate() {
- __ push(dst_);
- __ Push(value_);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredInlineSmiAddReversed::Generate() {
- __ Push(value_);
- __ push(dst_);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, dst_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredInlineSmiSub::Generate() {
- __ push(dst_);
- __ Push(value_);
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredInlineSmiOperation::Generate() {
- __ push(src_);
- __ Push(value_);
// For mod we don't generate all the Smi code inline.
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
- (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
- __ CallStub(&stub);
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, src_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
@@ -5758,7 +5810,7 @@ Handle<String> Reference::GetName() {
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -5775,17 +5827,11 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that it is safe to ignore the
- // distinction between expressions in a typeof and not in a
- // typeof. If there is a chance that reference errors can be
- // thrown below, we must distinguish between the two kinds of
- // loads (typeof expression loads must not throw a reference
- // error).
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
@@ -5867,8 +5913,6 @@ void Reference::GetValue(TypeofState typeof_state) {
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
@@ -5990,7 +6034,7 @@ void Reference::GetValue(TypeofState typeof_state) {
}
-void Reference::TakeValue(TypeofState typeof_state) {
+void Reference::TakeValue() {
// TODO(X64): This function is completely architecture independent. Move
// it somewhere shared.
@@ -5999,7 +6043,7 @@ void Reference::TakeValue(TypeofState typeof_state) {
ASSERT(!cgen_->in_spilled_code());
ASSERT(!is_illegal());
if (type_ != SLOT) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6009,7 +6053,7 @@ void Reference::TakeValue(TypeofState typeof_state) {
slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST ||
slot->is_arguments()) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6179,11 +6223,8 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
// String value => false iff empty.
__ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
__ j(above_equal, &not_string);
- __ and_(rcx, Immediate(kStringSizeMask));
- __ cmpq(rcx, Immediate(kShortStringTag));
- __ j(not_equal, &true_result); // Empty string is always short.
__ movl(rdx, FieldOperand(rax, String::kLengthOffset));
- __ shr(rdx, Immediate(String::kShortLengthShift));
+ __ testl(rdx, rdx);
__ j(zero, &false_result);
__ jmp(&true_result);
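
This simplification works because the string length field now holds the length directly, with no short/medium/long shift encoding (the same change that removed the length-shift asserts in GenerateFastCharCodeAt above), so "false iff empty" reduces to a zero test. A sketch:

```cpp
#include <cstdint>

// ToBoolean on a string, after the length-field simplification: the empty
// string is the only falsy string value.
bool StringToBoolean(int32_t length_field) {
  return length_field != 0;
}
```
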
@@ -6379,19 +6420,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
// not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
- // Read double representation into rax.
- __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
- __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Test that exponent bits are all set.
- __ or_(rbx, rax);
- __ cmpq(rbx, rax);
- __ j(not_equal, &return_equal);
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ shl(rax, Immediate(12));
- // If all bits in the mantissa are zero the number is Infinity, and
- // we return zero. Otherwise it is a NaN, and we return non-zero.
- // We cannot just return rax because only eax is tested on return.
- __ setcc(not_zero, rax);
+ // We only allow QNaNs, which have bit 51 set (which also rules out
+ // the value being Infinity).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+ __ xorl(rax, rax);
+ __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
+ __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+ __ setcc(above_equal, rax);
__ ret(0);
__ bind(&not_identical);
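
The new sequence leans on the IEEE-754 layout: a quiet NaN has all eleven exponent bits and the top mantissa bit set, and all twelve of those bits sit in the high 32-bit word of the double. Shifting that word left by one discards the sign bit, after which a single unsigned comparison checks that every mask bit is set. A standalone sketch (the mask value mirrors what kQuietNaNHighBitsMask is assumed to be at this revision):

```cpp
#include <cstdint>
#include <cstring>

// Exponent bits plus the top mantissa bit of a double, as seen in its high
// 32-bit word (bits 30..19). Assumed to match V8's kQuietNaNHighBitsMask.
const uint32_t kQuietNaNHighBitsMask = 0xFFFu << (51 - 32);

bool IsQuietNaN(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  // Shift out the sign bit; the unsigned comparison then succeeds exactly
  // when all twelve mask bits are set, which is the QNaN pattern.
  return (high << 1) >= (kQuietNaNHighBitsMask << 1);
}
```
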
@@ -6614,11 +6654,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ jmp(&loop);
__ bind(&is_instance);
- __ xor_(rax, rax);
+ __ xorl(rax, rax);
__ ret(2 * kPointerSize);
__ bind(&is_not_instance);
- __ Move(rax, Smi::FromInt(1));
+ __ movl(rax, Immediate(1));
__ ret(2 * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
@@ -6784,7 +6824,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -6854,7 +6894,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// If return value is on the stack, pop it to registers.
if (result_size_ > 1) {
ASSERT_EQ(2, result_size_);
- // Position above 4 argument mirrors and arguments object.
+    // Read the result values stored on the stack. The result is stored
+    // above the four argument mirror slots and the two
+    // Arguments object slots.
__ movq(rax, Operand(rsp, 6 * kPointerSize));
__ movq(rdx, Operand(rsp, 7 * kPointerSize));
}
@@ -6865,7 +6907,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(frame_type, result_size_);
+ __ LeaveExitFrame(mode, result_size_);
__ ret(0);
// Handling of failure.
@@ -6995,12 +7037,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// this by performing a garbage collection and retrying the
// builtin once.
- StackFrame::Type frame_type = is_debug_break ?
- StackFrame::EXIT_DEBUG :
- StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break ?
+ ExitFrame::MODE_DEBUG :
+ ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type, result_size_);
+ __ EnterExitFrame(mode, result_size_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -7023,7 +7065,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -7032,7 +7074,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -7043,7 +7085,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -7058,6 +7100,11 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
}
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ UNREACHABLE();
+}
+
+
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -7340,6 +7387,127 @@ const char* GenericBinaryOpStub::GetName() {
}
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ movq(right_arg, right);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(left_arg, right);
+ SetArgsReversed();
+ } else {
+          // Order of moves is important to avoid destroying the left argument.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(right_arg, left);
+ SetArgsReversed();
+ } else {
+          // Order of moves is important to avoid destroying the right argument.
+ __ movq(right_arg, right);
+ __ movq(left_arg, left);
+ }
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
+ } else {
+ // Order of moves is not important.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ Push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (left.is(left_arg)) {
+ __ Move(right_arg, right);
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ Move(left_arg, right);
+ SetArgsReversed();
+ } else {
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ __ Move(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
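
All three overloads implement one convention: when register passing is supported, left ends up in rdx and right in rax, and a reversed commutative pair is recorded with SetArgsReversed() rather than fixed up with moves. A hypothetical call-site fragment in the style of the deferred code earlier in this file:

```cpp
// Hypothetical fragment: left already in rdx and right in rax, so the stub
// emits the call with no register shuffling. The result comes back in rax.
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, NO_SMI_CODE_IN_STUB);
stub.GenerateCall(masm_, rdx, rax);
```
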
+
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
@@ -7412,22 +7580,21 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- if (flags_ == SMI_CODE_IN_STUB) {
+ if (HasSmiCodeInStub()) {
// The fast case smi code wasn't inlined in the stub caller
// code. Generate it here to speed up common operations.
Label slow;
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
__ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
GenerateSmiCode(masm, &slow);
- __ ret(2 * kPointerSize); // remove both operands
+ GenerateReturn(masm);
// Too bad. The fast case smi code didn't succeed.
__ bind(&slow);
}
- // Setup registers.
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
+ // Make sure the arguments are in rdx and rax.
+ GenerateLoadArguments(masm);
// Floating point case.
switch (op_) {
@@ -7451,7 +7618,10 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -7467,7 +7637,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: UNREACHABLE();
}
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
@@ -7492,7 +7662,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (use_sse3_) {
// Truncate the operands to 32-bit integers and check for
// exceptions in doing so.
- CpuFeatures::Scope scope(CpuFeatures::SSE3);
+ CpuFeatures::Scope scope(SSE3);
__ fisttp_s(Operand(rsp, 0 * kPointerSize));
__ fisttp_s(Operand(rsp, 1 * kPointerSize));
__ fnstsw_ax();
@@ -7521,9 +7691,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl(rax); break;
- case Token::SHL: __ shll(rax); break;
- case Token::SHR: __ shrl(rax); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: __ shrl_cl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -7535,7 +7705,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ JumpIfNotValidSmiValue(rax, &non_smi_result);
// Tag smi result, if possible, and return.
__ Integer32ToSmi(rax, rax);
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
if (op_ != Token::SHR && non_smi_result.is_linked()) {
@@ -7561,7 +7731,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ fild_s(Operand(rsp, 1 * kPointerSize));
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
// Clear the FPU exception flag and reset the stack before calling
@@ -7592,12 +7762,62 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
// If all else fails, use the runtime system to get the correct
- // result.
+  // result. If arguments were passed in registers, place them back on the
+  // stack now, in the correct order below the return address.
__ bind(&call_runtime);
+ if (HasArgumentsInRegisters()) {
+ __ pop(rcx);
+ if (HasArgumentsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+ }
switch (op_) {
- case Token::ADD:
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1;
+ Condition is_smi;
+ Result answer;
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // First argument.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // Second argument.
+ is_smi = masm->CheckSmi(rdx);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, &not_string1);
+
+      // First argument is a string, test the second.
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &string1);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
+ __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
+ }
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
@@ -7634,6 +7854,26 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
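
The ADD slow path above now peels off string operands before falling back to the generic builtin. Restated as a sketch (the enum and helper are illustrative only):

```cpp
// Illustrative restatement of the string dispatch in the ADD slow path.
enum AddPath {
  STRING_ADD_RUNTIME,        // Both operands are strings.
  STRING_ADD_LEFT_BUILTIN,   // Only the left operand is a string.
  STRING_ADD_RIGHT_BUILTIN,  // Only the right operand is a string.
  GENERIC_ADD_BUILTIN        // Neither operand is a string.
};

AddPath ChooseAddPath(bool left_is_string, bool right_is_string) {
  if (left_is_string) {
    return right_is_string ? STRING_ADD_RUNTIME : STRING_ADD_LEFT_BUILTIN;
  }
  return right_is_string ? STRING_ADD_RIGHT_BUILTIN : GENERIC_ADD_BUILTIN;
}
```
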
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ // If arguments are not passed in registers read them from the stack.
+ if (!HasArgumentsInRegisters()) {
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If arguments are not passed in registers, remove them from the stack
+  // before returning.
+ if (!HasArgumentsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
int CompareStub::MinorKey() {
// Encode the two parameters in a unique 16 bit value.
ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
@@ -7653,7 +7893,7 @@ ModuloFunction CreateModuloFunction() {
&actual_size,
true));
CHECK(buffer);
- Assembler masm(buffer, actual_size);
+ Assembler masm(buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 56b88b74..8539884a 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED {
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Like GetValue except that the slot is expected to be written to before
  // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
- void TakeValue(TypeofState typeof_state);
+ void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -241,28 +241,20 @@ class CodeGenState BASE_EMBEDDED {
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state may or may not be inside a typeof, and has its
- // own control destination.
- CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
- ControlDestination* destination);
+ // state. The new state has its own control destination.
+ CodeGenState(CodeGenerator* owner, ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
- TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
-
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
@@ -307,17 +299,12 @@ class CodeGenerator: public AstVisitor {
static bool ShouldGenerateLog(Expression* type);
#endif
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -353,7 +340,6 @@ class CodeGenerator: public AstVisitor {
bool is_eval() { return is_eval_; }
// State
- TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
@@ -414,18 +400,16 @@ class CodeGenerator: public AstVisitor {
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -511,8 +495,6 @@ class CodeGenerator: public AstVisitor {
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
- static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -528,6 +510,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -560,6 +544,9 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -574,6 +561,7 @@ class CodeGenerator: public AstVisitor {
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -633,6 +621,25 @@ class CodeGenerator: public AstVisitor {
// times by generated code to perform common tasks, often the slow
// case of a JavaScript operation. They are all subclasses of CodeStub,
// which is declared in code-stubs.h.
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
class ToBooleanStub: public CodeStub {
@@ -647,11 +654,10 @@ class ToBooleanStub: public CodeStub {
};
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
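
Because the flags are now independent bits rather than mutually exclusive states, call sites test them with bitwise operations, as in the `(flags & NO_SMI_CODE_IN_STUB) != 0` check earlier in this diff. A minimal sketch:

```cpp
// Sketch: with the flags forming a bit set, adding a new option costs one
// more bit instead of a new enumerator per combination.
bool SmiCodeInStub(GenericBinaryFlags flags) {
  return (flags & NO_SMI_CODE_IN_STUB) == 0;  // Bit clear: smi code in stub.
}
```
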
@@ -660,45 +666,82 @@ class GenericBinaryOpStub: public CodeStub {
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false) {
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
+  bool args_in_registers_; // Arguments passed in registers, not on the stack.
+  bool args_reversed_; // Left and right arguments are swapped.
bool use_sse3_;
const char* GetName();
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ PrintF("GenericBinaryOpStub (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d)\n",
Token::String(op_),
static_cast<int>(mode_),
- static_cast<int>(flags_));
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_));
}
#endif
- // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+ // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 12> {};
- class SSE3Bits: public BitField<bool, 14, 1> {};
+ class OpBits: public BitField<Token::Value, 2, 10> {};
+ class SSE3Bits: public BitField<bool, 12, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+ class ArgsReversedBits: public BitField<bool, 14, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_);
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | SSE3Bits::encode(use_sse3_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_);
}
+
void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+
+ bool ArgsInRegistersSupported() {
+ return ((op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV))
+ && flags_ != NO_SMI_CODE_IN_STUB;
+ }
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgumentsInRegisters() { return args_in_registers_; }
+ bool HasArgumentsReversed() { return args_reversed_; }
};
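
MinorKey packs all five parameters into the 16-bit FRASOOOOOOOOOOMM layout using V8's BitField helpers, so stubs that agree on every parameter share one code object. A sketch of the round trip, assuming BitField<T, shift, size> exposes the static encode()/decode() pair it has elsewhere in V8 (the nested classes are private here, so this is purely illustrative):

```cpp
int key = OpBits::encode(Token::ADD)
          | ModeBits::encode(NO_OVERWRITE)
          | FlagBits::encode(NO_SMI_CODE_IN_STUB)
          | SSE3Bits::encode(true)
          | ArgsInRegistersBits::encode(false)
          | ArgsReversedBits::encode(false);
Token::Value op = OpBits::decode(key);  // Recovers Token::ADD.
```
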
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 8df0ab7e..cc20c58a 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -27,6 +27,10 @@
// CPU specific code for x64 independent of OS goes here.
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
#include "v8.h"
#include "cpu.h"
@@ -49,6 +53,15 @@ void CPU::FlushICache(void* start, size_t size) {
// If flushing of the instruction cache becomes necessary Windows has the
// API function FlushInstructionCache.
+
+ // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code. This leads to random
+  // instability because code patches or moves sometimes go unnoticed. One
+ // solution is to run valgrind with --smc-check=all, but this comes at a big
+ // performance cost. We can notify valgrind to invalidate its cache.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+ VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
}
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 49240b40..bc88d466 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -181,7 +181,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- Debug::kX64JSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
@@ -191,9 +191,10 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Debug::kX64JSReturnSequenceLength >= Debug::kX64CallInstructionLength);
+ ASSERT(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
- Debug::kX64JSReturnSequenceLength - Debug::kX64CallInstructionLength);
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 19bcf663..0b43e766 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -218,7 +218,7 @@ void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
OperandType op_order = bm[i].op_order_;
id->op_order_ =
static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
- assert(id->type == NO_INSTR); // Information not already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
id->type = type;
id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
}
@@ -232,7 +232,7 @@ void InstructionTable::SetTableRange(InstructionType type,
const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
id->mnem = mnem;
id->type = type;
id->byte_size_operation = byte_size;
@@ -243,7 +243,7 @@ void InstructionTable::SetTableRange(InstructionType type,
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
id->mnem = NULL; // Computed depending on condition code.
id->type = JUMP_CONDITIONAL_SHORT_INSTR;
}
@@ -393,6 +393,7 @@ class DisassemblerX64 {
RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
+ int PrintRightXMMOperand(byte* modrmp);
int PrintOperands(const char* mnem,
OperandType op_order,
byte* data);
@@ -400,13 +401,15 @@ class DisassemblerX64 {
int PrintImmediateOp(byte* data);
const char* TwoByteMnemonic(byte opcode);
int TwoByteOpcodeInstruction(byte* data);
- int F7Instruction(byte* data);
+ int F6F7Instruction(byte* data);
int ShiftInstruction(byte* data);
int JumpShort(byte* data);
int JumpConditional(byte* data);
int JumpConditionalShort(byte* data);
int SetCC(byte* data);
int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
@@ -568,6 +571,12 @@ int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
}
+int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX64::NameOfXMMRegister);
+}
+
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerX64::PrintOperands(const char* mnem,
@@ -648,8 +657,8 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
// Returns number of bytes used, including *data.
-int DisassemblerX64::F7Instruction(byte* data) {
- assert(*data == 0xF7);
+int DisassemblerX64::F6F7Instruction(byte* data) {
+ ASSERT(*data == 0xF7 || *data == 0xF6);
byte modrm = *(data + 1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -676,19 +685,12 @@ int DisassemblerX64::F7Instruction(byte* data) {
operand_size_code(),
NameOfCPURegister(rm));
return 2;
- } else if (mod == 3 && regop == 0) {
- int32_t imm = *reinterpret_cast<int32_t*>(data + 2);
- AppendToBuffer("test%c %s,0x%x",
- operand_size_code(),
- NameOfCPURegister(rm),
- imm);
- return 6;
} else if (regop == 0) {
AppendToBuffer("test%c ", operand_size_code());
- int count = PrintRightOperand(data + 1);
- int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count);
- AppendToBuffer(",0x%x", imm);
- return 1 + count + 4 /*int32_t*/;
+ int count = PrintRightOperand(data + 1); // Use name of 64-bit register.
+ AppendToBuffer(",0x");
+ count += PrintImmediate(data + 1 + count, operand_size());
+ return 1 + count;
} else {
UnimplementedInstruction();
return 2;
@@ -739,7 +741,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
UnimplementedInstruction();
return num_bytes;
}
- assert(mnem != NULL);
+ ASSERT_NE(NULL, mnem);
if (op == 0xD0) {
imm8 = 1;
} else if (op == 0xC0) {
@@ -762,7 +764,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpShort(byte* data) {
- assert(*data == 0xEB);
+ ASSERT_EQ(0xEB, *data);
byte b = *(data + 1);
byte* dest = data + static_cast<int8_t>(b) + 2;
AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -772,7 +774,7 @@ int DisassemblerX64::JumpShort(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpConditional(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
const char* mnem = conditional_code_suffix[cond];
@@ -794,7 +796,7 @@ int DisassemblerX64::JumpConditionalShort(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerX64::SetCC(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
const char* mnem = conditional_code_suffix[cond];
AppendToBuffer("set%s%c ", mnem, operand_size_code());
@@ -805,168 +807,170 @@ int DisassemblerX64::SetCC(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerX64::FPUInstruction(byte* data) {
- byte b1 = *data;
- byte b2 = *(data + 1);
- if (b1 == 0xD9) {
- const char* mnem = NULL;
- switch (b2) {
- case 0xE0:
- mnem = "fchs";
- break;
- case 0xE1:
- mnem = "fabs";
- break;
- case 0xE4:
- mnem = "ftst";
- break;
- case 0xF5:
- mnem = "fprem1";
- break;
- case 0xF7:
- mnem = "fincstp";
- break;
- case 0xE8:
- mnem = "fld1";
- break;
- case 0xEE:
- mnem = "fldz";
- break;
- case 0xF8:
- mnem = "fprem";
- break;
- }
- if (mnem != NULL) {
- AppendToBuffer("%s", mnem);
- return 2;
- } else if ((b2 & 0xF8) == 0xC8) {
- AppendToBuffer("fxch st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case 0:
- mnem = "fld_s";
- break;
- case 3:
- mnem = "fstp_s";
- break;
- default:
- UnimplementedInstruction();
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ }
+}
+
+int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
+ default: UnimplementedInstruction();
}
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDD) {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- if (mod == 3) {
- switch (regop) {
- case 0:
- AppendToBuffer("ffree st%d", rm & 7);
- break;
- case 2:
- AppendToBuffer("fstp st%d", rm & 7);
- break;
- default:
- UnimplementedInstruction();
- break;
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
+ default: UnimplementedInstruction();
}
- return 2;
- } else {
- const char* mnem = "?";
- switch (regop) {
- case 0:
- mnem = "fld_d";
- break;
- case 3:
- mnem = "fstp_d";
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
+int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ UnimplementedInstruction();
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
break;
default:
- UnimplementedInstruction();
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
}
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDB) {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case 0:
- mnem = "fild_s";
- break;
- case 2:
- mnem = "fist_s";
- break;
- case 3:
- mnem = "fistp_s";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDF) {
- if (b2 == 0xE0) {
- AppendToBuffer("fnstsw_ax");
- return 2;
- }
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case 5:
- mnem = "fild_d";
- break;
- case 7:
- mnem = "fistp_d";
- break;
- default:
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDC || b1 == 0xDE) {
- bool is_pop = (b1 == 0xDE);
- if (is_pop && b2 == 0xD9) {
- AppendToBuffer("fcompp");
- return 2;
- }
- const char* mnem = "FP0xDC";
- switch (b2 & 0xF8) {
- case 0xC0:
- mnem = "fadd";
- break;
- case 0xE8:
- mnem = "fsub";
- break;
- case 0xC8:
- mnem = "fmul";
- break;
- case 0xF8:
- mnem = "fdiv";
- break;
- default:
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else {
UnimplementedInstruction();
- }
- AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
- return 2;
- } else if (b1 == 0xDA && b2 == 0xE9) {
- const char* mnem = "fucompp";
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
AppendToBuffer("%s", mnem);
- return 2;
}
- AppendToBuffer("Unknown FP instruction");
return 2;
}
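
The restructure hinges on two x86 encoding facts: the x87 escape opcodes occupy 0xD8 through 0xDF, and a modrm byte of 0xC0 or above (mod == 3) names an FPU register rather than a memory operand. As predicates:

```cpp
#include <cstdint>

// The two tests the new FPUInstruction dispatch relies on.
bool IsFPUEscapeOpcode(uint8_t opcode) { return (opcode & 0xF8) == 0xD8; }
bool HasRegisterOperand(uint8_t modrm) { return modrm >= 0xC0; }  // mod == 3.
```
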
+
// Handle all two-byte opcodes, which start with 0x0F.
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@@ -1045,13 +1049,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ current += PrintRightOperand(current);
} else if ((opcode & 0xF8) == 0x58) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
+ AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1060,12 +1064,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// CVTTSS2SI: Convert scalar single-precision FP to dword integer.
// Assert that mod is not 3, so source is memory, not an XMM register.
- ASSERT((*current & 0xC0) != 0xC0);
+ ASSERT_NE(0xC0, *current & 0xC0);
current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
- return current - data;
+ return static_cast<int>(current - data);
}
@@ -1236,18 +1240,6 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
- case 0xF6: {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- if (mod == 3 && regop == 0) {
- AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2));
- } else {
- UnimplementedInstruction();
- }
- data += 3;
- break;
- }
-
case 0x81: // fall through
case 0x83: // 0x81 with sign extension bit set
data += PrintImmediateOp(data);
@@ -1344,7 +1336,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x95:
case 0x96:
case 0x97: {
- int reg = (current & 0x7) | (rex_b() ? 8 : 0);
+ int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
if (reg == 0) {
AppendToBuffer("nop"); // Common name for xchg rax,rax.
} else {
@@ -1352,8 +1344,9 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
operand_size_code(),
NameOfCPURegister(reg));
}
+ data++;
}
-
+ break;
case 0xFE: {
data++;
@@ -1465,8 +1458,10 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += JumpShort(data);
break;
+ case 0xF6:
+ byte_size_operand_ = true; // fall through
case 0xF7:
- data += F7Instruction(data);
+ data += F6F7Instruction(data);
break;
default:
@@ -1479,7 +1474,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
tmp_buffer_[tmp_buffer_pos_] = '\0';
}
- int instr_len = data - instr;
+ int instr_len = static_cast<int>(data - instr);
ASSERT(instr_len > 0); // Ensure progress.
int outp = 0;
@@ -1591,7 +1586,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
for (byte* bp = prev_pc; bp < pc; bp++) {
fprintf(f, "%02x", *bp);
}
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
fprintf(f, " ");
}
fprintf(f, " %s\n", buffer.start());
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 46d8dc4b..333a47dd 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "fast-codegen.h"
#include "parser.h"
@@ -61,11 +62,76 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = fun->scope()->num_stack_slots();
- for (int i = 0; i < locals_count; i++) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ if (locals_count <= 1) {
+ if (locals_count > 0) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ } else {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(rdx);
+ }
+ }
+ }
+
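+  // The function object is passed in rdi on entry; keep track of whether
+  // it is still there for the arguments object allocation below.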
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both rax and rsi. It replaces the context
+    // passed to us. It's saved on the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context
+ __ movq(Operand(rsi, Context::SlotOffset(slot->index())), rax);
+ }
}
}
+ // Possibly allocate an arguments object.
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(rdi);
+ } else {
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // The receiver is just before the parameters on the caller's stack.
+ __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(rdx);
+ __ Push(Smi::FromInt(fun->num_parameters()));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Store new arguments object in both "arguments" and ".arguments" slots.
+ __ movq(rcx, rax);
+ Move(arguments->slot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, rcx, rbx, rdx);
+ }
+
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -84,38 +150,371 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
+ // Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- SetReturnPosition(fun);
+ EmitReturnSequence(function_->end_position());
+ }
+}
+
+
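+// The return sequence is emitted at most once; later returns jump back to
+// the one already emitted.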
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ __ bind(&return_label_);
if (FLAG_trace) {
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
-
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
// have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
// (3 + 1 + 3).
- const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
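+// Complete an expression whose value is in the source register according to
+// the given context: discarded for effect, pushed for value, or compiled
+// into a branch for the test contexts.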
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
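+// Return an operand addressing the given slot. For context slots the
+// context chain is walked using the scratch register, which is clobbered.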
+template <>
+Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(rbp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return Operand(rax, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ Operand location = CreateSlotOperand<Operand>(source, dst);
+ __ movq(dst, location);
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: {
+ Operand location = CreateSlotOperand<Operand>(source, scratch);
+ __ push(location);
+ break;
+ }
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ Push(expr->handle());
+ break;
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ Move(rax, expr->handle());
+ Move(context, rax);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ movq(Operand(rbp, SlotOffset(dst)), src);
+ break;
+ case Slot::CONTEXT: {
+ ASSERT(!src.is(scratch1));
+ ASSERT(!src.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ __ movq(Operand(scratch1, Context::SlotOffset(dst->index())), src);
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
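+// Drop drop_count elements from the stack and complete the expression with
+// the value in the source register. In value contexts the last dropped
+// slot is reused to hold the result.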
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int drop_count) {
+ ASSERT(drop_count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ addq(rsp, Immediate(drop_count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (drop_count > 1) {
+ __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
+ }
+ __ movq(Operand(rsp, 0), source);
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(rsp));
+ __ addq(rsp, Immediate(drop_count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (drop_count > 1) {
+ __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
+ }
+ __ movq(Operand(rsp, 0), source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(Operand(rsp, 0), source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Use the shared ToBoolean stub to compile the value in the register into
+ // control flow to the code generator's true and false labels. Perform
+ // the fast checks assumed by the stub.
+
+ // The undefined value is false.
+ __ CompareRoot(source, Heap::kUndefinedValueRootIndex);
+ __ j(equal, false_label);
+ __ CompareRoot(source, Heap::kTrueValueRootIndex); // True is true.
+ __ j(equal, true_label);
+ __ CompareRoot(source, Heap::kFalseValueRootIndex); // False is false.
+ __ j(equal, false_label);
+ ASSERT_EQ(0, kSmiTag);
+ __ SmiCompare(source, Smi::FromInt(0)); // The smi zero is false.
+ __ j(equal, false_label);
+ Condition is_smi = masm_->CheckSmi(source); // All other smis are true.
+ __ j(is_smi, true_label);
+
+ // Call the stub for all other cases.
+ __ push(source);
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ __ testq(rax, rax); // The stub returns nonzero for true.
+ __ j(not_zero, true_label);
+ __ jmp(false_label);
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(Operand(rbp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ movq(rbx,
+ CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ cmpq(rbx, rsi);
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+ kScratchRegister);
+ // No write barrier since the hole value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(rax);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+ int offset = Context::SlotOffset(slot->index());
+ __ RecordWrite(rsi, offset, rax, rcx);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ push(rsi);
+ __ Push(var->name());
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ Push(Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ } else {
+ __ Push(Smi::FromInt(0)); // no initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(rax);
+ } else {
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+
+ // Absence of a test rax instruction following the call
+      // indicates that no part of the load was inlined.
+
+ // Value in rax is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ }
+ }
+}
+
+
void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
@@ -126,56 +525,17 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(rax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ if (expr->AsLiteral() != NULL) {
__ Move(rax, expr->AsLiteral()->handle());
+ } else {
+ Visit(expr);
+ ASSERT_EQ(Expression::kValue, expr->context());
+ __ pop(rax);
}
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
- // (3 + 1 + 3).
- const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
-#endif
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -183,7 +543,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -192,12 +553,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
__ push(rsi);
__ Push(boilerplate);
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(rax);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), rax);
}
@@ -205,6 +561,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
@@ -212,33 +569,73 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
__ Move(rcx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
// A test rax instruction following the call is used by the IC to
// indicate that the inobject property case was inlined. Ensure there
// is no test rax instruction here.
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ movq(Operand(rsp, 0), rax);
- } else {
- ASSERT(expr->location().is_nowhere());
- __ addq(rsp, Immediate(kPointerSize));
- }
+ __ nop();
- } else {
- Comment cmnt(masm_, "Stack slot");
+ DropAndMove(expr->context(), rax);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ push(Operand(rbp, SlotOffset(slot)));
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, rax);
+ } else {
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Currently the only parameter expressions that can occur are
+    // of the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object);
+ Slot* object_slot = object->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(Expression::kValue, object_slot, rax);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ Move(Expression::kValue, key_literal);
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test rax, ..." instruction after
+ // the call. It is treated specially by the LoadIC code.
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), rax, 2);
}
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// rdi = JS function.
@@ -260,10 +657,126 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(rax);
+ Move(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
+
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ movq(rax, FieldOperand(rbx, literal_offset));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(rbx);
+ // Literal index (1).
+ __ Push(Smi::FromInt(expr->literal_index()));
+ // Constant properties (2).
+ __ Push(expr->constant_properties());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // rax contains boilerplate.
+ // Clone boilerplate.
+ __ push(rax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: The result is saved on top of the
+ // stack and in rax.
+  // If result_saved == false: The result is not on the stack, just in rax.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(rax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(rax);
+ __ Move(rcx, key->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ movq(rax, Operand(rsp, 0)); // Restore result back into rax.
+ break;
+ }
+ // fall through
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(rax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
+ break;
+ case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::GETTER:
+ __ push(rax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0));
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
+ break;
+ default: UNREACHABLE();
+ }
+ }
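+  // Complete the object literal according to its context, taking into
+  // account whether the result is currently saved on top of the stack.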
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(rax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
@@ -316,7 +829,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(rax); // Subexpression value.
@@ -329,231 +842,849 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ RecordWrite(rbx, offset, rax, rcx);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ addq(rsp, Immediate(kPointerSize));
- } else if (destination.is_temporary() && !result_saved) {
- __ push(rax);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(rax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in rax, variable name in rcx, and the global object
- // on the stack.
- if (source.is_temporary()) {
- __ pop(rax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ Move(rax, rhs->AsLiteral()->handle());
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in rax, variable name in
+ // rcx, and the global object on the stack.
+ __ pop(rax);
__ Move(rcx, var->name());
__ push(CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ movq(Operand(rsp, 0), rax);
- } else {
- __ addq(rsp, Immediate(kPointerSize));
- }
- } else {
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side temporary
- // on the stack.
- __ movq(kScratchRegister, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(Operand(rbp, SlotOffset(var->slot())));
+ DropAndMove(expr->context(), rax);
+
+ } else if (var->slot()) {
+ Slot* slot = var->slot();
+ ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(Operand(rbp, SlotOffset(var->slot())));
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ movq(rax, Operand(rsp, 0));
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(rax);
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ Move(kScratchRegister, rhs->AsLiteral()->handle());
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(kScratchRegister);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the context chain to the context containing the slot.
+ __ movq(rax,
+ Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ movq(rax,
+ Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ movq(rax, rsi); // RecordWrite destroys the object register.
+ }
+ if (FLAG_debug_code) {
+ __ cmpq(rax,
+ Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ Check(equal, "Context Slot chain length wrong.");
+ }
+ __ pop(rcx);
+ __ movq(Operand(rax, Context::SlotOffset(slot->index())), rcx);
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(rcx);
+ } else if (expr->context() != Expression::kEffect) {
+ __ movq(rdx, rcx);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(rax, offset, rcx, rbx);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), rdx);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
-void FastCodeGenerator::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
- __ Push(var->name());
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(rax);
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(rax); // Result of assignment, saved even if not needed.
+ __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(rax);
+ }
+
+ DropAndMove(expr->context(), rax);
+}
+
+
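+// On entry the value to store is on top of the stack, with the key and the
+// receiver below it.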
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+    // Receiver is under the key and value.
+ __ push(Operand(rsp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(rax);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(rax); // Result of assignment, saved even if not needed.
+    // Receiver is under the key and value.
+ __ push(Operand(rsp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(rax);
+ }
+
+ // Receiver and key are still on stack.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ Move(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in rcx and the receiver on the stack.
+ __ Move(rcx, key->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test rax,..."
+    // instruction after the call; it is treated specially by the LoadIC code.
+ __ nop();
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test rax, ..." instruction after
+ // the call. It is treated specially by the LoadIC code.
+
+ // Drop key left on the stack by IC.
+ __ addq(rsp, Immediate(kPointerSize));
+ }
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ Push(args->at(i)->AsLiteral()->handle());
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- // Record source position for debugger
+ // Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, reloc_info);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- if (expr->location().is_temporary()) {
- __ movq(Operand(rsp, 0), rax);
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ Push(var->name());
+ // Push global object as receiver for the call IC lookup.
+ __ push(CodeGenerator::GlobalObject());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ Push(key->handle());
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test rax,..."
+      // instruction after the call; it is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ addq(rsp, Immediate(kPointerSize));
+ // Pop receiver.
+ __ pop(rbx);
+ // Push result (function).
+ __ push(rax);
+ // Push receiver object on stack.
+ if (prop->is_synthetic()) {
+ __ push(CodeGenerator::GlobalObject());
+ } else {
+ __ push(rbx);
+ }
+ EmitCallWithStub(expr);
+ }
} else {
- ASSERT(expr->location().is_nowhere());
- __ addq(rsp, Immediate(kPointerSize));
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ movq(rbx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+  // If location is value, the function is already on the stack,
+  // so nothing to do here.
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ // If location is value, it is already on the stack,
+ // so nothing to do here.
}
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+  // Load the argument count into rax and the function into rdi.
+ __ Set(rax, arg_count);
+ // Function is in rsp[arg_count + 1].
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in rax, or pop it.
+ DropAndMove(expr->context(), rax);
}
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ Push(expr->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ Push(args->at(i)->AsLiteral()->handle());
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), rax);
+ } else {
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), rax);
}
+}
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(rax);
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
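+  // Only count operations on global variables are handled here, as
+  // checked by the asserts above.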
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(rax);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(rax);
+ __ Push(Smi::FromInt(1));
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ Move(rcx, proxy->AsVariable()->name());
+ __ push(CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+  // Restore the stack after the store IC.
+ __ addq(rsp, Immediate(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+      // Do nothing. The result is either on the stack for value context
+ // or discarded for effect context.
+ break;
+ case Expression::kTest:
+ __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ break;
+ case Expression::kTestValue:
+ // Value is false so it's needed.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
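+      // Compile NOT by visiting the operand in test context with the
+      // true and false labels swapped.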
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ push(CodeGenerator::GlobalObject());
+ __ Move(rcx, proxy->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ movq(Operand(rsp, 0), rax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(rsi);
+ __ Push(proxy->name());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(rax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), rax);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
}
}
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
-
- Label eval_right, done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
-
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
-
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
-
- Visit(left);
- // Use the shared ToBoolean stub to find the boolean value of the
- // left-hand subexpression. Load the value into rax to perform some
- // inlined checks assumed by the stub.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- // Copy the left-hand value into rax because we may need it as the
- // final result.
- __ movq(rax, Operand(rsp, 0));
- } else {
- // Pop the left-hand value into rax because we will not need it as the
- // final result.
- __ pop(rax);
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+
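+      // Evaluate both operands onto the stack; the generic binary
+      // operation stub consumes them and leaves the result in rax.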
+ Visit(expr->left());
+ Visit(expr->right());
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Move(expr->context(), rax);
+
+ break;
}
- } else {
- // Load the left-hand value into rax. Put it on the stack if we may
- // need it.
- ASSERT(left->AsLiteral() != NULL);
- __ Move(rax, left->AsLiteral()->handle());
- if (destination.is_temporary()) __ push(rax);
+ default:
+ UNREACHABLE();
}
- // The left-hand value is in rax. It is also on the stack iff the
- // destination location is temporary.
+}
- // Perform fast checks assumed by the stub.
- // The undefined value is false.
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &eval_right);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex); // True is true.
- __ j(equal, &done);
- __ CompareRoot(rax, Heap::kFalseValueRootIndex); // False is false.
- __ j(equal, &eval_right);
- ASSERT(kSmiTag == 0);
- __ SmiCompare(rax, Smi::FromInt(0)); // The smi zero is false.
- __ j(equal, &eval_right);
- Condition is_smi = masm_->CheckSmi(rax); // All other smis are true.
- __ j(is_smi, &done);
- // Call the stub for all other cases.
- __ push(rax);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ testq(rax, rax); // The stub returns nonzero for true.
- __ j(not_zero, &done);
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
- __ bind(&eval_right);
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) {
- __ addq(rsp, Immediate(kPointerSize));
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
}
- Visit(right);
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ Push(right->AsLiteral()->handle());
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ addq(rsp, Immediate(kPointerSize));
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(zero, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ __ pop(rax);
+ __ pop(rdx);
+ break;
+ case Token::LT:
+ cc = less;
+ __ pop(rax);
+ __ pop(rdx);
+ break;
+ case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = less;
+ __ pop(rdx);
+ __ pop(rax);
+ break;
+ case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = greater_equal;
+ __ pop(rdx);
+ __ pop(rax);
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ __ pop(rax);
+ __ pop(rdx);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ JumpIfNotBothSmi(rax, rdx, &slow_case);
+ __ SmiCompare(rdx, rax);
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+ }
}
- __ bind(&done);
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
}
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), rax);
+}
+
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index fe224ad9..6a0527cf 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -57,11 +57,7 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
// Determine frame type.
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- return EXIT_DEBUG;
- } else {
- return EXIT;
- }
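+  // The debug marker was replaced by a code slot (see frames-x64.h), so
+  // every exit frame is now simply typed EXIT.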
+ return EXIT;
}
int JavaScriptFrame::GetProvidedParametersCount() const {
@@ -69,10 +65,10 @@ int JavaScriptFrame::GetProvidedParametersCount() const {
}
-void ExitFrame::Iterate(ObjectVisitor* a) const {
- // Exit frames on X64 do not contain any pointers. The arguments
- // are traversed as part of the expression stack of the calling
- // frame.
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
byte* InternalFrame::GetCallerStackPointer() const {
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index eefaa0ae..a92b248d 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -63,7 +63,7 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -2 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = +0 * kPointerSize;
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 2812df15..ccbc615b 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -31,6 +31,7 @@
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -107,7 +108,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kLengthOffset));
+ __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
__ shrl(r1, Immediate(String::kHashShift));
if (i > 0) {
__ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
@@ -239,18 +240,6 @@ void KeyedLoadIC::Generate(MacroAssembler* masm,
}
-#ifdef DEBUG
-// For use in assert below.
-static int TenToThe(int exponent) {
- ASSERT(exponent <= 9);
- ASSERT(exponent >= 1);
- int answer = 10;
- for (int i = 1; i < exponent; i++) answer *= 10;
- return answer;
-}
-#endif
-
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
@@ -313,7 +302,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
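+  // movzxbq zero-extends the loaded byte; a plain movb would leave the
+  // upper bits of rax unchanged and corrupt the smi conversion below.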
__ Integer32ToSmi(rax, rax);
__ ret(0);
@@ -327,7 +316,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
- __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
// If the string is a symbol, do a quick inline probe of the receiver's
@@ -342,20 +331,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // Array index string: If short enough use cache in length/hash field (rbx).
- // We assert that there are enough bits in an int32_t after the hash shift
- // bits have been subtracted to allow space for the length and the cached
- // array index.
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << (String::kShortLengthShift - String::kHashShift)));
+ (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- const int kLengthFieldLimit =
- (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
- __ cmpl(rbx, Immediate(kLengthFieldLimit));
- __ j(above_equal, &slow);
__ movl(rax, rbx);
- __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
- __ shrl(rax, Immediate(String::kLongLengthShift));
+ __ and_(rax, Immediate(String::kArrayIndexHashMask));
+ __ shrl(rax, Immediate(String::kHashShift));
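+  // rax now holds the array index that was cached in the hash field.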
__ jmp(&index_int);
}
@@ -393,7 +378,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// ExternalArray.
// rax: index (as a smi)
// rcx: JSObject
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
@@ -413,7 +398,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ movsxbq(rax, Operand(rcx, rax, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
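+      // Zero-extend for the unsigned type; the signed case above uses
+      // movsxbq instead.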
break;
case kExternalShortArray:
__ movsxwq(rax, Operand(rcx, rax, times_2, 0));
@@ -790,6 +775,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(rcx, rbx, times_4, 0));
+ __ movq(rax, rdx); // Return the original value.
+ __ ret(0);
} else {
// Need to perform float-to-int conversion.
// Test the top of the FP stack for NaN.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b2f69bb7..71157914 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -67,6 +67,12 @@ void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ CompareRoot(rsp, Heap::kStackLimitRootIndex);
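+  // The stack grows downwards, so rsp below the limit means overflow.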
+ j(below, on_stack_overflow);
+}
+
+
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
Register addr,
@@ -282,15 +288,19 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
RelocInfo::NONE);
push(kScratchRegister);
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ int3();
}
@@ -402,9 +412,9 @@ void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xor_(dst, dst);
} else if (is_int32(x)) {
- movq(dst, Immediate(x));
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
- movl(dst, Immediate(x));
+ movl(dst, Immediate(static_cast<uint32_t>(x)));
} else {
movq(dst, x, RelocInfo::NONE);
}
@@ -416,9 +426,9 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
xor_(kScratchRegister, kScratchRegister);
movq(dst, kScratchRegister);
} else if (is_int32(x)) {
- movq(dst, Immediate(x));
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
- movl(dst, Immediate(x));
+ movl(dst, Immediate(static_cast<uint32_t>(x)));
} else {
movq(kScratchRegister, x, RelocInfo::NONE);
movq(dst, kScratchRegister);
@@ -1078,7 +1088,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
SmiToInteger32(rcx, src2);
// Shift amount specified by lower 5 bits, not six as the shl opcode.
and_(rcx, Immediate(0x1f));
- shl(dst);
+ shl_cl(dst);
}
@@ -1099,7 +1109,7 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
- shr(dst); // Shift is rcx modulo 0x1f + 32.
+  shr_cl(dst);  // Effective shift is 32 + (rcx & 0x1f).
shl(dst, Immediate(kSmiShift));
testq(dst, dst);
if (src1.is(rcx) || src2.is(rcx)) {
@@ -1135,7 +1145,7 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
- sar(dst); // Shift 32 + original rcx & 0x1f.
+  sar_cl(dst);  // Effective shift is 32 + (original rcx & 0x1f).
shl(dst, Immediate(kSmiShift));
if (src1.is(rcx)) {
movq(src1, kScratchRegister);
@@ -1787,9 +1797,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
// Setup the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
@@ -1801,7 +1809,12 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
- push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ push(Immediate(0));
+ } else {
+ movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ push(kScratchRegister);
+ }
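+  // The slot holds a zero marker in debug mode and the code object
+  // otherwise; the code object is what ExitFrame::Iterate exposes to the
+  // GC so the frame's code can be relocated.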
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
@@ -1821,7 +1834,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
@@ -1860,17 +1873,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Registers:
// r15 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// It's okay to clobber register rbx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
lea(rbx, Operand(rbp, kOffset));
CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
}
@@ -2085,6 +2098,11 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
+ if (FLAG_debug_code) {
+ testq(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, "Unaligned allocation in new space");
+ }
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
@@ -2226,6 +2244,25 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {  // Context is the current function context.
+ // The context may be an intermediate context, not a function context.
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4c2f35bd..9e7c25c9 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -98,6 +98,12 @@ class MacroAssembler: public Assembler {
#endif
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+  // Do a simple test for stack overflow; jumps to on_stack_overflow if the
+  // stack limit has been hit. This only detects an overflow, it does not
+  // handle it.
+  void StackLimitCheck(Label* on_stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -106,16 +112,16 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register rax and
+ // Enter specific kind of exit frame; either in normal or
+ // debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
// to the first argument in register rsi.
- void EnterExitFrame(StackFrame::Type type, int result_size = 1);
+ void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame(StackFrame::Type type, int result_size = 1);
+ void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
// ---------------------------------------------------------------------------
@@ -542,6 +548,9 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Find the function context up the context chain.
+ void LoadContext(Register dst, int context_chain_length);
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 5d17a2d2..639f5e95 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -643,10 +643,10 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
__ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_guard_limit);
+ __ movq(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
@@ -1079,7 +1079,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
// If there is a difference, update the object pointer and start and end
// addresses in the RegExp stack frame to match the new value.
const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = end_address - start_address;
+ int byte_length = static_cast<int>(end_address - start_address);
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
@@ -1196,9 +1196,9 @@ void RegExpMacroAssemblerX64::Drop() {
void RegExpMacroAssemblerX64::CheckPreemption() {
// Check for preemption.
Label no_preempt;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ load_rax(stack_guard_limit);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
@@ -1209,18 +1209,16 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
void RegExpMacroAssemblerX64::CheckStackLimit() {
- if (FLAG_check_stack) {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
- __ j(above, &no_stack_overflow);
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ load_rax(stack_limit);
+ __ cmpq(backtrack_stackpointer(), rax);
+ __ j(above, &no_stack_overflow);
- SafeCall(&stack_overflow_label_);
+ SafeCall(&stack_overflow_label_);
- __ bind(&no_stack_overflow);
- }
+ __ bind(&no_stack_overflow);
}
@@ -1287,11 +1285,6 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
}
}
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- __ int3(); // Unused on x64.
-}
-
#undef __
#endif // V8_NATIVE_REGEXP
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 998c9095..c4f3a85a 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -44,6 +44,12 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
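+  // x64 runs natively (no simulator), so C++ try-catch addresses can be
+  // used on the JS stack unchanged.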
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
// Call the generated regexp code directly. The entry function pointer should
@@ -51,4 +57,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 58a3e0f6..55b0b87c 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -173,7 +173,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
@@ -183,7 +183,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ProbeTable(masm, flags, kPrimary, name, scratch);
// Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
@@ -323,11 +323,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Load length directly from the string.
__ bind(&load_length);
- __ and_(scratch, Immediate(kStringSizeMask));
__ movl(rax, FieldOperand(receiver, String::kLengthOffset));
- // rcx is also the receiver.
- __ lea(rcx, Operand(scratch, String::kLongLengthShift));
- __ shr(rax); // rcx is implicit shift register.
__ Integer32ToSmi(rax, rax);
__ ret(0);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 781efd14..fe65d34a 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -893,16 +893,15 @@ void VirtualFrame::SyncRange(int begin, int end) {
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
- // Emit normal 'push' instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
+  // If delta is positive, the range extends past the stack pointer and rsp
+  // must be moved down to cover it.
+ int delta = end - stack_pointer_;
+ if (delta > 0) {
+ stack_pointer_ = end;
+ __ subq(rsp, Immediate(delta * kPointerSize));
+ }
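+  // Everything up to 'end' now lies at or below the stack pointer, so all
+  // unsynced elements can be stored with plain moves.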
+
for (int i = start; i <= end; i++) {
- if (!elements_[i].is_synced()) {
- if (i <= stack_pointer_) {
- SyncElementBelowStackPointer(i);
- } else {
- SyncElementByPushing(i);
- }
- }
+ if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
}
}
diff --git a/src/zone.h b/src/zone.h
index 4e4f1d72..0d006ddd 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -118,7 +118,7 @@ class Zone {
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- void* operator new(size_t size) { return Zone::New(size); }
+ void* operator new(size_t size) { return Zone::New(static_cast<int>(size)); }
// Ideally, the delete operator should be private instead of
// public, but unfortunately the compiler sometimes synthesizes
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index f041041c..e6c81d80 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -34,6 +34,7 @@ Import('context object_files')
SOURCES = {
'all': [
+ 'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
'test-ast.cc',
@@ -51,6 +52,7 @@ SOURCES = {
'test-log.cc',
'test-log-utils.cc',
'test-mark-compact.cc',
+ 'test-parsing.cc',
'test-regexp.cc',
'test-serialize.cc',
'test-sockets.cc',
diff --git a/test/cctest/cctest.cc b/test/cctest/cctest.cc
index 82a33e6d..f638ed48 100644
--- a/test/cctest/cctest.cc
+++ b/test/cctest/cctest.cc
@@ -121,3 +121,6 @@ int main(int argc, char* argv[]) {
v8::V8::Dispose();
return 0;
}
+
+RegisterThreadedTest* RegisterThreadedTest::first_ = NULL;
+int RegisterThreadedTest::count_ = 0;
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index a95645e0..404b692b 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -28,6 +28,8 @@
#ifndef CCTEST_H_
#define CCTEST_H_
+#include "v8.h"
+
#ifndef TEST
#define TEST(Name) \
static void Test##Name(); \
@@ -72,4 +74,138 @@ class CcTest {
CcTest* prev_;
};
+// Switches between all the Api tests using the threading support.
+// In order to get a surprising but repeatable pattern of thread
+// switching it has extra semaphores to control the order in which
+// the tests alternate, not relying solely on the big V8 lock.
+//
+// A test is augmented with calls to ApiTestFuzzer::Fuzz() in its
+// callbacks. This will have no effect when we are not running the
+// thread fuzzing test. In the thread fuzzing test it will
+// pseudorandomly select a successor thread and switch execution
+// to that thread, suspending the current test.
+class ApiTestFuzzer: public v8::internal::Thread {
+ public:
+ void CallTest();
+ explicit ApiTestFuzzer(int num)
+ : test_number_(num),
+ gate_(v8::internal::OS::CreateSemaphore(0)),
+ active_(true) {
+ }
+ ~ApiTestFuzzer() { delete gate_; }
+
+ // The ApiTestFuzzer is also a Thread, so it has a Run method.
+ virtual void Run();
+
+ enum PartOfTest { FIRST_PART, SECOND_PART };
+
+ static void Setup(PartOfTest part);
+ static void RunAllTests();
+ static void TearDown();
+ // This method switches threads if we are running the Threading test.
+ // Otherwise it does nothing.
+ static void Fuzz();
+ private:
+ static bool fuzzing_;
+ static int tests_being_run_;
+ static int current_;
+ static int active_tests_;
+ static bool NextThread();
+ int test_number_;
+ v8::internal::Semaphore* gate_;
+ bool active_;
+ void ContextSwitch();
+ static int GetNextTestNumber();
+ static v8::internal::Semaphore* all_tests_done_;
+};
+
+
+#define THREADED_TEST(Name) \
+ static void Test##Name(); \
+ RegisterThreadedTest register_##Name(Test##Name, #Name); \
+ /* */ TEST(Name)
+
+
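+// Registry node created by THREADED_TEST; instances form a static linked
+// list that ApiTestFuzzer walks to run every threaded test.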
+class RegisterThreadedTest {
+ public:
+ explicit RegisterThreadedTest(CcTest::TestFunction* callback,
+ const char* name)
+ : fuzzer_(NULL), callback_(callback), name_(name) {
+ prev_ = first_;
+ first_ = this;
+ count_++;
+ }
+ static int count() { return count_; }
+ static RegisterThreadedTest* nth(int i) {
+ CHECK(i < count());
+ RegisterThreadedTest* current = first_;
+ while (i > 0) {
+ i--;
+ current = current->prev_;
+ }
+ return current;
+ }
+ CcTest::TestFunction* callback() { return callback_; }
+ ApiTestFuzzer* fuzzer_;
+ const char* name() { return name_; }
+
+ private:
+ static RegisterThreadedTest* first_;
+ static int count_;
+ CcTest::TestFunction* callback_;
+ RegisterThreadedTest* prev_;
+ const char* name_;
+};
+
+
+// A LocalContext holds a reference to a v8::Context.
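+// The context is entered on construction and exited and disposed on
+// destruction (RAII), so a test can simply create one on the stack.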
+class LocalContext {
+ public:
+ LocalContext(v8::ExtensionConfiguration* extensions = 0,
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Handle<v8::ObjectTemplate>(),
+ v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
+ : context_(v8::Context::New(extensions, global_template, global_object)) {
+ context_->Enter();
+ }
+
+ virtual ~LocalContext() {
+ context_->Exit();
+ context_.Dispose();
+ }
+
+ v8::Context* operator->() { return *context_; }
+ v8::Context* operator*() { return *context_; }
+ bool IsReady() { return !context_.IsEmpty(); }
+
+ v8::Local<v8::Context> local() {
+ return v8::Local<v8::Context>::New(context_);
+ }
+
+ private:
+ v8::Persistent<v8::Context> context_;
+};
+
+
+static inline v8::Local<v8::Value> v8_num(double x) {
+ return v8::Number::New(x);
+}
+
+
+static inline v8::Local<v8::String> v8_str(const char* x) {
+ return v8::String::New(x);
+}
+
+
+static inline v8::Local<v8::Script> v8_compile(const char* x) {
+ return v8::Script::Compile(v8_str(x));
+}
+
+
+// Helper function that compiles and runs the source.
+static inline v8::Local<v8::Value> CompileRun(const char* source) {
+ return v8::Script::Compile(v8::String::New(source))->Run();
+}
+
+
#endif // ifndef CCTEST_H_
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index b43cd643..a143cbda 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -33,12 +33,6 @@ test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
# BUG(382): Weird test. Can't guarantee that it never times out.
test-api/ApplyInterruption: PASS || TIMEOUT
-# This is about to go away anyway since new snapshot code is on the way.
-test-serialize/Deserialize: FAIL
-test-serialize/DeserializeAndRunScript: FAIL
-test-serialize/DeserializeNatives: FAIL
-test-serialize/DeserializeExtensions: FAIL
-
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
test-serialize/TestThatAlwaysFails: FAIL
@@ -47,9 +41,6 @@ test-serialize/DependentTestThatAlwaysFails: FAIL
[ $arch == arm ]
-# BUG(113): Test seems flaky on ARM.
-test-spaces/LargeObjectSpace: PASS || FAIL
-
# BUG(240): Test seems flaky on ARM.
test-api/RegExpInterruption: SKIP
@@ -61,11 +52,3 @@ test-api/OutOfMemoryNested: SKIP
# BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP
-
-[ $simulator == arm ]
-
-# BUG(271): During exception propagation, we compare pointers into the
-# stack. These tests fail on the ARM simulator because the C++ and
-# the JavaScript stacks are separate.
-test-api/ExceptionOrder: FAIL
-test-api/TryCatchInTryFinally: FAIL
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
new file mode 100644
index 00000000..25f5c395
--- /dev/null
+++ b/test/cctest/test-accessors.cc
@@ -0,0 +1,450 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "cctest.h"
+#include "frames-inl.h"
+#include "string-stream.h"
+
+using ::v8::ObjectTemplate;
+using ::v8::Value;
+using ::v8::Context;
+using ::v8::Local;
+using ::v8::String;
+using ::v8::Script;
+using ::v8::Function;
+using ::v8::AccessorInfo;
+using ::v8::Extension;
+
+namespace i = ::v8::internal;
+
+static v8::Handle<Value> handle_property(Local<String> name,
+ const AccessorInfo&) {
+ ApiTestFuzzer::Fuzz();
+ return v8_num(900);
+}
+
+
+THREADED_TEST(PropertyHandler) {
+ v8::HandleScope scope;
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property);
+ LocalContext env;
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("Fun"), fun);
+ Local<Script> getter = v8_compile("var obj = new Fun(); obj.foo;");
+ CHECK_EQ(900, getter->Run()->Int32Value());
+ Local<Script> setter = v8_compile("obj.foo = 901;");
+ CHECK_EQ(901, setter->Run()->Int32Value());
+}
+
+
+static v8::Handle<Value> GetIntValue(Local<String> property,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ int* value =
+ static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
+ return v8_num(*value);
+}
+
+
+static void SetIntValue(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info) {
+ int* field =
+ static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
+ *field = value->Int32Value();
+}
+
+int foo, bar, baz;
+
+THREADED_TEST(GlobalVariableAccess) {
+ foo = 0;
+ bar = -4;
+ baz = 10;
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ templ->InstanceTemplate()->SetAccessor(v8_str("foo"),
+ GetIntValue,
+ SetIntValue,
+ v8::External::New(&foo));
+ templ->InstanceTemplate()->SetAccessor(v8_str("bar"),
+ GetIntValue,
+ SetIntValue,
+ v8::External::New(&bar));
+ templ->InstanceTemplate()->SetAccessor(v8_str("baz"),
+ GetIntValue,
+ SetIntValue,
+ v8::External::New(&baz));
+ LocalContext env(0, templ->InstanceTemplate());
+ v8_compile("foo = (++bar) + baz")->Run();
+ CHECK_EQ(bar, -3);
+ CHECK_EQ(foo, 7);
+}
+
+
+static int x_register = 0;
+static v8::Handle<v8::Object> x_receiver;
+static v8::Handle<v8::Object> x_holder;
+
+
+static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK_EQ(x_receiver, info.This());
+ CHECK_EQ(x_holder, info.Holder());
+ return v8_num(x_register);
+}
+
+
+static void XSetter(Local<String> name,
+ Local<Value> value,
+ const AccessorInfo& info) {
+ CHECK_EQ(x_holder, info.This());
+ CHECK_EQ(x_holder, info.Holder());
+ x_register = value->Int32Value();
+}
+
+
+THREADED_TEST(AccessorIC) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("x"), XGetter, XSetter);
+ LocalContext context;
+ x_holder = obj->NewInstance();
+ context->Global()->Set(v8_str("holder"), x_holder);
+ x_receiver = v8::Object::New();
+ context->Global()->Set(v8_str("obj"), x_receiver);
+ v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun(
+ "obj.__proto__ = holder;"
+ "var result = [];"
+ "for (var i = 0; i < 10; i++) {"
+ " holder.x = i;"
+ " result.push(obj.x);"
+ "}"
+ "result"));
+ CHECK_EQ(10, array->Length());
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
+ CHECK_EQ(v8::Integer::New(i), entry);
+ }
+}
+
+
+static v8::Handle<Value> AccessorProhibitsOverwritingGetter(
+ Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ return v8::True();
+}
+
+
+THREADED_TEST(AccessorProhibitsOverwriting) {
+ v8::HandleScope scope;
+ LocalContext context;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"),
+ AccessorProhibitsOverwritingGetter,
+ 0,
+ v8::Handle<Value>(),
+ v8::PROHIBITS_OVERWRITING,
+ v8::ReadOnly);
+ Local<v8::Object> instance = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), instance);
+ Local<Value> value = CompileRun(
+ "obj.__defineGetter__('x', function() { return false; });"
+ "obj.x");
+ CHECK(value->BooleanValue());
+ value = CompileRun(
+ "var setter_called = false;"
+ "obj.__defineSetter__('x', function() { setter_called = true; });"
+ "obj.x = 42;"
+ "setter_called");
+ CHECK(!value->BooleanValue());
+ value = CompileRun(
+ "obj2 = {};"
+ "obj2.__proto__ = obj;"
+ "obj2.__defineGetter__('x', function() { return false; });"
+ "obj2.x");
+ CHECK(value->BooleanValue());
+ value = CompileRun(
+ "var setter_called = false;"
+ "obj2 = {};"
+ "obj2.__proto__ = obj;"
+ "obj2.__defineSetter__('x', function() { setter_called = true; });"
+ "obj2.x = 42;"
+ "setter_called");
+ CHECK(!value->BooleanValue());
+}
+
+
+template <int C>
+static v8::Handle<Value> HandleAllocatingGetter(Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ for (int i = 0; i < C; i++)
+ v8::String::New("foo");
+ return v8::String::New("foo");
+}
+
+
+THREADED_TEST(HandleScopePop) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("one"), HandleAllocatingGetter<1>);
+ obj->SetAccessor(v8_str("many"), HandleAllocatingGetter<1024>);
+ LocalContext context;
+ v8::Handle<v8::Object> inst = obj->NewInstance();
+ context->Global()->Set(v8::String::New("obj"), inst);
+ int count_before = i::HandleScope::NumberOfHandles();
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "for (var i = 0; i < 1000; i++) {"
+ " obj.one;"
+ " obj.many;"
+ "}");
+ }
+ int count_after = i::HandleScope::NumberOfHandles();
+ CHECK_EQ(count_before, count_after);
+}
+
+static v8::Handle<Value> CheckAccessorArgsCorrect(Local<String> name,
+ const AccessorInfo& info) {
+ CHECK(info.This() == info.Holder());
+ CHECK(info.Data()->Equals(v8::String::New("data")));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This() == info.Holder());
+ CHECK(info.Data()->Equals(v8::String::New("data")));
+ i::Heap::CollectAllGarbage(true);
+ CHECK(info.This() == info.Holder());
+ CHECK(info.Data()->Equals(v8::String::New("data")));
+ return v8::Integer::New(17);
+}
+
+THREADED_TEST(DirectCall) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("xxx"),
+ CheckAccessorArgsCorrect,
+ NULL,
+ v8::String::New("data"));
+ LocalContext context;
+ v8::Handle<v8::Object> inst = obj->NewInstance();
+ context->Global()->Set(v8::String::New("obj"), inst);
+ Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ for (int i = 0; i < 10; i++) {
+ Local<Value> result = scr->Run();
+ CHECK(!result.IsEmpty());
+ CHECK_EQ(17, result->Int32Value());
+ }
+}
+
+static v8::Handle<Value> EmptyGetter(Local<String> name,
+ const AccessorInfo& info) {
+ CheckAccessorArgsCorrect(name, info);
+ ApiTestFuzzer::Fuzz();
+ CheckAccessorArgsCorrect(name, info);
+ return v8::Handle<v8::Value>();
+}
+
+THREADED_TEST(EmptyResult) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
+ LocalContext context;
+ v8::Handle<v8::Object> inst = obj->NewInstance();
+ context->Global()->Set(v8::String::New("obj"), inst);
+ Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ for (int i = 0; i < 10; i++) {
+ Local<Value> result = scr->Run();
+ CHECK(result == v8::Undefined());
+ }
+}
+
+
+THREADED_TEST(NoReuseRegress) {
+ // Check that the IC generated for the one test doesn't get reused
+ // for the other.
+ v8::HandleScope scope;
+ {
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
+ LocalContext context;
+ v8::Handle<v8::Object> inst = obj->NewInstance();
+ context->Global()->Set(v8::String::New("obj"), inst);
+ Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ for (int i = 0; i < 2; i++) {
+ Local<Value> result = scr->Run();
+ CHECK(result == v8::Undefined());
+ }
+ }
+ {
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("xxx"),
+ CheckAccessorArgsCorrect,
+ NULL,
+ v8::String::New("data"));
+ LocalContext context;
+ v8::Handle<v8::Object> inst = obj->NewInstance();
+ context->Global()->Set(v8::String::New("obj"), inst);
+ Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ for (int i = 0; i < 10; i++) {
+ Local<Value> result = scr->Run();
+ CHECK(!result.IsEmpty());
+ CHECK_EQ(17, result->Int32Value());
+ }
+ }
+}
+
+static v8::Handle<Value> ThrowingGetAccessor(Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ return v8::ThrowException(v8_str("g"));
+}
+
+
+static void ThrowingSetAccessor(Local<String> name,
+ Local<Value> value,
+ const AccessorInfo& info) {
+ v8::ThrowException(value);
+}
+
+
+THREADED_TEST(Regress1054726) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("x"),
+ ThrowingGetAccessor,
+ ThrowingSetAccessor,
+ Local<Value>());
+
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), obj->NewInstance());
+
+ // Use the throwing property setter/getter in a loop to force
+ // the accessor ICs to be initialized.
+ v8::Handle<Value> result;
+ result = Script::Compile(v8_str(
+ "var result = '';"
+ "for (var i = 0; i < 5; i++) {"
+ " try { obj.x; } catch (e) { result += e; }"
+ "}; result"))->Run();
+ CHECK_EQ(v8_str("ggggg"), result);
+
+ result = Script::Compile(String::New(
+ "var result = '';"
+ "for (var i = 0; i < 5; i++) {"
+ " try { obj.x = i; } catch (e) { result += e; }"
+ "}; result"))->Run();
+ CHECK_EQ(v8_str("01234"), result);
+}
+
+
+static v8::Handle<Value> AllocGetter(Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ return v8::Array::New(1000);
+}
+
+
+THREADED_TEST(Gc) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("xxx"), AllocGetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), obj->NewInstance());
+ Script::Compile(String::New(
+ "var last = [];"
+ "for (var i = 0; i < 2048; i++) {"
+ " var result = obj.xxx;"
+ " result[0] = last;"
+ " last = result;"
+ "}"))->Run();
+}
+
+
+static v8::Handle<Value> StackCheck(Local<String> name,
+ const AccessorInfo& info) {
+ i::StackFrameIterator iter;
+ for (int i = 0; !iter.done(); i++) {
+ i::StackFrame* frame = iter.frame();
+ CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
+ CHECK(frame->code()->IsCode());
+ i::Address pc = frame->pc();
+ i::Code* code = frame->code();
+ CHECK(code->contains(pc));
+ iter.Advance();
+ }
+ return v8::Undefined();
+}
+
+
+THREADED_TEST(StackIteration) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ i::StringStream::ClearMentionedObjectCache();
+ obj->SetAccessor(v8_str("xxx"), StackCheck);
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), obj->NewInstance());
+ Script::Compile(String::New(
+ "function foo() {"
+ " return obj.xxx;"
+ "}"
+ "for (var i = 0; i < 100; i++) {"
+ " foo();"
+ "}"))->Run();
+}
+
+
+static v8::Handle<Value> AllocateHandles(Local<String> name,
+ const AccessorInfo& info) {
+ for (int i = 0; i < i::kHandleBlockSize + 1; i++) {
+ v8::Local<v8::Value>::New(name);
+ }
+ return v8::Integer::New(100);
+}
+
+
+THREADED_TEST(HandleScopeSegment) {
+ // Check that we can return values past popping of handle scope
+ // segments.
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetAccessor(v8_str("xxx"), AllocateHandles);
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), obj->NewInstance());
+ v8::Handle<v8::Value> result = Script::Compile(String::New(
+ "var result;"
+ "for (var i = 0; i < 4; i++)"
+ " result = obj.xxx;"
+ "result;"))->Run();
+ CHECK_EQ(100, result->Int32Value());
+}
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 1235b13b..315a34ed 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -65,9 +65,9 @@ static Object* AllocateAfterFailures() {
// Old data space.
OldSpace* old_data_space = Heap::old_data_space();
- static const int kOldDataSpaceFillerSize = SeqAsciiString::SizeFor(0);
+ static const int kOldDataSpaceFillerSize = ByteArray::SizeFor(0);
while (old_data_space->Available() > kOldDataSpaceFillerSize) {
- CHECK(!Heap::AllocateRawAsciiString(0, TENURED)->IsFailure());
+ CHECK(!Heap::AllocateByteArray(0, TENURED)->IsFailure());
}
CHECK(!Heap::AllocateRawAsciiString(100, TENURED)->IsFailure());
@@ -195,9 +195,9 @@ TEST(CodeRange) {
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
void* base = CodeRange::AllocateRawMemory(requested, &allocated);
- blocks.Add(Block(base, allocated));
- current_allocated += allocated;
- total_allocated += allocated;
+ blocks.Add(Block(base, static_cast<int>(allocated)));
+ current_allocated += static_cast<int>(allocated);
+ total_allocated += static_cast<int>(allocated);
} else {
// Free a block.
int index = Pseudorandom() % blocks.length();
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index a943f303..6d6c174f 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -38,6 +38,8 @@
#include "utils.h"
#include "cctest.h"
+static const bool kLogThreading = false;
+
static bool IsNaN(double x) {
#ifdef WIN32
return _isnan(x);
@@ -58,131 +60,6 @@ using ::v8::Extension;
namespace i = ::v8::internal;
-static Local<Value> v8_num(double x) {
- return v8::Number::New(x);
-}
-
-
-static Local<String> v8_str(const char* x) {
- return String::New(x);
-}
-
-
-static Local<Script> v8_compile(const char* x) {
- return Script::Compile(v8_str(x));
-}
-
-
-// A LocalContext holds a reference to a v8::Context.
-class LocalContext {
- public:
- LocalContext(v8::ExtensionConfiguration* extensions = 0,
- v8::Handle<ObjectTemplate> global_template =
- v8::Handle<ObjectTemplate>(),
- v8::Handle<Value> global_object = v8::Handle<Value>())
- : context_(Context::New(extensions, global_template, global_object)) {
- context_->Enter();
- }
-
- virtual ~LocalContext() {
- context_->Exit();
- context_.Dispose();
- }
-
- Context* operator->() { return *context_; }
- Context* operator*() { return *context_; }
- Local<Context> local() { return Local<Context>::New(context_); }
- bool IsReady() { return !context_.IsEmpty(); }
-
- private:
- v8::Persistent<Context> context_;
-};
-
-
-// Switches between all the Api tests using the threading support.
-// In order to get a surprising but repeatable pattern of thread
-// switching it has extra semaphores to control the order in which
-// the tests alternate, not relying solely on the big V8 lock.
-//
-// A test is augmented with calls to ApiTestFuzzer::Fuzz() in its
-// callbacks. This will have no effect when we are not running the
-// thread fuzzing test. In the thread fuzzing test it will
-// pseudorandomly select a successor thread and switch execution
-// to that thread, suspending the current test.
-class ApiTestFuzzer: public v8::internal::Thread {
- public:
- void CallTest();
- explicit ApiTestFuzzer(int num)
- : test_number_(num),
- gate_(v8::internal::OS::CreateSemaphore(0)),
- active_(true) {
- }
- ~ApiTestFuzzer() { delete gate_; }
-
- // The ApiTestFuzzer is also a Thread, so it has a Run method.
- virtual void Run();
-
- enum PartOfTest { FIRST_PART, SECOND_PART };
-
- static void Setup(PartOfTest part);
- static void RunAllTests();
- static void TearDown();
- // This method switches threads if we are running the Threading test.
- // Otherwise it does nothing.
- static void Fuzz();
- private:
- static bool fuzzing_;
- static int tests_being_run_;
- static int current_;
- static int active_tests_;
- static bool NextThread();
- int test_number_;
- v8::internal::Semaphore* gate_;
- bool active_;
- void ContextSwitch();
- static int GetNextTestNumber();
- static v8::internal::Semaphore* all_tests_done_;
-};
-
-
-#define THREADED_TEST(Name) \
- static void Test##Name(); \
- RegisterThreadedTest register_##Name(Test##Name); \
- /* */ TEST(Name)
-
-
-class RegisterThreadedTest {
- public:
- explicit RegisterThreadedTest(CcTest::TestFunction* callback)
- : fuzzer_(NULL), callback_(callback) {
- prev_ = first_;
- first_ = this;
- count_++;
- }
- static int count() { return count_; }
- static RegisterThreadedTest* nth(int i) {
- CHECK(i < count());
- RegisterThreadedTest* current = first_;
- while (i > 0) {
- i--;
- current = current->prev_;
- }
- return current;
- }
- CcTest::TestFunction* callback() { return callback_; }
- ApiTestFuzzer* fuzzer_;
-
- private:
- static RegisterThreadedTest* first_;
- static int count_;
- CcTest::TestFunction* callback_;
- RegisterThreadedTest* prev_;
-};
-
-
-RegisterThreadedTest *RegisterThreadedTest::first_ = NULL;
-int RegisterThreadedTest::count_ = 0;
-
static int signature_callback_count;
static v8::Handle<Value> IncrementingSignatureCallback(
@@ -231,11 +108,6 @@ THREADED_TEST(Handles) {
}
-// Helper function that compiles and runs the source.
-static Local<Value> CompileRun(const char* source) {
- return Script::Compile(String::New(source))->Run();
-}
-
THREADED_TEST(ReceiverSignature) {
v8::HandleScope scope;
LocalContext env;
@@ -382,9 +254,9 @@ THREADED_TEST(Script) {
static uint16_t* AsciiToTwoByteString(const char* source) {
- size_t array_length = strlen(source) + 1;
+ int array_length = i::StrLength(source) + 1;
uint16_t* converted = i::NewArray<uint16_t>(array_length);
- for (size_t i = 0; i < array_length; i++) converted[i] = source[i];
+ for (int i = 0; i < array_length; i++) converted[i] = source[i];
return converted;
}
@@ -720,27 +592,6 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
}
-static v8::Handle<Value> handle_property(Local<String> name,
- const AccessorInfo&) {
- ApiTestFuzzer::Fuzz();
- return v8_num(900);
-}
-
-
-THREADED_TEST(PropertyHandler) {
- v8::HandleScope scope;
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property);
- LocalContext env;
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("Fun"), fun);
- Local<Script> getter = v8_compile("var obj = new Fun(); obj.foo;");
- CHECK_EQ(900, getter->Run()->Int32Value());
- Local<Script> setter = v8_compile("obj.foo = 901;");
- CHECK_EQ(901, setter->Run()->Int32Value());
-}
-
-
THREADED_TEST(TinyInteger) {
v8::HandleScope scope;
LocalContext env;
@@ -907,49 +758,6 @@ THREADED_TEST(GlobalPrototype) {
}
-static v8::Handle<Value> GetIntValue(Local<String> property,
- const AccessorInfo& info) {
- ApiTestFuzzer::Fuzz();
- int* value =
- static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
- return v8_num(*value);
-}
-
-static void SetIntValue(Local<String> property,
- Local<Value> value,
- const AccessorInfo& info) {
- int* field =
- static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
- *field = value->Int32Value();
-}
-
-int foo, bar, baz;
-
-THREADED_TEST(GlobalVariableAccess) {
- foo = 0;
- bar = -4;
- baz = 10;
- v8::HandleScope scope;
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
- templ->InstanceTemplate()->SetAccessor(v8_str("foo"),
- GetIntValue,
- SetIntValue,
- v8::External::New(&foo));
- templ->InstanceTemplate()->SetAccessor(v8_str("bar"),
- GetIntValue,
- SetIntValue,
- v8::External::New(&bar));
- templ->InstanceTemplate()->SetAccessor(v8_str("baz"),
- GetIntValue,
- SetIntValue,
- v8::External::New(&baz));
- LocalContext env(0, templ->InstanceTemplate());
- v8_compile("foo = (++bar) + baz")->Run();
- CHECK_EQ(bar, -3);
- CHECK_EQ(foo, 7);
-}
-
-
THREADED_TEST(ObjectTemplate) {
v8::HandleScope scope;
Local<ObjectTemplate> templ1 = ObjectTemplate::New();
@@ -1365,50 +1173,6 @@ THREADED_TEST(CallbackExceptionRegression) {
}
-static v8::Handle<Value> ThrowingGetAccessor(Local<String> name,
- const AccessorInfo& info) {
- ApiTestFuzzer::Fuzz();
- return v8::ThrowException(v8_str("g"));
-}
-
-
-static void ThrowingSetAccessor(Local<String> name,
- Local<Value> value,
- const AccessorInfo& info) {
- v8::ThrowException(value);
-}
-
-
-THREADED_TEST(Regress1054726) {
- v8::HandleScope scope;
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
- obj->SetAccessor(v8_str("x"),
- ThrowingGetAccessor,
- ThrowingSetAccessor,
- Local<Value>());
-
- LocalContext env;
- env->Global()->Set(v8_str("obj"), obj->NewInstance());
-
- // Use the throwing property setter/getter in a loop to force
- // the accessor ICs to be initialized.
- v8::Handle<Value> result;
- result = Script::Compile(v8_str(
- "var result = '';"
- "for (var i = 0; i < 5; i++) {"
- " try { obj.x; } catch (e) { result += e; }"
- "}; result"))->Run();
- CHECK_EQ(v8_str("ggggg"), result);
-
- result = Script::Compile(String::New(
- "var result = '';"
- "for (var i = 0; i < 5; i++) {"
- " try { obj.x = i; } catch (e) { result += e; }"
- "}; result"))->Run();
- CHECK_EQ(v8_str("01234"), result);
-}
-
-
THREADED_TEST(FunctionPrototype) {
v8::HandleScope scope;
Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New();
@@ -1580,17 +1344,10 @@ THREADED_TEST(HiddenProperties) {
}
+static bool interceptor_for_hidden_properties_called;
static v8::Handle<Value> InterceptorForHiddenProperties(
Local<String> name, const AccessorInfo& info) {
- // Make sure objects move.
- bool saved_always_compact = i::FLAG_always_compact;
- if (!i::FLAG_never_compact) {
- i::FLAG_always_compact = true;
- }
- // The whole goal of this interceptor is to cause a GC during local property
- // lookup.
- i::Heap::CollectAllGarbage(false);
- i::FLAG_always_compact = saved_always_compact;
+ interceptor_for_hidden_properties_called = true;
return v8::Handle<Value>();
}
@@ -1599,6 +1356,8 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) {
v8::HandleScope scope;
LocalContext context;
+ interceptor_for_hidden_properties_called = false;
+
v8::Local<v8::String> key = v8_str("api-test::hidden-key");
// Associate an interceptor with an object and start setting hidden values.
@@ -1609,6 +1368,7 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) {
Local<v8::Object> obj = function->NewInstance();
CHECK(obj->SetHiddenValue(key, v8::Integer::New(2302)));
CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value());
+ CHECK(!interceptor_for_hidden_properties_called);
}
@@ -2910,6 +2670,40 @@ THREADED_TEST(AutoExtensions) {
}
+static const char* kSyntaxErrorInExtensionSource =
+ "[";
+
+
+// Test that a syntax error in an extension does not cause a fatal
+// error but results in an empty context.
+THREADED_TEST(SyntaxErrorExtensions) {
+ v8::HandleScope handle_scope;
+ v8::RegisterExtension(new Extension("syntaxerror",
+ kSyntaxErrorInExtensionSource));
+ const char* extension_names[] = { "syntaxerror" };
+ v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::Handle<Context> context = Context::New(&extensions);
+ CHECK(context.IsEmpty());
+}
+
+
+static const char* kExceptionInExtensionSource =
+ "throw 42";
+
+
+// Test that an exception when installing an extension does not cause
+// a fatal error but results in an empty context.
+THREADED_TEST(ExceptionExtensions) {
+ v8::HandleScope handle_scope;
+ v8::RegisterExtension(new Extension("exception",
+ kExceptionInExtensionSource));
+ const char* extension_names[] = { "exception" };
+ v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::Handle<Context> context = Context::New(&extensions);
+ CHECK(context.IsEmpty());
+}
+
+
static void CheckDependencies(const char* name, const char* expected) {
v8::HandleScope handle_scope;
v8::ExtensionConfiguration config(1, &name);
@@ -3157,6 +2951,58 @@ THREADED_TEST(WeakReference) {
}
+static bool in_scavenge = false;
+static int last = -1;
+
+static void ForceScavenge(v8::Persistent<v8::Value> obj, void* data) {
+ CHECK_EQ(-1, last);
+ last = 0;
+ obj.Dispose();
+ obj.Clear();
+ in_scavenge = true;
+ i::Heap::PerformScavenge();
+ in_scavenge = false;
+ *(reinterpret_cast<bool*>(data)) = true;
+}
+
+static void CheckIsNotInvokedInScavenge(v8::Persistent<v8::Value> obj,
+ void* data) {
+ CHECK_EQ(0, last);
+ last = 1;
+ *(reinterpret_cast<bool*>(data)) = in_scavenge;
+ obj.Dispose();
+ obj.Clear();
+}
+
+THREADED_TEST(NoWeakRefCallbacksInScavenge) {
+ // Test verifies that scavenge cannot invoke WeakReferenceCallbacks.
+ // Calling callbacks from scavenges is unsafe as objects held by those
+ // handlers might have become strongly reachable, but scavenge doesn't
+ // check that.
+ v8::Persistent<Context> context = Context::New();
+ Context::Scope context_scope(context);
+
+ v8::Persistent<v8::Object> object_a;
+ v8::Persistent<v8::Object> object_b;
+
+ {
+ v8::HandleScope handle_scope;
+ object_b = v8::Persistent<v8::Object>::New(v8::Object::New());
+ object_a = v8::Persistent<v8::Object>::New(v8::Object::New());
+ }
+
+ bool object_a_disposed = false;
+ object_a.MakeWeak(&object_a_disposed, &ForceScavenge);
+ bool released_in_scavenge = false;
+ object_b.MakeWeak(&released_in_scavenge, &CheckIsNotInvokedInScavenge);
+
+ while (!object_a_disposed) {
+ i::Heap::CollectAllGarbage(false);
+ }
+ CHECK(!released_in_scavenge);
+}
+
+
v8::Handle<Function> args_fun;
@@ -3184,53 +3030,6 @@ THREADED_TEST(Arguments) {
}
-static int x_register = 0;
-static v8::Handle<v8::Object> x_receiver;
-static v8::Handle<v8::Object> x_holder;
-
-
-static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
- ApiTestFuzzer::Fuzz();
- CHECK_EQ(x_receiver, info.This());
- CHECK_EQ(x_holder, info.Holder());
- return v8_num(x_register);
-}
-
-
-static void XSetter(Local<String> name,
- Local<Value> value,
- const AccessorInfo& info) {
- CHECK_EQ(x_holder, info.This());
- CHECK_EQ(x_holder, info.Holder());
- x_register = value->Int32Value();
-}
-
-
-THREADED_TEST(AccessorIC) {
- v8::HandleScope scope;
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
- obj->SetAccessor(v8_str("x"), XGetter, XSetter);
- LocalContext context;
- x_holder = obj->NewInstance();
- context->Global()->Set(v8_str("holder"), x_holder);
- x_receiver = v8::Object::New();
- context->Global()->Set(v8_str("obj"), x_receiver);
- v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun(
- "obj.__proto__ = holder;"
- "var result = [];"
- "for (var i = 0; i < 10; i++) {"
- " holder.x = i;"
- " result.push(obj.x);"
- "}"
- "result"));
- CHECK_EQ(10, array->Length());
- for (int i = 0; i < 10; i++) {
- v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
- CHECK_EQ(v8::Integer::New(i), entry);
- }
-}
-
-
static v8::Handle<Value> NoBlockGetterX(Local<String> name,
const AccessorInfo&) {
return v8::Handle<Value>();
@@ -6094,13 +5893,17 @@ void ApiTestFuzzer::Fuzz() {
// not start immediately.
bool ApiTestFuzzer::NextThread() {
int test_position = GetNextTestNumber();
- int test_number = RegisterThreadedTest::nth(current_)->fuzzer_->test_number_;
+ const char* test_name = RegisterThreadedTest::nth(current_)->name();
if (test_position == current_) {
- printf("Stay with %d\n", test_number);
+ if (kLogThreading)
+ printf("Stay with %s\n", test_name);
return false;
}
- printf("Switch from %d to %d\n",
- current_ < 0 ? 0 : test_number, test_position < 0 ? 0 : test_number);
+ if (kLogThreading) {
+ printf("Switch from %s to %s\n",
+ test_name,
+ RegisterThreadedTest::nth(test_position)->name());
+ }
current_ = test_position;
RegisterThreadedTest::nth(current_)->fuzzer_->gate_->Signal();
return true;
@@ -6209,9 +6012,11 @@ TEST(Threading2) {
void ApiTestFuzzer::CallTest() {
- printf("Start test %d\n", test_number_);
+ if (kLogThreading)
+ printf("Start test %d\n", test_number_);
CallTestNumber(test_number_);
- printf("End test %d\n", test_number_);
+ if (kLogThreading)
+ printf("End test %d\n", test_number_);
}
@@ -6455,6 +6260,31 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
i::Heap::CollectAllGarbage(false);
}
+void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
+ handle.Dispose();
+}
+
+void HandleCreatingCallback(v8::Persistent<v8::Value> handle, void*) {
+ v8::HandleScope scope;
+ v8::Persistent<v8::Object>::New(v8::Object::New());
+}
+
+
+THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
+ LocalContext context;
+
+ v8::Persistent<v8::Object> handle1, handle2, handle3;
+ {
+ v8::HandleScope scope;
+ handle3 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ }
+ handle2.MakeWeak(NULL, DisposingCallback);
+ handle3.MakeWeak(NULL, HandleCreatingCallback);
+ i::Heap::CollectAllGarbage(false);
+}
+
THREADED_TEST(CheckForCrossContextObjectLiterals) {
v8::V8::Initialize();
@@ -6699,53 +6529,6 @@ THREADED_TEST(PropertyEnumeration) {
}
-static v8::Handle<Value> AccessorProhibitsOverwritingGetter(
- Local<String> name,
- const AccessorInfo& info) {
- ApiTestFuzzer::Fuzz();
- return v8::True();
-}
-
-
-THREADED_TEST(AccessorProhibitsOverwriting) {
- v8::HandleScope scope;
- LocalContext context;
- Local<ObjectTemplate> templ = ObjectTemplate::New();
- templ->SetAccessor(v8_str("x"),
- AccessorProhibitsOverwritingGetter,
- 0,
- v8::Handle<Value>(),
- v8::PROHIBITS_OVERWRITING,
- v8::ReadOnly);
- Local<v8::Object> instance = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), instance);
- Local<Value> value = CompileRun(
- "obj.__defineGetter__('x', function() { return false; });"
- "obj.x");
- CHECK(value->BooleanValue());
- value = CompileRun(
- "var setter_called = false;"
- "obj.__defineSetter__('x', function() { setter_called = true; });"
- "obj.x = 42;"
- "setter_called");
- CHECK(!value->BooleanValue());
- value = CompileRun(
- "obj2 = {};"
- "obj2.__proto__ = obj;"
- "obj2.__defineGetter__('x', function() { return false; });"
- "obj2.x");
- CHECK(value->BooleanValue());
- value = CompileRun(
- "var setter_called = false;"
- "obj2 = {};"
- "obj2.__proto__ = obj;"
- "obj2.__defineSetter__('x', function() { setter_called = true; });"
- "obj2.x = 42;"
- "setter_called");
- CHECK(!value->BooleanValue());
-}
-
-
static bool NamedSetAccessBlocker(Local<v8::Object> obj,
Local<Value> name,
v8::AccessType type,
@@ -6921,7 +6704,8 @@ TEST(PreCompile) {
// a workaround for now to make this test not fail.
v8::V8::Initialize();
const char *script = "function foo(a) { return a+1; }";
- v8::ScriptData *sd = v8::ScriptData::PreCompile(script, strlen(script));
+ v8::ScriptData *sd =
+ v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK_NE(sd->Length(), 0);
CHECK_NE(sd->Data(), NULL);
delete sd;
@@ -7279,27 +7063,17 @@ static void MorphAString(i::String* string,
CHECK(i::StringShape(string).IsExternal());
if (string->IsAsciiRepresentation()) {
// Check old map is not symbol or long.
- CHECK(string->map() == i::Heap::short_external_ascii_string_map() ||
- string->map() == i::Heap::medium_external_ascii_string_map());
+ CHECK(string->map() == i::Heap::external_ascii_string_map());
// Morph external string to be TwoByte string.
- if (string->length() <= i::String::kMaxShortStringSize) {
- string->set_map(i::Heap::short_external_string_map());
- } else {
- string->set_map(i::Heap::medium_external_string_map());
- }
+ string->set_map(i::Heap::external_string_map());
i::ExternalTwoByteString* morphed =
i::ExternalTwoByteString::cast(string);
morphed->set_resource(uc16_resource);
} else {
// Check old map is not symbol or long.
- CHECK(string->map() == i::Heap::short_external_string_map() ||
- string->map() == i::Heap::medium_external_string_map());
+ CHECK(string->map() == i::Heap::external_string_map());
// Morph external string to be ASCII string.
- if (string->length() <= i::String::kMaxShortStringSize) {
- string->set_map(i::Heap::short_external_ascii_string_map());
- } else {
- string->set_map(i::Heap::medium_external_ascii_string_map());
- }
+ string->set_map(i::Heap::external_ascii_string_map());
i::ExternalAsciiString* morphed =
i::ExternalAsciiString::cast(string);
morphed->set_resource(ascii_resource);
@@ -7317,9 +7091,10 @@ THREADED_TEST(MorphCompositeStringTest) {
v8::HandleScope scope;
LocalContext env;
AsciiVectorResource ascii_resource(
- i::Vector<const char>(c_string, strlen(c_string)));
+ i::Vector<const char>(c_string, i::StrLength(c_string)));
UC16VectorResource uc16_resource(
- i::Vector<const uint16_t>(two_byte_string, strlen(c_string)));
+ i::Vector<const uint16_t>(two_byte_string,
+ i::StrLength(c_string)));
Local<String> lhs(v8::Utils::ToLocal(
i::Factory::NewExternalStringFromAscii(&ascii_resource)));
@@ -7377,7 +7152,8 @@ TEST(CompileExternalTwoByteSource) {
for (int i = 0; ascii_sources[i] != NULL; i++) {
uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]);
UC16VectorResource uc16_resource(
- i::Vector<const uint16_t>(two_byte_string, strlen(ascii_sources[i])));
+ i::Vector<const uint16_t>(two_byte_string,
+ i::StrLength(ascii_sources[i])));
v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource);
v8::Script::Compile(source);
}
@@ -7863,18 +7639,18 @@ THREADED_TEST(Regress16276) {
THREADED_TEST(PixelArray) {
v8::HandleScope scope;
LocalContext context;
- const int kElementCount = 40;
+ const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
pixel_data);
i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
- pixels->set(i, i);
+ pixels->set(i, i % 256);
}
i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
- CHECK_EQ(i, pixels->get(i));
- CHECK_EQ(i, pixel_data[i]);
+ CHECK_EQ(i % 256, pixels->get(i));
+ CHECK_EQ(i % 256, pixel_data[i]);
}
v8::Handle<v8::Object> obj = v8::Object::New();
@@ -8038,6 +7814,15 @@ THREADED_TEST(PixelArray) {
result = CompileRun("pixels[1] = 23;");
CHECK_EQ(23, result->Int32Value());
+ // Test for index greater than 255. Regression test for:
+ // http://code.google.com/p/chromium/issues/detail?id=26337.
+ result = CompileRun("pixels[256] = 255;");
+ CHECK_EQ(255, result->Int32Value());
+ result = CompileRun("var i = 0;"
+ "for (var j = 0; j < 8; j++) { i = pixels[256]; }"
+ "i");
+ CHECK_EQ(255, result->Int32Value());
+
free(pixel_data);
}
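Pixel elements are unsigned bytes, which is why the filler switched to i % 256 once kElementCount grew past 255, and why the new check stores at index 256 and then reads it in a short loop, presumably so the access is served by the warmed-up keyed-load path rather than the first-hit runtime. A standalone C++ model of the byte storage (no V8 API involved):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t pixel_data[260];  // mirrors kElementCount = 260 above
  for (int i = 0; i < 260; i++) {
    pixel_data[i] = static_cast<uint8_t>(i % 256);  // bytes wrap past 255
  }
  assert(pixel_data[255] == 255);
  assert(pixel_data[256] == 0);  // 256 % 256
  return 0;
}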
@@ -8298,6 +8083,85 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
result = CompileRun("ext_array[1] = 23;");
CHECK_EQ(23, result->Int32Value());
+ // Test more complex manipulations which cause eax to contain values
+ // that won't be completely overwritten by loads from the arrays.
+ // This catches bugs in the instructions used for the KeyedLoadIC
+ // for byte and word types.
+ {
+ const int kXSize = 300;
+ const int kYSize = 300;
+ const int kLargeElementCount = kXSize * kYSize * 4;
+ ElementType* large_array_data =
+ static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
+ i::Handle<ExternalArrayClass> large_array =
+ i::Handle<ExternalArrayClass>::cast(
+ i::Factory::NewExternalArray(kLargeElementCount,
+ array_type,
+ array_data));
+ v8::Handle<v8::Object> large_obj = v8::Object::New();
+ // Set the elements to be the external array.
+ large_obj->SetIndexedPropertiesToExternalArrayData(large_array_data,
+ array_type,
+ kLargeElementCount);
+ context->Global()->Set(v8_str("large_array"), large_obj);
+ // Initialize contents of a few rows.
+ for (int x = 0; x < 300; x++) {
+ int row = 0;
+ int offset = row * 300 * 4;
+ large_array_data[offset + 4 * x + 0] = (ElementType) 127;
+ large_array_data[offset + 4 * x + 1] = (ElementType) 0;
+ large_array_data[offset + 4 * x + 2] = (ElementType) 0;
+ large_array_data[offset + 4 * x + 3] = (ElementType) 127;
+ row = 150;
+ offset = row * 300 * 4;
+ large_array_data[offset + 4 * x + 0] = (ElementType) 127;
+ large_array_data[offset + 4 * x + 1] = (ElementType) 0;
+ large_array_data[offset + 4 * x + 2] = (ElementType) 0;
+ large_array_data[offset + 4 * x + 3] = (ElementType) 127;
+ row = 298;
+ offset = row * 300 * 4;
+ large_array_data[offset + 4 * x + 0] = (ElementType) 127;
+ large_array_data[offset + 4 * x + 1] = (ElementType) 0;
+ large_array_data[offset + 4 * x + 2] = (ElementType) 0;
+ large_array_data[offset + 4 * x + 3] = (ElementType) 127;
+ }
+ // The goal of the code below is to make "offset" large enough
+ // that the computation of the index (which goes into eax) has
+ // high bits set which will not be overwritten by a byte or short
+ // load.
+ result = CompileRun("var failed = false;"
+ "var offset = 0;"
+ "for (var i = 0; i < 300; i++) {"
+ " if (large_array[4 * i] != 127 ||"
+ " large_array[4 * i + 1] != 0 ||"
+ " large_array[4 * i + 2] != 0 ||"
+ " large_array[4 * i + 3] != 127) {"
+ " failed = true;"
+ " }"
+ "}"
+ "offset = 150 * 300 * 4;"
+ "for (var i = 0; i < 300; i++) {"
+ " if (large_array[offset + 4 * i] != 127 ||"
+ " large_array[offset + 4 * i + 1] != 0 ||"
+ " large_array[offset + 4 * i + 2] != 0 ||"
+ " large_array[offset + 4 * i + 3] != 127) {"
+ " failed = true;"
+ " }"
+ "}"
+ "offset = 298 * 300 * 4;"
+ "for (var i = 0; i < 300; i++) {"
+ " if (large_array[offset + 4 * i] != 127 ||"
+ " large_array[offset + 4 * i + 1] != 0 ||"
+ " large_array[offset + 4 * i + 2] != 0 ||"
+ " large_array[offset + 4 * i + 3] != 127) {"
+ " failed = true;"
+ " }"
+ "}"
+ "!failed;");
+ CHECK_EQ(true, result->BooleanValue());
+ free(large_array_data);
+ }
+
free(array_data);
}
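The largest offset exercised above is 298 * 300 * 4 = 357600, which needs 19 bits, so the element index computed into eax carries bits far above the low byte. A keyed load encoded to write only the low register byte (mov al, [mem]) would merge the loaded value with those stale bits; a zero-extending load (movzx) does not. A standalone illustration of the difference (no V8 or assembler involved):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t index = 298 * 300 * 4;  // 357600: bits set above bit 16
  assert(index == 357600u);
  uint32_t eax = index;        // register still holds index-computation bits
  const uint8_t loaded = 127;  // byte actually read from the external array
  uint32_t buggy = (eax & ~0xffu) | loaded;   // effect of: mov al, [mem]
  uint32_t fixed = loaded;                    // effect of: movzx eax, byte [mem]
  assert(buggy != fixed && fixed == 127u);
  return 0;
}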
@@ -8489,9 +8353,262 @@ THREADED_TEST(GetHeapStatistics) {
v8::HandleScope scope;
LocalContext c1;
v8::HeapStatistics heap_statistics;
- CHECK_EQ(heap_statistics.total_heap_size(), 0);
- CHECK_EQ(heap_statistics.used_heap_size(), 0);
+ CHECK_EQ(static_cast<int>(heap_statistics.total_heap_size()), 0);
+ CHECK_EQ(static_cast<int>(heap_statistics.used_heap_size()), 0);
v8::V8::GetHeapStatistics(&heap_statistics);
- CHECK_NE(heap_statistics.total_heap_size(), 0);
- CHECK_NE(heap_statistics.used_heap_size(), 0);
+ CHECK_NE(static_cast<int>(heap_statistics.total_heap_size()), 0);
+ CHECK_NE(static_cast<int>(heap_statistics.used_heap_size()), 0);
+}
+
+
+static double DoubleFromBits(uint64_t value) {
+ double target;
+#ifdef BIG_ENDIAN_FLOATING_POINT
+ const int kIntSize = 4;
+ // Somebody swapped the lower and higher half of doubles.
+ memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
+ memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
+#else
+ memcpy(&target, &value, sizeof(target));
+#endif
+ return target;
+}
+
+
+static uint64_t DoubleToBits(double value) {
+ uint64_t target;
+#ifdef BIG_ENDIAN_FLOATING_POINT
+ const int kIntSize = 4;
+ // Somebody swapped the lower and higher half of doubles.
+ memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
+ memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
+#else
+ memcpy(&target, &value, sizeof(target));
+#endif
+ return target;
+}
+
+
+static double DoubleToDateTime(double input) {
+ double date_limit = 864e13;
+ if (IsNaN(input) || input < -date_limit || input > date_limit) {
+ return i::OS::nan_value();
+ }
+ return (input < 0) ? -(floor(-input)) : floor(input);
+}
+
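The 864e13 limit is not arbitrary: ECMAScript caps dates at 100,000,000 days on either side of the epoch, and a day is 864e5 ms, so the bound is 1e8 * 864e5 = 864e13 ms. A quick standalone check (both factors and the product are exactly representable as doubles):

#include <cassert>

int main() {
  const double ms_per_day = 864e5;  // 86,400,000 ms
  const double max_days = 1e8;      // ECMAScript date range, each direction
  assert(ms_per_day * max_days == 864e13);  // the date_limit used above
  return 0;
}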
+// We don't have a consistent way to write 64-bit constants syntactically, so we
+// split them into two 32-bit constants and combine them programmatically.
+static double DoubleFromBits(uint32_t high_bits, uint32_t low_bits) {
+ return DoubleFromBits((static_cast<uint64_t>(high_bits) << 32) | low_bits);
+}
+
+
+THREADED_TEST(QuietSignalingNaNs) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::TryCatch try_catch;
+
+ // Special double values.
+ double snan = DoubleFromBits(0x7ff00000, 0x00000001);
+ double qnan = DoubleFromBits(0x7ff80000, 0x00000000);
+ double infinity = DoubleFromBits(0x7ff00000, 0x00000000);
+ double max_normal = DoubleFromBits(0x7fefffff, 0xffffffffu);
+ double min_normal = DoubleFromBits(0x00100000, 0x00000000);
+ double max_denormal = DoubleFromBits(0x000fffff, 0xffffffffu);
+ double min_denormal = DoubleFromBits(0x00000000, 0x00000001);
+
+  // Date values are capped at 100000000 days (times 864e5 ms per day)
+ // on either side of the epoch.
+ double date_limit = 864e13;
+
+ double test_values[] = {
+ snan,
+ qnan,
+ infinity,
+ max_normal,
+ date_limit + 1,
+ date_limit,
+ min_normal,
+ max_denormal,
+ min_denormal,
+ 0,
+ -0,
+ -min_denormal,
+ -max_denormal,
+ -min_normal,
+ -date_limit,
+ -date_limit - 1,
+ -max_normal,
+ -infinity,
+ -qnan,
+ -snan
+ };
+ int num_test_values = 20;
+
+ for (int i = 0; i < num_test_values; i++) {
+ double test_value = test_values[i];
+
+ // Check that Number::New preserves non-NaNs and quiets SNaNs.
+ v8::Handle<v8::Value> number = v8::Number::New(test_value);
+ double stored_number = number->NumberValue();
+ if (!IsNaN(test_value)) {
+ CHECK_EQ(test_value, stored_number);
+ } else {
+ uint64_t stored_bits = DoubleToBits(stored_number);
+      // Check for a quiet NaN (bits 51..62 all set).
+ CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+ }
+
+ // Check that Date::New preserves non-NaNs in the date range and
+ // quiets SNaNs.
+ v8::Handle<v8::Value> date = v8::Date::New(test_value);
+ double expected_stored_date = DoubleToDateTime(test_value);
+ double stored_date = date->NumberValue();
+ if (!IsNaN(expected_stored_date)) {
+ CHECK_EQ(expected_stored_date, stored_date);
+ } else {
+ uint64_t stored_bits = DoubleToBits(stored_date);
+      // Check for a quiet NaN (bits 51..62 all set).
+ CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+ }
+ }
+}
+
+
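The 0xfff predicate used twice above reads twelve bits starting at bit 51: the eleven exponent bits (52..62) plus the quiet bit (51). All twelve are set only for a quiet NaN; the signaling NaN built from 0x7ff00000, 0x00000001 has the quiet bit clear, so quieting it must set bit 51. A standalone check of the bit patterns, assuming IEEE 754 doubles:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t snan = 0x7ff0000000000001ULL;  // exponent all ones, bit 51 clear
  const uint64_t qnan = 0x7ff8000000000000ULL;  // exponent all ones, bit 51 set
  assert(((qnan >> 51) & 0xfff) == 0xfff);  // the test's quiet-NaN predicate
  assert(((snan >> 51) & 0xfff) == 0xffe);  // signaling NaN: quiet bit missing
  return 0;
}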
+static v8::Handle<Value> SpaghettiIncident(const v8::Arguments& args) {
+ v8::HandleScope scope;
+ v8::TryCatch tc;
+ v8::Handle<v8::String> str = args[0]->ToString();
+ if (tc.HasCaught())
+ return tc.ReThrow();
+ return v8::Undefined();
+}
+
+
+// Test that an exception can be propagated down through a spaghetti
+// stack using ReThrow.
+THREADED_TEST(SpaghettiStackReThrow) {
+ v8::HandleScope scope;
+ LocalContext context;
+ context->Global()->Set(
+ v8::String::New("s"),
+ v8::FunctionTemplate::New(SpaghettiIncident)->GetFunction());
+ v8::TryCatch try_catch;
+ CompileRun(
+ "var i = 0;"
+ "var o = {"
+ " toString: function () {"
+ " if (i == 10) {"
+ " throw 'Hey!';"
+ " } else {"
+ " i++;"
+ " return s(o);"
+ " }"
+ " }"
+ "};"
+ "s(o);");
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value value(try_catch.Exception());
+ CHECK_EQ(0, strcmp(*value, "Hey!"));
+}
+
+
+static int GetGlobalObjectsCount() {
+ int count = 0;
+ v8::internal::HeapIterator it;
+ while (it.has_next()) {
+ v8::internal::HeapObject* object = it.next();
+ if (object->IsJSGlobalObject()) count++;
+ }
+ return count;
+}
+
+
+TEST(Regress528) {
+ v8::V8::Initialize();
+
+ v8::HandleScope scope;
+ v8::Persistent<Context> context;
+ v8::Persistent<Context> other_context;
+ int gc_count;
+
+ // Create a context used to keep the code from aging in the compilation
+ // cache.
+ other_context = Context::New();
+
+  // Context-dependent context data creates a reference from the compilation
+ // cache to the global object.
+ const char* source_simple = "1";
+ context = Context::New();
+ {
+ v8::HandleScope scope;
+
+ context->Enter();
+ Local<v8::String> obj = v8::String::New("");
+ context->SetData(obj);
+ CompileRun(source_simple);
+ context->Exit();
+ }
+ context.Dispose();
+ for (gc_count = 1; gc_count < 10; gc_count++) {
+ other_context->Enter();
+ CompileRun(source_simple);
+ other_context->Exit();
+ v8::internal::Heap::CollectAllGarbage(false);
+ if (GetGlobalObjectsCount() == 1) break;
+ }
+ CHECK_GE(2, gc_count);
+ CHECK_EQ(1, GetGlobalObjectsCount());
+
+  // Eval in a function creates a reference from the compilation cache to the
+ // global object.
+ const char* source_eval = "function f(){eval('1')}; f()";
+ context = Context::New();
+ {
+ v8::HandleScope scope;
+
+ context->Enter();
+ CompileRun(source_eval);
+ context->Exit();
+ }
+ context.Dispose();
+ for (gc_count = 1; gc_count < 10; gc_count++) {
+ other_context->Enter();
+ CompileRun(source_eval);
+ other_context->Exit();
+ v8::internal::Heap::CollectAllGarbage(false);
+ if (GetGlobalObjectsCount() == 1) break;
+ }
+ CHECK_GE(2, gc_count);
+ CHECK_EQ(1, GetGlobalObjectsCount());
+
+  // Looking up the line number for an exception creates a reference from the
+ // compilation cache to the global object.
+ const char* source_exception = "function f(){throw 1;} f()";
+ context = Context::New();
+ {
+ v8::HandleScope scope;
+
+ context->Enter();
+ v8::TryCatch try_catch;
+ CompileRun(source_exception);
+ CHECK(try_catch.HasCaught());
+ v8::Handle<v8::Message> message = try_catch.Message();
+ CHECK(!message.IsEmpty());
+ CHECK_EQ(1, message->GetLineNumber());
+ context->Exit();
+ }
+ context.Dispose();
+ for (gc_count = 1; gc_count < 10; gc_count++) {
+ other_context->Enter();
+ CompileRun(source_exception);
+ other_context->Exit();
+ v8::internal::Heap::CollectAllGarbage(false);
+ if (GetGlobalObjectsCount() == 1) break;
+ }
+ CHECK_GE(2, gc_count);
+ CHECK_EQ(1, GetGlobalObjectsCount());
+
+ other_context.Dispose();
}
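All three scenarios in Regress528 share one retry idiom: compile in a context that is then disposed, then loop full collections in the surviving context until only one global object remains, asserting the loop finished within two rounds. Factored out as a sketch (GcUntil and its parameters are hypothetical names; condition and run_gc stand in for GetGlobalObjectsCount() == 1 and Heap::CollectAllGarbage(false)):

static int GcUntil(bool (*condition)(), void (*run_gc)(), int max_rounds) {
  int rounds = 0;
  do {
    run_gc();
    ++rounds;
  } while (rounds < max_rounds && !condition());
  return rounds;  // the test asserts this stays at or below 2
}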
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 9ad7c76f..76eb6bb3 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -173,8 +173,8 @@ TEST(AssemblerIa323) {
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
- CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2));
- { CpuFeatures::Scope fscope(CpuFeatures::SSE2);
+ CHECK(CpuFeatures::IsSupported(SSE2));
+ { CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(eax, Operand(esp, 4));
__ ret(0);
}
@@ -207,8 +207,8 @@ TEST(AssemblerIa324) {
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
- CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2));
- CpuFeatures::Scope fscope(CpuFeatures::SSE2);
+ CHECK(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
__ cvttsd2si(eax, Operand(esp, 4));
__ ret(0);
@@ -260,8 +260,8 @@ typedef double (*F5)(double x, double y);
TEST(AssemblerIa326) {
InitializeVM();
v8::HandleScope scope;
- CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2));
- CpuFeatures::Scope fscope(CpuFeatures::SSE2);
+ CHECK(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
@@ -305,8 +305,8 @@ typedef double (*F6)(int x);
TEST(AssemblerIa328) {
InitializeVM();
v8::HandleScope scope;
- CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2));
- CpuFeatures::Scope fscope(CpuFeatures::SSE2);
+ CHECK(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index 81aa973d..f100b734 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -86,7 +86,7 @@ TEST(AssemblerX64ReturnOperation) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@@ -107,7 +107,7 @@ TEST(AssemblerX64StackOperations) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@@ -138,7 +138,7 @@ TEST(AssemblerX64ArithmeticOperations) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@@ -159,7 +159,7 @@ TEST(AssemblerX64ImulOperation) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble a simple function that multiplies arguments returning the high
// word.
@@ -186,7 +186,7 @@ TEST(AssemblerX64MemoryOperands) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
__ push(rbp);
@@ -219,7 +219,7 @@ TEST(AssemblerX64ControlFlow) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 1 and returns it.
__ push(rbp);
@@ -247,7 +247,7 @@ TEST(AssemblerX64LoopImmediates) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(buffer, actual_size);
+ Assembler assm(buffer, static_cast<int>(actual_size));
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
__ movq(rax, Immediate(-3));
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 4ffcee3d..5b721930 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -53,7 +53,7 @@ using ::v8::internal::StepIn; // From StepAction enum
using ::v8::internal::StepNext; // From StepAction enum
using ::v8::internal::StepOut; // From StepAction enum
using ::v8::internal::Vector;
-
+using ::v8::internal::StrLength;
// Size of temp buffer for formatting small strings.
#define SMALL_STRING_BUFFER_SIZE 80
@@ -178,12 +178,6 @@ static v8::Local<v8::Function> CompileFunction(const char* source,
}
-// Helper function that compiles and runs the source.
-static v8::Local<v8::Value> CompileRun(const char* source) {
- return v8::Script::Compile(v8::String::New(source))->Run();
-}
-
-
// Is there any debug info for the function?
static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
@@ -806,14 +800,14 @@ static void DebugEventStepSequence(v8::DebugEvent event,
if (event == v8::Break || event == v8::Exception) {
// Check that the current function is the expected.
CHECK(break_point_hit_count <
- static_cast<int>(strlen(expected_step_sequence)));
+ StrLength(expected_step_sequence));
const int argc = 1;
v8::Handle<v8::Value> argv[argc] = { exec_state };
v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
argc, argv);
CHECK(result->IsString());
v8::String::AsciiValue function_name(result->ToString());
- CHECK_EQ(1, strlen(*function_name));
+ CHECK_EQ(1, StrLength(*function_name));
CHECK_EQ((*function_name)[0],
expected_step_sequence[break_point_hit_count]);
@@ -872,6 +866,26 @@ static void DebugEventBreak(v8::DebugEvent event,
}
+// Debug event handler which re-issues a debug break until a limit has been
+// reached.
+int max_break_point_hit_count = 0;
+static void DebugEventBreakMax(v8::DebugEvent event,
+ v8::Handle<v8::Object> exec_state,
+ v8::Handle<v8::Object> event_data,
+ v8::Handle<v8::Value> data) {
+ // When hitting a debug event listener there must be a break set.
+ CHECK_NE(v8::internal::Debug::break_id(), 0);
+
+ if (event == v8::Break && break_point_hit_count < max_break_point_hit_count) {
+ // Count the number of breaks.
+ break_point_hit_count++;
+
+ // Set the break flag again to come back here as soon as possible.
+ v8::Debug::DebugBreak();
+ }
+}
+
+
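The listener above terminates because it re-arms the break flag only while the counter is below the cap; once the cap is reached the flag stays cleared and execution falls through. A standalone simulation of that control flow (illustrative names, no V8 involved):

#include <cassert>

static int hits = 0;
static const int kCap = 3;
static bool break_armed = true;

static void OnBreak() {   // stands in for DebugEventBreakMax
  break_armed = false;
  if (hits < kCap) {
    ++hits;
    break_armed = true;   // v8::Debug::DebugBreak() re-arms the flag
  }
}

int main() {
  // The VM keeps delivering break events while the flag is armed.
  while (break_armed) OnBreak();
  assert(hits == kCap);   // bounded, unlike the issue-493 infinite loop
  return 0;
}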
// --- M e s s a g e C a l l b a c k
@@ -1917,7 +1931,7 @@ TEST(ScriptBreakPointLine) {
   // Check that a break point was hit when the script was run.
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ(0, strlen(last_function_hit));
+ CHECK_EQ(0, StrLength(last_function_hit));
   // Call f and check that the script break point is hit.
f->Call(env->Global(), 0, NULL);
@@ -1953,7 +1967,7 @@ TEST(ScriptBreakPointLine) {
break_point_hit_count = 0;
v8::Script::Compile(script, &origin)->Run();
CHECK_EQ(2, break_point_hit_count);
- CHECK_EQ(0, strlen(last_function_hit));
+ CHECK_EQ(0, StrLength(last_function_hit));
   // Set a break point in the code after the last function declaration.
int sbp6 = SetScriptBreakPointByNameFromJS("test.html", 12, -1);
@@ -1962,7 +1976,7 @@ TEST(ScriptBreakPointLine) {
break_point_hit_count = 0;
v8::Script::Compile(script, &origin)->Run();
CHECK_EQ(3, break_point_hit_count);
- CHECK_EQ(0, strlen(last_function_hit));
+ CHECK_EQ(0, StrLength(last_function_hit));
// Clear the last break points, and reload the script which should not hit any
// break points.
@@ -2478,21 +2492,24 @@ TEST(StepInOutSimple) {
break_point_hit_count = 0;
expected_step_sequence = "abcbaca";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of a with step next.
step_action = StepNext;
break_point_hit_count = 0;
expected_step_sequence = "aaa";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of a with step out.
step_action = StepOut;
break_point_hit_count = 0;
expected_step_sequence = "a";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -2525,21 +2542,24 @@ TEST(StepInOutTree) {
break_point_hit_count = 0;
expected_step_sequence = "adacadabcbadacada";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of a with step next.
step_action = StepNext;
break_point_hit_count = 0;
expected_step_sequence = "aaaa";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of a with step out.
step_action = StepOut;
break_point_hit_count = 0;
expected_step_sequence = "a";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -2571,7 +2591,8 @@ TEST(StepInOutBranch) {
break_point_hit_count = 0;
expected_step_sequence = "abaca";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -2707,6 +2728,37 @@ TEST(DebugStepFunctionCall) {
}
+// Tests that a breakpoint will be hit if it is set in a script.
+TEST(PauseInScript) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ // Register a debug event listener which counts.
+ v8::Debug::SetDebugEventListener(DebugEventCounter);
+
+ // Create a script that returns a function.
+ const char* src = "(function (evt) {})";
+ const char* script_name = "StepInHandlerTest";
+
+ // Set breakpoint in the script.
+ SetScriptBreakPointByNameFromJS(script_name, 0, -1);
+ break_point_hit_count = 0;
+
+ v8::ScriptOrigin origin(v8::String::New(script_name), v8::Integer::New(0));
+ v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(src),
+ &origin);
+ v8::Local<v8::Value> r = script->Run();
+
+ CHECK(r->IsFunction());
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test break on exceptions. For each exception break combination the number
// of debug event exception callbacks and message callbacks are collected. The
// number of debug event exception callbacks are used to check that the
@@ -2938,7 +2990,8 @@ TEST(StepWithException) {
break_point_hit_count = 0;
expected_step_sequence = "aa";
a->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of b + c.
v8::Local<v8::Function> b = CompileFunction(&env, src, "b");
@@ -2947,7 +3000,8 @@ TEST(StepWithException) {
break_point_hit_count = 0;
expected_step_sequence = "bcc";
b->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of d + e.
v8::Local<v8::Function> d = CompileFunction(&env, src, "d");
@@ -2957,7 +3011,8 @@ TEST(StepWithException) {
break_point_hit_count = 0;
expected_step_sequence = "dded";
d->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of d + e now with break on caught exceptions.
ChangeBreakOnException(true, true);
@@ -2965,7 +3020,8 @@ TEST(StepWithException) {
break_point_hit_count = 0;
expected_step_sequence = "ddeed";
d->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of f + g + h.
v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
@@ -2975,7 +3031,8 @@ TEST(StepWithException) {
break_point_hit_count = 0;
expected_step_sequence = "ffghf";
f->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Step through invocation of f + g + h now with break on caught exceptions.
ChangeBreakOnException(true, true);
@@ -2983,7 +3040,8 @@ TEST(StepWithException) {
break_point_hit_count = 0;
expected_step_sequence = "ffghhf";
f->Call(env->Global(), 0, NULL);
- CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count);
+ CHECK_EQ(StrLength(expected_step_sequence),
+ break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -3425,6 +3483,75 @@ TEST(NativeGetterThrowingErrorPropertyMirror) {
}
+// Test that the hidden properties object is not returned as an unnamed
+// property among regular properties.
+// See http://crbug.com/26491
+TEST(NoHiddenProperties) {
+ // Create a V8 environment with debug access.
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ // Create an object in the global scope.
+ const char* source = "var obj = {a: 1};";
+ v8::Script::Compile(v8::String::New(source))->Run();
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(v8::String::New("obj")));
+ // Set a hidden property on the object.
+ obj->SetHiddenValue(v8::String::New("v8::test-debug::a"),
+ v8::Int32::New(11));
+
+  // Get a mirror for the object.
+ CompileRun("var obj_mirror = debug.MakeMirror(obj);");
+ CHECK(CompileRun(
+ "obj_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CompileRun("var named_names = obj_mirror.propertyNames();");
+  // There should be exactly one property. But there is also an unnamed
+  // property whose value is the hidden properties dictionary. The latter
+  // property should not be in the list of regular properties.
+ CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
+ CHECK(CompileRun("named_names[0] == 'a'")->BooleanValue());
+ CHECK(CompileRun(
+ "obj_mirror.property('a').value().value() == 1")->BooleanValue());
+
+  // The object created by t0 will become the hidden prototype of 'obj'.
+ v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New();
+ t0->InstanceTemplate()->Set(v8::String::New("b"), v8::Number::New(2));
+ t0->SetHiddenPrototype(true);
+ v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ t1->InstanceTemplate()->Set(v8::String::New("c"), v8::Number::New(3));
+
+ // Create proto objects, add hidden properties to them and set them on
+ // the global object.
+ v8::Handle<v8::Object> protoObj = t0->GetFunction()->NewInstance();
+ protoObj->SetHiddenValue(v8::String::New("v8::test-debug::b"),
+ v8::Int32::New(12));
+ env->Global()->Set(v8::String::New("protoObj"), protoObj);
+ v8::Handle<v8::Object> grandProtoObj = t1->GetFunction()->NewInstance();
+ grandProtoObj->SetHiddenValue(v8::String::New("v8::test-debug::c"),
+ v8::Int32::New(13));
+ env->Global()->Set(v8::String::New("grandProtoObj"), grandProtoObj);
+
+ // Setting prototypes: obj->protoObj->grandProtoObj
+ protoObj->Set(v8::String::New("__proto__"), grandProtoObj);
+ obj->Set(v8::String::New("__proto__"), protoObj);
+
+  // Get a mirror for the object.
+ CompileRun("var obj_mirror = debug.MakeMirror(obj);");
+ CHECK(CompileRun(
+ "obj_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CompileRun("var named_names = obj_mirror.propertyNames();");
+ // There should be exactly two properties - one from the object itself and
+ // another from its hidden prototype.
+ CHECK_EQ(2, CompileRun("named_names.length")->Int32Value());
+ CHECK(CompileRun("named_names.sort(); named_names[0] == 'a' &&"
+ "named_names[1] == 'b'")->BooleanValue());
+ CHECK(CompileRun(
+ "obj_mirror.property('a').value().value() == 1")->BooleanValue());
+ CHECK(CompileRun(
+ "obj_mirror.property('b').value().value() == 2")->BooleanValue());
+}
+
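A minimal sketch of the hidden-value API this test leans on, against the v8.h of this revision (HiddenValueDemo and the key string are illustrative, and a context is assumed to be entered): hidden values live apart from regular properties and must stay invisible to enumeration, which is what the mirror checks assert.

#include <v8.h>

static void HiddenValueDemo() {
  v8::HandleScope scope;
  v8::Handle<v8::Object> obj = v8::Object::New();
  v8::Handle<v8::String> key = v8::String::New("embedder::secret");
  obj->SetHiddenValue(key, v8::Int32::New(42));
  // Retrievable by the embedder...
  v8::Handle<v8::Value> hidden = obj->GetHiddenValue(key);
  // ...but never listed among the object's named properties, so
  // obj_mirror.propertyNames() above must not include it.
  (void)hidden;
}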
// Multithreaded tests of JSON debugger protocol
@@ -4564,6 +4691,71 @@ TEST(DebuggerHostDispatch) {
}
+/* Test DebugMessageDispatch */
+/* In this test, the V8 thread waits for a message from the debugger thread.
+ * The DebugMessageDispatchHandler is executed on the debugger thread,
+ * which signals the V8 thread to wake up.
+ */
+
+class DebugMessageDispatchV8Thread : public v8::internal::Thread {
+ public:
+ void Run();
+};
+
+class DebugMessageDispatchDebuggerThread : public v8::internal::Thread {
+ public:
+ void Run();
+};
+
+Barriers* debug_message_dispatch_barriers;
+
+
+static void DebugMessageHandler() {
+ debug_message_dispatch_barriers->semaphore_1->Signal();
+}
+
+
+void DebugMessageDispatchV8Thread::Run() {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+  // Set up the debug message dispatch handler.
+ v8::Debug::SetDebugMessageDispatchHandler(DebugMessageHandler);
+
+ CompileRun("var y = 1 + 2;\n");
+ debug_message_dispatch_barriers->barrier_1.Wait();
+ debug_message_dispatch_barriers->semaphore_1->Wait();
+ debug_message_dispatch_barriers->barrier_2.Wait();
+}
+
+
+void DebugMessageDispatchDebuggerThread::Run() {
+ debug_message_dispatch_barriers->barrier_1.Wait();
+ SendContinueCommand();
+ debug_message_dispatch_barriers->barrier_2.Wait();
+}
+
+DebugMessageDispatchDebuggerThread debug_message_dispatch_debugger_thread;
+DebugMessageDispatchV8Thread debug_message_dispatch_v8_thread;
+
+
+TEST(DebuggerDebugMessageDispatch) {
+ i::FLAG_debugger_auto_break = true;
+
+ // Create a V8 environment
+ Barriers stack_allocated_debug_message_dispatch_barriers;
+ stack_allocated_debug_message_dispatch_barriers.Initialize();
+ debug_message_dispatch_barriers =
+ &stack_allocated_debug_message_dispatch_barriers;
+
+ debug_message_dispatch_v8_thread.Start();
+ debug_message_dispatch_debugger_thread.Start();
+
+ debug_message_dispatch_v8_thread.Join();
+ debug_message_dispatch_debugger_thread.Join();
+}
+
+
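The handshake in this test reduces to: the V8 thread blocks on a semaphore that the dispatch handler, running on the debugger thread, signals. Sketched with C++11 primitives instead of the cctest Barriers helper (an assumption for brevity; the original code predates C++11):

#include <condition_variable>
#include <mutex>
#include <thread>

static std::mutex m;
static std::condition_variable cv;
static bool dispatched = false;

static void DispatchHandler() {  // debugger thread: semaphore_1->Signal()
  { std::lock_guard<std::mutex> lock(m); dispatched = true; }
  cv.notify_one();
}

int main() {
  std::thread debugger(DispatchHandler);
  std::unique_lock<std::mutex> lock(m);
  cv.wait(lock, [] { return dispatched; });  // V8 thread: semaphore_1->Wait()
  debugger.join();
  return 0;
}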
TEST(DebuggerAgent) {
   // Make sure these ports are not used by other tests to allow tests to run in
// parallel.
@@ -4709,7 +4901,8 @@ TEST(DebuggerAgentProtocolOverflowHeader) {
// Add empty body to request.
const char* content_length_zero_header = "Content-Length:0\r\n";
- client->Send(content_length_zero_header, strlen(content_length_zero_header));
+ client->Send(content_length_zero_header,
+ StrLength(content_length_zero_header));
client->Send("\r\n", 2);
// Wait until data is received.
@@ -4823,7 +5016,7 @@ TEST(ScriptNameAndData) {
v8::ScriptOrigin origin2 = v8::ScriptOrigin(v8::String::New("new name"));
v8::Handle<v8::Script> script2 = v8::Script::Compile(script, &origin2);
script2->Run();
- script2->SetData(data_obj);
+ script2->SetData(data_obj->ToString());
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
@@ -4876,8 +5069,8 @@ TEST(ContextData) {
CHECK(context_2->GetData()->IsUndefined());
// Set and check different data values.
- v8::Handle<v8::Value> data_1 = v8::Number::New(1);
- v8::Handle<v8::Value> data_2 = v8::String::New("2");
+ v8::Handle<v8::String> data_1 = v8::String::New("1");
+ v8::Handle<v8::String> data_2 = v8::String::New("2");
context_1->SetData(data_1);
context_2->SetData(data_2);
CHECK(context_1->GetData()->StrictEquals(data_1));
@@ -5040,7 +5233,7 @@ static void ExecuteScriptForContextCheck() {
CHECK(context_1->GetData()->IsUndefined());
// Set and check a data value.
- v8::Handle<v8::Value> data_1 = v8::Number::New(1);
+ v8::Handle<v8::String> data_1 = v8::String::New("1");
context_1->SetData(data_1);
CHECK(context_1->GetData()->StrictEquals(data_1));
@@ -5444,3 +5637,119 @@ TEST(GetMirror) {
v8::Handle<v8::Value> result = run_test->Call(env->Global(), 1, &obj);
CHECK(result->IsTrue());
}
+
+
+// Test that the debug break flag works with function.apply.
+TEST(DebugBreakFunctionApply) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Create a function for testing breaking in apply.
+ v8::Local<v8::Function> foo = CompileFunction(
+ &env,
+ "function baz(x) { }"
+ "function bar(x) { baz(); }"
+ "function foo(){ bar.apply(this, [1]); }",
+ "foo");
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventBreakMax);
+
+ // Set the debug break flag before calling the code using function.apply.
+ v8::Debug::DebugBreak();
+
+ // Limit the number of debug breaks. This is a regression test for issue 493
+ // where this test would enter an infinite loop.
+ break_point_hit_count = 0;
+ max_break_point_hit_count = 10000; // 10000 => infinite loop.
+ foo->Call(env->Global(), 0, NULL);
+
+  // While the debug break flag is kept set, several breaks will happen.
+ CHECK_EQ(3, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+v8::Handle<v8::Context> debugee_context;
+v8::Handle<v8::Context> debugger_context;
+
+
+// Property getter that checks that current and calling contexts
+// are both the debugee contexts.
+static v8::Handle<v8::Value> NamedGetterWithCallingContextCheck(
+ v8::Local<v8::String> name,
+ const v8::AccessorInfo& info) {
+ CHECK_EQ(0, strcmp(*v8::String::AsciiValue(name), "a"));
+ v8::Handle<v8::Context> current = v8::Context::GetCurrent();
+ CHECK(current == debugee_context);
+ CHECK(current != debugger_context);
+ v8::Handle<v8::Context> calling = v8::Context::GetCalling();
+ CHECK(calling == debugee_context);
+ CHECK(calling != debugger_context);
+ return v8::Int32::New(1);
+}
+
+
+// Debug event listener that checks if the first argument of a function is
+// an object with property 'a' == 1. If the property has a custom accessor,
+// this handler will eventually invoke it.
+static void DebugEventGetArgumentPropertyValue(
+ v8::DebugEvent event,
+ v8::Handle<v8::Object> exec_state,
+ v8::Handle<v8::Object> event_data,
+ v8::Handle<v8::Value> data) {
+ if (event == v8::Break) {
+ break_point_hit_count++;
+ CHECK(debugger_context == v8::Context::GetCurrent());
+ v8::Handle<v8::Function> func(v8::Function::Cast(*CompileRun(
+ "(function(exec_state) {\n"
+ " return (exec_state.frame(0).argumentValue(0).property('a').\n"
+ " value().value() == 1);\n"
+ "})")));
+ const int argc = 1;
+ v8::Handle<v8::Value> argv[argc] = { exec_state };
+ v8::Handle<v8::Value> result = func->Call(exec_state, argc, argv);
+ CHECK(result->IsTrue());
+ }
+}
+
+
+TEST(CallingContextIsNotDebugContext) {
+ // Create and enter a debugee context.
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ // Save handles to the debugger and debugee contexts to be used in
+ // NamedGetterWithCallingContextCheck.
+ debugee_context = v8::Local<v8::Context>(*env);
+ debugger_context = v8::Utils::ToLocal(Debug::debug_context());
+
+ // Create object with 'a' property accessor.
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
+ named->SetAccessor(v8::String::New("a"),
+ NamedGetterWithCallingContextCheck);
+ env->Global()->Set(v8::String::New("obj"),
+ named->NewInstance());
+
+  // Register the debug event listener.
+  v8::Debug::SetDebugEventListener(DebugEventGetArgumentPropertyValue);
+
+  // Create a function that invokes the debugger.
+ v8::Local<v8::Function> foo = CompileFunction(
+ &env,
+ "function bar(x) { debugger; }"
+ "function foo(){ bar(obj); }",
+ "foo");
+
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(1, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ debugee_context = v8::Handle<v8::Context>();
+ debugger_context = v8::Handle<v8::Context>();
+ CheckDebuggerUnloaded();
+}
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 74db2346..b8b3364a 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -105,13 +105,13 @@ TEST(DisasmIa320) {
__ xor_(edx, 3);
__ nop();
{
- CHECK(CpuFeatures::IsSupported(CpuFeatures::CPUID));
- CpuFeatures::Scope fscope(CpuFeatures::CPUID);
+ CHECK(CpuFeatures::IsSupported(CPUID));
+ CpuFeatures::Scope fscope(CPUID);
__ cpuid();
}
{
- CHECK(CpuFeatures::IsSupported(CpuFeatures::RDTSC));
- CpuFeatures::Scope fscope(CpuFeatures::RDTSC);
+ CHECK(CpuFeatures::IsSupported(RDTSC));
+ CpuFeatures::Scope fscope(RDTSC);
__ rdtsc();
}
__ movsx_b(edx, Operand(ecx));
@@ -194,15 +194,16 @@ TEST(DisasmIa320) {
__ rcl(edx, 7);
__ sar(edx, 1);
__ sar(edx, 6);
- __ sar(edx);
+ __ sar_cl(edx);
__ sbb(edx, Operand(ebx, ecx, times_4, 10000));
__ shld(edx, Operand(ebx, ecx, times_4, 10000));
__ shl(edx, 1);
__ shl(edx, 6);
- __ shl(edx);
+ __ shl_cl(edx);
__ shrd(edx, Operand(ebx, ecx, times_4, 10000));
+ __ shr(edx, 1);
__ shr(edx, 7);
- __ shr(edx);
+ __ shr_cl(edx);
// Immediates
@@ -353,8 +354,8 @@ TEST(DisasmIa320) {
__ fwait();
__ nop();
{
- CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2));
- CpuFeatures::Scope fscope(CpuFeatures::SSE2);
+ CHECK(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ addsd(xmm1, xmm0);
@@ -368,8 +369,8 @@ TEST(DisasmIa320) {
// cmov.
{
- CHECK(CpuFeatures::IsSupported(CpuFeatures::CMOV));
- CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+ CHECK(CpuFeatures::IsSupported(CMOV));
+ CpuFeatures::Scope use_cmov(CMOV);
__ cmov(overflow, eax, Operand(eax, 0));
__ cmov(no_overflow, eax, Operand(eax, 1));
__ cmov(below, eax, Operand(eax, 2));
diff --git a/test/cctest/test-flags.cc b/test/cctest/test-flags.cc
index 9019a89e..32f1264f 100644
--- a/test/cctest/test-flags.cc
+++ b/test/cctest/test-flags.cc
@@ -75,7 +75,7 @@ TEST(Flags2b) {
" -notesting-bool-flag notaflag --testing_int_flag=77 "
"-testing_float_flag=.25 "
"--testing_string_flag no_way! ";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK(!FLAG_testing_bool_flag);
CHECK_EQ(77, FLAG_testing_int_flag);
CHECK_EQ(.25, FLAG_testing_float_flag);
@@ -107,7 +107,7 @@ TEST(Flags3b) {
"--testing_bool_flag notaflag --testing_int_flag -666 "
"--testing_float_flag -12E10 "
"-testing-string-flag=foo-bar";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK(FLAG_testing_bool_flag);
CHECK_EQ(-666, FLAG_testing_int_flag);
CHECK_EQ(-12E10, FLAG_testing_float_flag);
@@ -129,7 +129,7 @@ TEST(Flags4) {
TEST(Flags4b) {
SetFlagsToDefault();
const char* str = "--testing_bool_flag --foo";
- CHECK_EQ(2, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(2, FlagList::SetFlagsFromString(str, StrLength(str)));
}
@@ -147,7 +147,7 @@ TEST(Flags5) {
TEST(Flags5b) {
SetFlagsToDefault();
const char* str = " --testing_int_flag=\"foobar\"";
- CHECK_EQ(1, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(1, FlagList::SetFlagsFromString(str, StrLength(str)));
}
@@ -166,7 +166,7 @@ TEST(Flags6) {
TEST(Flags6b) {
SetFlagsToDefault();
const char* str = " --testing-int-flag 0 --testing_float_flag ";
- CHECK_EQ(3, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(3, FlagList::SetFlagsFromString(str, StrLength(str)));
}
@@ -191,7 +191,7 @@ TEST(FlagsJSArguments1) {
TEST(FlagsJSArguments1b) {
SetFlagsToDefault();
const char* str = "--testing-int-flag 42 -- testing-float-flag 7";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
CHECK_EQ(2, FLAG_js_arguments.argc());
@@ -203,7 +203,7 @@ TEST(FlagsJSArguments1b) {
TEST(FlagsJSArguments2) {
SetFlagsToDefault();
const char* str = "--testing-int-flag 42 --js-arguments testing-float-flag 7";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
CHECK_EQ(2, FLAG_js_arguments.argc());
@@ -215,7 +215,7 @@ TEST(FlagsJSArguments2) {
TEST(FlagsJSArguments3) {
SetFlagsToDefault();
const char* str = "--testing-int-flag 42 --js-arguments=testing-float-flag 7";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
CHECK_EQ(2, FLAG_js_arguments.argc());
@@ -227,7 +227,7 @@ TEST(FlagsJSArguments3) {
TEST(FlagsJSArguments4) {
SetFlagsToDefault();
const char* str = "--testing-int-flag 42 --";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(0, FLAG_js_arguments.argc());
}
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index b199507d..04e0037b 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -384,8 +384,8 @@ TEST(RetainerProfile) {
const char* retainers_of_a = printer.GetRetainers("A");
// The order of retainers is unspecified, so we check string length, and
// verify each retainer separately.
- CHECK_EQ(static_cast<int>(strlen("(global property);1,B;2,C;2")),
- static_cast<int>(strlen(retainers_of_a)));
+ CHECK_EQ(i::StrLength("(global property);1,B;2,C;2"),
+ i::StrLength(retainers_of_a));
CHECK(strstr(retainers_of_a, "(global property);1") != NULL);
CHECK(strstr(retainers_of_a, "B;2") != NULL);
CHECK(strstr(retainers_of_a, "C;2") != NULL);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 9911ce42..17bee5b0 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -37,8 +37,7 @@ TEST(HeapMaps) {
CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
- CheckMap(Heap::long_string_map(), LONG_STRING_TYPE,
- SeqTwoByteString::kAlignedSize);
+ CheckMap(Heap::string_map(), STRING_TYPE, SeqTwoByteString::kAlignedSize);
}
@@ -262,7 +261,7 @@ TEST(GarbageCollection) {
static void VerifyStringAllocation(const char* string) {
String* s = String::cast(Heap::AllocateStringFromUtf8(CStrVector(string)));
- CHECK_EQ(static_cast<int>(strlen(string)), s->length());
+ CHECK_EQ(StrLength(string), s->length());
for (int index = 0; index < s->length(); index++) {
CHECK_EQ(static_cast<uint16_t>(string[index]), s->Get(index)); }
}
@@ -285,7 +284,7 @@ TEST(LocalHandles) {
v8::HandleScope scope;
const char* name = "Kasper the spunky";
Handle<String> string = Factory::NewStringFromAscii(CStrVector(name));
- CHECK_EQ(static_cast<int>(strlen(name)), string->length());
+ CHECK_EQ(StrLength(name), string->length());
}
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 43df6ba7..68cbc261 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -163,11 +163,6 @@ v8::Handle<v8::Value> TraceExtension::JSEntrySP(const v8::Arguments& args) {
}
-static void CompileRun(const char* source) {
- Script::Compile(String::New(source))->Run();
-}
-
-
v8::Handle<v8::Value> TraceExtension::JSEntrySPLevel2(
const v8::Arguments& args) {
v8::HandleScope scope;
@@ -329,17 +324,16 @@ TEST(PureJSStackTrace) {
}
-static void CFuncDoTrace() {
+static void CFuncDoTrace(byte dummy_parameter) {
Address fp;
#ifdef __GNUC__
fp = reinterpret_cast<Address>(__builtin_frame_address(0));
-#elif defined _MSC_VER && defined V8_TARGET_ARCH_IA32
- __asm mov [fp], ebp // NOLINT
-#elif defined _MSC_VER && defined V8_TARGET_ARCH_X64
- // TODO(X64): __asm extension is not supported by the Microsoft Visual C++
- // 64-bit compiler.
- fp = 0;
- UNIMPLEMENTED();
+#elif defined _MSC_VER
+ // Approximate a frame pointer address. We compile without base pointers,
+ // so we can't trust ebp/rbp.
+ fp = &dummy_parameter - 2 * sizeof(void*); // NOLINT
+#else
+#error Unexpected platform.
#endif
DoTrace(fp);
}
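The MSVC branch estimates where the frame pointer would be: the first stack-passed argument sits above the return address, which sits above the slot a saved ebp/rbp would occupy, so stepping two pointer-sized slots down from &dummy_parameter approximates fp even though the build omits frame pointers. That is also why CFuncDoTrace gained the byte-sized dummy_parameter. A standalone sketch of the assumed layout:

#include <cstdint>

typedef uint8_t byte;

// Assumed layout, higher addresses first:
//   [ dummy_parameter ]   <- &dummy_parameter
//   [ return address  ]
//   [ saved ebp/rbp   ]   <- the approximated frame pointer
static byte* ApproximateFp(byte* first_stack_arg) {
  return first_stack_arg - 2 * sizeof(void*);
}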
@@ -347,7 +341,7 @@ static void CFuncDoTrace() {
static int CFunc(int depth) {
if (depth <= 0) {
- CFuncDoTrace();
+ CFuncDoTrace(0);
return 0;
} else {
return CFunc(depth - 1) + 1;
diff --git a/test/cctest/test-log-utils.cc b/test/cctest/test-log-utils.cc
index a08a0a11..c99d770d 100644
--- a/test/cctest/test-log-utils.cc
+++ b/test/cctest/test-log-utils.cc
@@ -16,6 +16,7 @@ using v8::internal::LogRecordCompressor;
using v8::internal::MutableCStrVector;
using v8::internal::ScopedVector;
using v8::internal::Vector;
+using v8::internal::StrLength;
// Fills 'ref_buffer' with test data: a sequence of two-digit
// hex numbers: '0001020304...'. Then writes 'ref_buffer' contents to 'dynabuf'.
@@ -118,7 +119,7 @@ TEST(DynaBufReadTruncation) {
TEST(DynaBufSealing) {
const char* seal = "Sealed";
- const int seal_size = strlen(seal);
+ const int seal_size = StrLength(seal);
LogDynamicBuffer dynabuf(32, 128, seal, seal_size);
EmbeddedVector<char, 100> ref_buf;
WriteData(&dynabuf, &ref_buf);
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 3983215a..85ff331a 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -19,6 +19,7 @@
using v8::internal::Address;
using v8::internal::EmbeddedVector;
using v8::internal::Logger;
+using v8::internal::StrLength;
namespace i = v8::internal;
@@ -55,7 +56,7 @@ TEST(GetMessages) {
CHECK_EQ(0, Logger::GetLogLines(0, log_lines, 3));
// See Logger::StringEvent.
const char* line_1 = "aaa,\"bbb\"\n";
- const int line_1_len = strlen(line_1);
+ const int line_1_len = StrLength(line_1);
// Still smaller than log message length.
CHECK_EQ(0, Logger::GetLogLines(0, log_lines, line_1_len - 1));
// The exact size.
@@ -68,7 +69,7 @@ TEST(GetMessages) {
CHECK_EQ(line_1, log_lines);
memset(log_lines, 0, sizeof(log_lines));
const char* line_2 = "cccc,\"dddd\"\n";
- const int line_2_len = strlen(line_2);
+ const int line_2_len = StrLength(line_2);
// Now start with line_2 beginning.
CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 0));
CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 3));
@@ -82,7 +83,7 @@ TEST(GetMessages) {
memset(log_lines, 0, sizeof(log_lines));
// Now get entire buffer contents.
const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
- const int all_lines_len = strlen(all_lines);
+ const int all_lines_len = StrLength(all_lines);
CHECK_EQ(all_lines_len, Logger::GetLogLines(0, log_lines, all_lines_len));
CHECK_EQ(all_lines, log_lines);
memset(log_lines, 0, sizeof(log_lines));
@@ -104,7 +105,7 @@ TEST(BeyondWritePosition) {
Logger::StringEvent("cccc", "dddd");
// See Logger::StringEvent.
const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
- const int all_lines_len = strlen(all_lines);
+ const int all_lines_len = StrLength(all_lines);
EmbeddedVector<char, 100> buffer;
const int beyond_write_pos = all_lines_len;
CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos, buffer.start(), 1));
@@ -246,7 +247,7 @@ TEST(ProfLazyMode) {
i::FLAG_logfile = "*";
// If tests are being run manually, V8 will be already initialized
- // by the test below.
+ // by the bottom test.
const bool need_to_set_up_logger = i::V8::IsRunning();
v8::HandleScope scope;
v8::Handle<v8::Context> env = v8::Context::New();
@@ -256,11 +257,10 @@ TEST(ProfLazyMode) {
// No sampling should happen prior to resuming profiler.
CHECK(!LoggerTestHelper::IsSamplerActive());
- // Read initial logged data (static libs map).
EmbeddedVector<char, 102400> buffer;
+ // Nothing must be logged until profiling is resumed.
int log_pos = GetLogLines(0, &buffer);
- CHECK_GT(log_pos, 0);
- CHECK_GT(buffer.length(), log_pos);
+ CHECK_EQ(0, log_pos);
CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
@@ -438,7 +438,7 @@ namespace {
class SimpleExternalString : public v8::String::ExternalStringResource {
public:
explicit SimpleExternalString(const char* source)
- : utf_source_(strlen(source)) {
+ : utf_source_(StrLength(source)) {
for (int i = 0; i < utf_source_.length(); ++i)
utf_source_[i] = source[i];
}
@@ -474,6 +474,145 @@ TEST(Issue23768) {
}
+static v8::Handle<v8::Value> ObjMethod1(const v8::Arguments& args) {
+ return v8::Handle<v8::Value>();
+}
+
+TEST(LogCallbacks) {
+ const bool saved_prof_lazy = i::FLAG_prof_lazy;
+ const bool saved_prof = i::FLAG_prof;
+ const bool saved_prof_auto = i::FLAG_prof_auto;
+ i::FLAG_prof = true;
+ i::FLAG_prof_lazy = false;
+ i::FLAG_prof_auto = false;
+ i::FLAG_logfile = "*";
+
+  // If tests are being run manually, V8 will already be initialized
+ // by the bottom test.
+ const bool need_to_set_up_logger = i::V8::IsRunning();
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ if (need_to_set_up_logger) Logger::Setup();
+ env->Enter();
+
+ // Skip all initially logged stuff.
+ EmbeddedVector<char, 102400> buffer;
+ int log_pos = GetLogLines(0, &buffer);
+
+ v8::Persistent<v8::FunctionTemplate> obj =
+ v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
+ obj->SetClassName(v8::String::New("Obj"));
+ v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
+ v8::Local<v8::Signature> signature = v8::Signature::New(obj);
+ proto->Set(v8::String::New("method1"),
+ v8::FunctionTemplate::New(ObjMethod1,
+ v8::Handle<v8::Value>(),
+ signature),
+ static_cast<v8::PropertyAttribute>(v8::DontDelete));
+
+ env->Global()->Set(v8_str("Obj"), obj->GetFunction());
+ CompileAndRunScript("Obj.prototype.method1.toString();");
+
+ i::Logger::LogCompiledFunctions();
+ log_pos = GetLogLines(log_pos, &buffer);
+ CHECK_GT(log_pos, 0);
+ buffer[log_pos] = 0;
+
+ const char* callback_rec = "code-creation,Callback,";
+ char* pos = strstr(buffer.start(), callback_rec);
+ CHECK_NE(NULL, pos);
+ pos += strlen(callback_rec);
+ EmbeddedVector<char, 100> ref_data;
+ i::OS::SNPrintF(ref_data,
+ "0x%" V8PRIxPTR ",1,\"method1\"", ObjMethod1);
+ *(pos + strlen(ref_data.start())) = '\0';
+ CHECK_EQ(ref_data.start(), pos);
+
+ obj.Dispose();
+
+ env->Exit();
+ Logger::TearDown();
+ i::FLAG_prof_lazy = saved_prof_lazy;
+ i::FLAG_prof = saved_prof;
+ i::FLAG_prof_auto = saved_prof_auto;
+}
+
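The matcher above builds the expected log line around the callback's address; V8PRIxPTR is V8's pointer-width printf length modifier, playing the role PRIxPTR plays in standard C++. Roughly equivalent, as a standalone sketch of the record shape being grepped for:

#include <cinttypes>
#include <cstddef>
#include <cstdio>

static void FormatCallbackRecord(char* buf, size_t n, void* fn) {
  // "code-creation,Callback,<address>,1,\"name\"" is the record shape.
  snprintf(buf, n, "code-creation,Callback,0x%" PRIxPTR ",1,\"method1\"",
           reinterpret_cast<uintptr_t>(fn));
}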
+
+static v8::Handle<v8::Value> Prop1Getter(v8::Local<v8::String> property,
+ const v8::AccessorInfo& info) {
+ return v8::Handle<v8::Value>();
+}
+
+static void Prop1Setter(v8::Local<v8::String> property,
+ v8::Local<v8::Value> value,
+ const v8::AccessorInfo& info) {
+}
+
+static v8::Handle<v8::Value> Prop2Getter(v8::Local<v8::String> property,
+ const v8::AccessorInfo& info) {
+ return v8::Handle<v8::Value>();
+}
+
+TEST(LogAccessorCallbacks) {
+ const bool saved_prof_lazy = i::FLAG_prof_lazy;
+ const bool saved_prof = i::FLAG_prof;
+ const bool saved_prof_auto = i::FLAG_prof_auto;
+ i::FLAG_prof = true;
+ i::FLAG_prof_lazy = false;
+ i::FLAG_prof_auto = false;
+ i::FLAG_logfile = "*";
+
+  // If tests are being run manually, V8 will already be initialized
+  // by the test at the bottom of this file.
+ const bool need_to_set_up_logger = i::V8::IsRunning();
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ if (need_to_set_up_logger) Logger::Setup();
+ env->Enter();
+
+ // Skip all initially logged stuff.
+ EmbeddedVector<char, 102400> buffer;
+ int log_pos = GetLogLines(0, &buffer);
+
+ v8::Persistent<v8::FunctionTemplate> obj =
+ v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
+ obj->SetClassName(v8::String::New("Obj"));
+ v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
+ inst->SetAccessor(v8::String::New("prop1"), Prop1Getter, Prop1Setter);
+ inst->SetAccessor(v8::String::New("prop2"), Prop2Getter);
+
+ i::Logger::LogAccessorCallbacks();
+ log_pos = GetLogLines(log_pos, &buffer);
+ CHECK_GT(log_pos, 0);
+ buffer[log_pos] = 0;
+ printf("%s", buffer.start());
+
+ EmbeddedVector<char, 100> prop1_getter_record;
+ i::OS::SNPrintF(prop1_getter_record,
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop1\"",
+ Prop1Getter);
+ CHECK_NE(NULL, strstr(buffer.start(), prop1_getter_record.start()));
+ EmbeddedVector<char, 100> prop1_setter_record;
+ i::OS::SNPrintF(prop1_setter_record,
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"set prop1\"",
+ Prop1Setter);
+ CHECK_NE(NULL, strstr(buffer.start(), prop1_setter_record.start()));
+ EmbeddedVector<char, 100> prop2_getter_record;
+ i::OS::SNPrintF(prop2_getter_record,
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop2\"",
+ Prop2Getter);
+ CHECK_NE(NULL, strstr(buffer.start(), prop2_getter_record.start()));
+
+ obj.Dispose();
+
+ env->Exit();
+ Logger::TearDown();
+ i::FLAG_prof_lazy = saved_prof_lazy;
+ i::FLAG_prof = saved_prof;
+ i::FLAG_prof_auto = saved_prof_auto;
+}
+
+
static inline bool IsStringEqualTo(const char* r, const char* s) {
return strncmp(r, s, strlen(r)) == 0;
}
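For orientation, the records that LogCallbacks and LogAccessorCallbacks grep for take this shape in the log; the addresses below are illustrative, since the real ones are the callback function pointers:

code-creation,Callback,0x7f3a92c0,1,"method1"
code-creation,Callback,0x7f3a9300,1,"get prop1"
code-creation,Callback,0x7f3a9340,1,"set prop1"
code-creation,Callback,0x7f3a9380,1,"get prop2"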
@@ -593,7 +732,7 @@ class ParseLogResult {
entities[i] = NULL;
}
const size_t map_length = bounds.Length();
- entities_map = i::NewArray<int>(map_length);
+ entities_map = i::NewArray<int>(static_cast<int>(map_length));
for (size_t i = 0; i < map_length; ++i) {
entities_map[i] = -1;
}
@@ -769,7 +908,7 @@ static inline void PrintCodeEntityInfo(CodeEntityInfo entity) {
const int max_len = 50;
if (entity != NULL) {
char* eol = strchr(entity, '\n');
- int len = eol - entity;
+ int len = static_cast<int>(eol - entity);
len = len <= max_len ? len : max_len;
printf("%-*.*s ", max_len, len, entity);
} else {
@@ -789,7 +928,7 @@ static void PrintCodeEntitiesInfo(
static inline int StrChrLen(const char* s, char c) {
- return strchr(s, c) - s;
+ return static_cast<int>(strchr(s, c) - s);
}
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 9c1197ff..511b933a 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -57,7 +57,7 @@ using v8::internal::rsp;
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::r11;
-using v8::internal::r12;
+using v8::internal::r12; // Remember: r12..r15 are callee save!
using v8::internal::r13;
using v8::internal::r14;
using v8::internal::r15;
@@ -133,7 +133,7 @@ TEST(SmiMove) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
masm->set_allow_stub_calls(false);
Label exit;
@@ -218,7 +218,7 @@ TEST(SmiCompare) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -265,7 +265,7 @@ TEST(Integer32ToSmi) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -364,7 +364,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
int64_t result = x + y;
ASSERT(Smi::IsValid(result));
__ movl(rax, Immediate(id));
- __ Move(r8, Smi::FromInt(result));
+ __ Move(r8, Smi::FromInt(static_cast<int>(result)));
__ movq(rcx, x, RelocInfo::NONE);
__ movq(r11, rcx);
__ Integer64PlusConstantToSmi(rdx, rcx, y);
@@ -390,7 +390,7 @@ TEST(Integer64PlusConstantToSmi) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -431,7 +431,7 @@ TEST(SmiCheck) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -676,7 +676,7 @@ TEST(SmiNeg) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -761,7 +761,7 @@ TEST(SmiAdd) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -948,7 +948,7 @@ TEST(SmiSub) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1035,7 +1035,7 @@ TEST(SmiMul) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1138,12 +1138,14 @@ TEST(SmiDiv) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
Label exit;
+ __ push(r12);
+ __ push(r15);
TestSmiDiv(masm, &exit, 0x10, 1, 1);
TestSmiDiv(masm, &exit, 0x20, 1, 0);
TestSmiDiv(masm, &exit, 0x30, -1, 0);
@@ -1168,6 +1170,8 @@ TEST(SmiDiv) {
__ xor_(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
+ __ pop(r15);
+ __ pop(r12);
__ ret(0);
CodeDesc desc;
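The push/pop pairs added to SmiDiv (and to SmiMod below) exist because the System V x64 ABI marks rbx, rbp and r12-r15 as callee-saved: the generated test code uses r15 as its status register and clobbers r12 as well, so both must carry the C++ caller's values across the call. The discipline in isolation, with illustrative comments:

__ push(r12);          // callee-saved: preserve the caller's value
__ push(r15);
// ... test body may clobber r12 and r15 freely ...
__ xor_(r15, r15);     // r15 doubles as the status register (0 == success)
__ bind(&exit);
__ movq(rax, r15);     // the result travels back to C++ in rax
__ pop(r15);           // restore in reverse order of the pushes
__ pop(r12);
__ ret(0);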
@@ -1241,12 +1245,14 @@ TEST(SmiMod) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
Label exit;
+ __ push(r12);
+ __ push(r15);
TestSmiMod(masm, &exit, 0x10, 1, 1);
TestSmiMod(masm, &exit, 0x20, 1, 0);
TestSmiMod(masm, &exit, 0x30, -1, 0);
@@ -1271,6 +1277,8 @@ TEST(SmiMod) {
__ xor_(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
+ __ pop(r15);
+ __ pop(r12);
__ ret(0);
CodeDesc desc;
@@ -1330,7 +1338,7 @@ TEST(SmiIndex) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1396,7 +1404,7 @@ TEST(SmiSelectNonSmi) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false); // Avoid inline checks.
@@ -1472,7 +1480,7 @@ TEST(SmiAnd) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1550,7 +1558,7 @@ TEST(SmiOr) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1630,7 +1638,7 @@ TEST(SmiXor) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1694,7 +1702,7 @@ TEST(SmiNot) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1835,7 +1843,7 @@ TEST(SmiShiftLeft) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1872,7 +1880,7 @@ void TestSmiShiftLogicalRight(MacroAssembler* masm,
int shift = shifts[i];
intptr_t result = static_cast<unsigned int>(x) >> shift;
if (Smi::IsValid(result)) {
- __ Move(r8, Smi::FromInt(result));
+ __ Move(r8, Smi::FromInt(static_cast<int>(result)));
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLogicalRightConstant(r9, rcx, shift, exit);
@@ -1938,7 +1946,7 @@ TEST(SmiShiftLogicalRight) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2004,7 +2012,7 @@ TEST(SmiShiftArithmeticRight) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2065,7 +2073,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
true));
CHECK(buffer);
HandleScope handles;
- MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
new file mode 100755
index 00000000..d62b6a5d
--- /dev/null
+++ b/test/cctest/test-parsing.cc
@@ -0,0 +1,129 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "token.h"
+#include "scanner.h"
+#include "utils.h"
+
+#include "cctest.h"
+
+namespace i = ::v8::internal;
+
+TEST(KeywordMatcher) {
+ struct KeywordToken {
+ const char* keyword;
+ i::Token::Value token;
+ };
+
+ static const KeywordToken keywords[] = {
+#define KEYWORD(t, s, d) { s, i::Token::t },
+#define IGNORE(t, s, d) /* */
+ TOKEN_LIST(IGNORE, KEYWORD, IGNORE)
+#undef KEYWORD
+ { NULL, i::Token::IDENTIFIER }
+ };
+
+ static const char* future_keywords[] = {
+#define FUTURE(t, s, d) s,
+ TOKEN_LIST(IGNORE, IGNORE, FUTURE)
+#undef FUTURE
+#undef IGNORE
+ NULL
+ };
+
+ KeywordToken key_token;
+ for (int i = 0; (key_token = keywords[i]).keyword != NULL; i++) {
+ i::KeywordMatcher matcher;
+ const char* keyword = key_token.keyword;
+ int length = i::StrLength(keyword);
+ for (int j = 0; j < length; j++) {
+ if (key_token.token == i::Token::INSTANCEOF && j == 2) {
+ // "in" is a prefix of "instanceof". It's the only keyword
+ // that is a prefix of another.
+ CHECK_EQ(i::Token::IN, matcher.token());
+ } else {
+ CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
+ }
+ matcher.AddChar(keyword[j]);
+ }
+ CHECK_EQ(key_token.token, matcher.token());
+ // Adding more characters will make keyword matching fail.
+ matcher.AddChar('z');
+ CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
+    // Adding keyword characters later will not make it match again.
+ matcher.AddChar('i');
+ matcher.AddChar('f');
+ CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
+ }
+
+ // Future keywords are not recognized.
+ const char* future_keyword;
+ for (int i = 0; (future_keyword = future_keywords[i]) != NULL; i++) {
+ i::KeywordMatcher matcher;
+ int length = i::StrLength(future_keyword);
+ for (int j = 0; j < length; j++) {
+ matcher.AddChar(future_keyword[j]);
+ }
+ CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
+ }
+
+  // A zero character isn't ignored at the start.
+ i::KeywordMatcher bad_start;
+ bad_start.AddChar(0);
+ CHECK_EQ(i::Token::IDENTIFIER, bad_start.token());
+ bad_start.AddChar('i');
+ bad_start.AddChar('f');
+ CHECK_EQ(i::Token::IDENTIFIER, bad_start.token());
+
+  // A zero character isn't ignored at the end.
+ i::KeywordMatcher bad_end;
+ bad_end.AddChar('i');
+ bad_end.AddChar('f');
+ CHECK_EQ(i::Token::IF, bad_end.token());
+ bad_end.AddChar(0);
+ CHECK_EQ(i::Token::IDENTIFIER, bad_end.token());
+
+ // Case isn't ignored.
+ i::KeywordMatcher bad_case;
+ bad_case.AddChar('i');
+ bad_case.AddChar('F');
+ CHECK_EQ(i::Token::IDENTIFIER, bad_case.token());
+
+ // If we mark it as failure, continuing won't help.
+ i::KeywordMatcher full_stop;
+ full_stop.AddChar('i');
+ CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
+ full_stop.Fail();
+ CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
+ full_stop.AddChar('f');
+ CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
+}
+
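The assertions above pin down i::KeywordMatcher's contract: it is fed one character at a time, reports IDENTIFIER until the exact characters of a keyword have been seen, and latches into permanent failure on a '\0', a non-matching character, or an explicit Fail(). The real matcher is a table-driven state machine; a toy stand-in with the same observable behavior, for illustration only:

#include <string>

// Toy stand-in for i::KeywordMatcher (illustrative, not V8 code). Only a
// few keywords are modeled; token() reports "IDENTIFIER" otherwise.
class ToyKeywordMatcher {
 public:
  ToyKeywordMatcher() : failed_(false) {}
  void AddChar(char c) {
    if (failed_ || c == '\0') { Fail(); return; }  // '\0' is never ignored
    buffer_ += c;
  }
  void Fail() { failed_ = true; }  // failure is permanent
  const char* token() const {
    if (failed_) return "IDENTIFIER";
    if (buffer_ == "if") return "IF";
    if (buffer_ == "in") return "IN";  // a prefix of "instanceof"
    if (buffer_ == "instanceof") return "INSTANCEOF";
    return "IDENTIFIER";  // no keyword matches the characters seen so far
  }
 private:
  bool failed_;
  std::string buffer_;
};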
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 81c22052..6aa0730c 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -74,7 +74,7 @@ static SmartPointer<const char> Parse(const char* input) {
static bool CheckSimple(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
- unibrow::Utf8InputBuffer<> buffer(input, strlen(input));
+ unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
ZoneScope zone_scope(DELETE_ON_EXIT);
FlatStringReader reader(CStrVector(input));
RegExpCompileData result;
@@ -92,7 +92,7 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
- unibrow::Utf8InputBuffer<> buffer(input, strlen(input));
+ unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
ZoneScope zone_scope(DELETE_ON_EXIT);
FlatStringReader reader(CStrVector(input));
RegExpCompileData result;
@@ -1466,7 +1466,7 @@ static void TestRangeCaseIndependence(CharacterRange input,
ZoneScope zone_scope(DELETE_ON_EXIT);
int count = expected.length();
ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(count);
- input.AddCaseEquivalents(list);
+ input.AddCaseEquivalents(list, false);
CHECK_EQ(count, list->length());
for (int i = 0; i < list->length(); i++) {
CHECK_EQ(expected[i].from(), list->at(i).from());
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index db37eb34..8f4441ac 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -123,13 +123,17 @@ TEST(ExternalReferenceEncoder) {
ExternalReference::the_hole_value_location();
CHECK_EQ(make_code(UNCLASSIFIED, 2),
encoder.Encode(the_hole_value_location.address()));
- ExternalReference stack_guard_limit_address =
- ExternalReference::address_of_stack_guard_limit();
+ ExternalReference stack_limit_address =
+ ExternalReference::address_of_stack_limit();
CHECK_EQ(make_code(UNCLASSIFIED, 4),
- encoder.Encode(stack_guard_limit_address.address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 10),
+ encoder.Encode(stack_limit_address.address()));
+ ExternalReference real_stack_limit_address =
+ ExternalReference::address_of_real_stack_limit();
+ CHECK_EQ(make_code(UNCLASSIFIED, 5),
+ encoder.Encode(real_stack_limit_address.address()));
+ CHECK_EQ(make_code(UNCLASSIFIED, 11),
encoder.Encode(ExternalReference::debug_break().address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 6),
+ CHECK_EQ(make_code(UNCLASSIFIED, 7),
encoder.Encode(ExternalReference::new_space_start().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 3),
encoder.Encode(ExternalReference::roots_address().address()));
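The id renumbering in this hunk and the decoder hunk below (4 keeps its slot, 5 is new, 10 becomes 11, 6 becomes 7) falls out of inserting address_of_real_stack_limit into the UNCLASSIFIED external-reference table: every entry registered after it shifts by one. The test's make_code helper packs the type and the per-type id into a single word, roughly:

// Sketch of the encoding the CHECK_EQs compare against; kReferenceIdBits
// (a V8-internal constant) is the bit width reserved for the id.
static uint32_t make_code(TypeCode type, int id) {
  return static_cast<uint32_t>(type) << kReferenceIdBits | id;
}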
@@ -158,74 +162,49 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(UNCLASSIFIED, 1)));
CHECK_EQ(ExternalReference::the_hole_value_location().address(),
decoder.Decode(make_code(UNCLASSIFIED, 2)));
- CHECK_EQ(ExternalReference::address_of_stack_guard_limit().address(),
+ CHECK_EQ(ExternalReference::address_of_stack_limit().address(),
decoder.Decode(make_code(UNCLASSIFIED, 4)));
+ CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(),
+ decoder.Decode(make_code(UNCLASSIFIED, 5)));
CHECK_EQ(ExternalReference::debug_break().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 10)));
+ decoder.Decode(make_code(UNCLASSIFIED, 11)));
CHECK_EQ(ExternalReference::new_space_start().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 6)));
+ decoder.Decode(make_code(UNCLASSIFIED, 7)));
}
static void Serialize() {
-#ifdef DEBUG
- FLAG_debug_serialization = true;
-#endif
- StatsTable::SetCounterFunction(counter_function);
-
- v8::HandleScope scope;
- const int kExtensionCount = 1;
- const char* extension_list[kExtensionCount] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
- Serializer::Enable();
- v8::Persistent<v8::Context> env = v8::Context::New(&extensions);
- env->Enter();
-
+ // We have to create one context. One reason for this is so that the builtins
+ // can be loaded from v8natives.js and their addresses can be processed. This
+ // will clear the pending fixups array, which would otherwise contain GC roots
+ // that would confuse the serialization/deserialization process.
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env.Dispose();
Snapshot::WriteToFile(FLAG_testing_serialization_file);
}
-// Test that the whole heap can be serialized when running from the
-// internal snapshot.
-// (Smoke test.)
-TEST(SerializeInternal) {
- Snapshot::Initialize(NULL);
- Serialize();
-}
-
-
-// Test that the whole heap can be serialized when running from a
-// bootstrapped heap.
-// (Smoke test.)
+// Test that the whole heap can be serialized.
TEST(Serialize) {
- if (Snapshot::IsEnabled()) return;
+ Serializer::Enable();
+ v8::V8::Initialize();
Serialize();
}
-// Test that the heap isn't destroyed after a serialization.
-TEST(SerializeNondestructive) {
- if (Snapshot::IsEnabled()) return;
- StatsTable::SetCounterFunction(counter_function);
- v8::HandleScope scope;
+// Test that heap serialization is non-destructive.
+TEST(SerializeTwice) {
Serializer::Enable();
- v8::Persistent<v8::Context> env = v8::Context::New();
- v8::Context::Scope context_scope(env);
- Serializer().Serialize();
- const char* c_source = "\"abcd\".charAt(2) == 'c'";
- v8::Local<v8::String> source = v8::String::New(c_source);
- v8::Local<v8::Script> script = v8::Script::Compile(source);
- v8::Local<v8::Value> value = script->Run();
- CHECK(value->BooleanValue());
+ v8::V8::Initialize();
+ Serialize();
+ Serialize();
}
+
//----------------------------------------------------------------------------
// Tests that the heap can be deserialized.
static void Deserialize() {
-#ifdef DEBUG
- FLAG_debug_serialization = true;
-#endif
CHECK(Snapshot::Initialize(FLAG_testing_serialization_file));
}
@@ -248,49 +227,56 @@ DEPENDENT_TEST(Deserialize, Serialize) {
Deserialize();
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env->Enter();
+
SanityCheck();
}
-DEPENDENT_TEST(DeserializeAndRunScript, Serialize) {
+
+DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
v8::HandleScope scope;
Deserialize();
- const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::New(c_source);
- v8::Local<v8::Script> script = v8::Script::Compile(source);
- CHECK_EQ(4, script->Run()->Int32Value());
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ SanityCheck();
}
-DEPENDENT_TEST(DeserializeNatives, Serialize) {
+DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
v8::HandleScope scope;
Deserialize();
- const char* c_source = "\"abcd\".charAt(2) == 'c'";
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ const char* c_source = "\"1234\".length";
v8::Local<v8::String> source = v8::String::New(c_source);
v8::Local<v8::Script> script = v8::Script::Compile(source);
- v8::Local<v8::Value> value = script->Run();
- CHECK(value->BooleanValue());
+ CHECK_EQ(4, script->Run()->Int32Value());
}
-DEPENDENT_TEST(DeserializeExtensions, Serialize) {
+DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
+ SerializeTwice) {
v8::HandleScope scope;
Deserialize();
- const char* c_source = "gc();";
+
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ const char* c_source = "\"1234\".length";
v8::Local<v8::String> source = v8::String::New(c_source);
v8::Local<v8::Script> script = v8::Script::Compile(source);
- v8::Local<v8::Value> value = script->Run();
- CHECK(value->IsUndefined());
+ CHECK_EQ(4, script->Run()->Int32Value());
}
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
-
TEST(TestThatAlwaysSucceeds) {
}
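The rewritten deserialization tests above all share one skeleton: restore the snapshot, then create and enter a fresh context before touching the heap. That mirrors the new Serialize(), which disposes its context before writing the snapshot, so nothing is entered when the heap comes back. Schematically:

// Common skeleton of the DEPENDENT_TESTs in this file; only SanityCheck
// or the compiled script varies from test to test.
v8::HandleScope scope;
Deserialize();  // restore the snapshot written by the serialization test
v8::Persistent<v8::Context> env = v8::Context::New();
env->Enter();   // a fresh context, since none survives the snapshot
SanityCheck();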
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index bb9a6f99..59a40af2 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -63,6 +63,21 @@ class Resource: public v8::String::ExternalStringResource,
};
+class AsciiResource: public v8::String::ExternalAsciiStringResource,
+ public ZoneObject {
+ public:
+ explicit AsciiResource(Vector<const char> string): data_(string.start()) {
+ length_ = string.length();
+ }
+ virtual const char* data() const { return data_; }
+ virtual size_t length() const { return length_; }
+
+ private:
+ const char* data_;
+ size_t length_;
+};
+
+
static void InitializeBuildingBlocks(
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
// A list of pointers that we don't have any interest in cleaning up.
@@ -241,17 +256,6 @@ TEST(Traverse) {
printf("6\n");
TraverseFirst(left_asymmetric, right_deep_asymmetric, 65536);
printf("7\n");
- Handle<String> right_deep_slice =
- Factory::NewStringSlice(left_deep_asymmetric,
- left_deep_asymmetric->length() - 1050,
- left_deep_asymmetric->length() - 50);
- Handle<String> left_deep_slice =
- Factory::NewStringSlice(right_deep_asymmetric,
- right_deep_asymmetric->length() - 1050,
- right_deep_asymmetric->length() - 50);
- printf("8\n");
- Traverse(right_deep_slice, left_deep_slice);
- printf("9\n");
FlattenString(left_asymmetric);
printf("10\n");
Traverse(flat, left_asymmetric);
@@ -269,60 +273,6 @@ TEST(Traverse) {
}
-static Handle<String> SliceOf(Handle<String> underlying) {
- int start = gen() % underlying->length();
- int end = start + gen() % (underlying->length() - start);
- return Factory::NewStringSlice(underlying,
- start,
- end);
-}
-
-
-static Handle<String> ConstructSliceTree(
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
- int from,
- int to) {
- CHECK(to > from);
- if (to - from <= 1)
- return SliceOf(building_blocks[from % NUMBER_OF_BUILDING_BLOCKS]);
- if (to - from == 2) {
- Handle<String> lhs = building_blocks[from % NUMBER_OF_BUILDING_BLOCKS];
- if (gen() % 2 == 0)
- lhs = SliceOf(lhs);
- Handle<String> rhs = building_blocks[(from+1) % NUMBER_OF_BUILDING_BLOCKS];
- if (gen() % 2 == 0)
- rhs = SliceOf(rhs);
- return Factory::NewConsString(lhs, rhs);
- }
- Handle<String> part1 =
- ConstructBalancedHelper(building_blocks, from, from + ((to - from) / 2));
- Handle<String> part2 =
- ConstructBalancedHelper(building_blocks, from + ((to - from) / 2), to);
- Handle<String> branch = Factory::NewConsString(part1, part2);
- if (gen() % 2 == 0)
- return branch;
- return(SliceOf(branch));
-}
-
-
-TEST(Slice) {
- printf("TestSlice\n");
- InitializeVM();
- v8::HandleScope scope;
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS];
- ZoneScope zone(DELETE_ON_EXIT);
- InitializeBuildingBlocks(building_blocks);
-
- seed = 42;
- Handle<String> slice_tree =
- ConstructSliceTree(building_blocks, 0, DEEP_DEPTH);
- seed = 42;
- Handle<String> flat_slice_tree =
- ConstructSliceTree(building_blocks, 0, DEEP_DEPTH);
- FlattenString(flat_slice_tree);
- Traverse(flat_slice_tree, slice_tree);
-}
-
static const int DEEP_ASCII_DEPTH = 100000;
@@ -357,8 +307,10 @@ TEST(Utf8Conversion) {
v8::HandleScope handle_scope;
// A simple ascii string
const char* ascii_string = "abcdef12345";
- int len = v8::String::New(ascii_string, strlen(ascii_string))->Utf8Length();
- CHECK_EQ(strlen(ascii_string), len);
+ int len =
+ v8::String::New(ascii_string,
+ StrLength(ascii_string))->Utf8Length();
+ CHECK_EQ(StrLength(ascii_string), len);
// A mixed ascii and non-ascii string
// U+02E4 -> CB A4
// U+0064 -> 64
@@ -392,127 +344,89 @@ TEST(Utf8Conversion) {
}
-class TwoByteResource: public v8::String::ExternalStringResource {
- public:
- TwoByteResource(const uint16_t* data, size_t length, bool* destructed)
- : data_(data), length_(length), destructed_(destructed) {
- CHECK_NE(destructed, NULL);
- *destructed_ = false;
- }
-
- virtual ~TwoByteResource() {
- CHECK_NE(destructed_, NULL);
- CHECK(!*destructed_);
- *destructed_ = true;
- }
-
- const uint16_t* data() const { return data_; }
- size_t length() const { return length_; }
-
- private:
- const uint16_t* data_;
- size_t length_;
- bool* destructed_;
-};
-
+TEST(ExternalShortStringAdd) {
+ ZoneScope zone(DELETE_ON_EXIT);
-// Regression test case for http://crbug.com/9746. The problem was
-// that when we marked objects reachable only through weak pointers,
-// we ended up keeping a sliced symbol alive, even though we already
-// invoked the weak callback on the underlying external string thus
-// deleting its resource.
-TEST(Regress9746) {
InitializeVM();
+ v8::HandleScope handle_scope;
- // Setup lengths that guarantee we'll get slices instead of simple
- // flat strings.
- static const int kFullStringLength = String::kMinNonFlatLength * 2;
- static const int kSliceStringLength = String::kMinNonFlatLength + 1;
-
- uint16_t* source = new uint16_t[kFullStringLength];
- for (int i = 0; i < kFullStringLength; i++) source[i] = '1';
- char* key = new char[kSliceStringLength];
- for (int i = 0; i < kSliceStringLength; i++) key[i] = '1';
- Vector<const char> key_vector(key, kSliceStringLength);
-
- // Allocate an external string resource that keeps track of when it
- // is destructed.
- bool resource_destructed = false;
- TwoByteResource* resource =
- new TwoByteResource(source, kFullStringLength, &resource_destructed);
-
- {
- v8::HandleScope scope;
-
- // Allocate an external string resource and external string. We
- // have to go through the API to get the weak handle and the
- // automatic destruction going.
- Handle<String> string =
- v8::Utils::OpenHandle(*v8::String::NewExternal(resource));
-
- // Create a slice of the external string.
- Handle<String> slice =
- Factory::NewStringSlice(string, 0, kSliceStringLength);
- CHECK_EQ(kSliceStringLength, slice->length());
- CHECK(StringShape(*slice).IsSliced());
-
- // Make sure the slice ends up in old space so we can morph it
- // into a symbol.
- while (Heap::InNewSpace(*slice)) {
- Heap::PerformScavenge();
+ // Make sure we cover all always-flat lengths and at least one above.
+ static const int kMaxLength = 20;
+ CHECK_GT(kMaxLength, i::String::kMinNonFlatLength);
+
+ // Allocate two JavaScript arrays for holding short strings.
+ v8::Handle<v8::Array> ascii_external_strings =
+ v8::Array::New(kMaxLength + 1);
+ v8::Handle<v8::Array> non_ascii_external_strings =
+ v8::Array::New(kMaxLength + 1);
+
+ // Generate short ascii and non-ascii external strings.
+ for (int i = 0; i <= kMaxLength; i++) {
+ char* ascii = Zone::NewArray<char>(i + 1);
+ for (int j = 0; j < i; j++) {
+ ascii[j] = 'a';
}
-
- // Force the slice into the symbol table.
- slice = Factory::SymbolFromString(slice);
- CHECK(slice->IsSymbol());
- CHECK(StringShape(*slice).IsSliced());
-
- Handle<String> buffer(Handle<SlicedString>::cast(slice)->buffer());
- CHECK(StringShape(*buffer).IsExternal());
- CHECK(buffer->IsTwoByteRepresentation());
-
- // Finally, base a script on the slice of the external string and
- // get its wrapper. This allocates yet another weak handle that
- // indirectly refers to the external string.
- Handle<Script> script = Factory::NewScript(slice);
- Handle<JSObject> wrapper = GetScriptWrapper(script);
- }
-
- // When we collect all garbage, we cannot get rid of the sliced
- // symbol entry in the symbol table because it is used by the script
- // kept alive by the weak wrapper. Make sure we don't destruct the
- // external string.
- Heap::CollectAllGarbage(false);
- CHECK(!resource_destructed);
-
- {
- v8::HandleScope scope;
-
- // Make sure the sliced symbol is still in the table.
- Handle<String> symbol = Factory::LookupSymbol(key_vector);
- CHECK(StringShape(*symbol).IsSliced());
-
- // Make sure the buffer is still a two-byte external string.
- Handle<String> buffer(Handle<SlicedString>::cast(symbol)->buffer());
- CHECK(StringShape(*buffer).IsExternal());
- CHECK(buffer->IsTwoByteRepresentation());
+ // Terminating '\0' is left out on purpose. It is not required for external
+ // string data.
+ AsciiResource* ascii_resource =
+ new AsciiResource(Vector<const char>(ascii, i));
+ v8::Local<v8::String> ascii_external_string =
+ v8::String::NewExternal(ascii_resource);
+
+ ascii_external_strings->Set(v8::Integer::New(i), ascii_external_string);
+ uc16* non_ascii = Zone::NewArray<uc16>(i + 1);
+ for (int j = 0; j < i; j++) {
+ non_ascii[j] = 0x1234;
+ }
+ // Terminating '\0' is left out on purpose. It is not required for external
+ // string data.
+ Resource* resource = new Resource(Vector<const uc16>(non_ascii, i));
+ v8::Local<v8::String> non_ascii_external_string =
+ v8::String::NewExternal(resource);
+ non_ascii_external_strings->Set(v8::Integer::New(i),
+ non_ascii_external_string);
}
- // Forcing another garbage collection should let us get rid of the
- // slice from the symbol table. The external string remains in the
- // heap until the next GC.
- Heap::CollectAllGarbage(false);
- CHECK(!resource_destructed);
- v8::HandleScope scope;
- Handle<String> key_string = Factory::NewStringFromAscii(key_vector);
- String* out;
- CHECK(!Heap::LookupSymbolIfExists(*key_string, &out));
-
- // Forcing yet another garbage collection must allow us to finally
- // get rid of the external string.
- Heap::CollectAllGarbage(false);
- CHECK(resource_destructed);
-
- delete[] source;
- delete[] key;
+  // Add the arrays with the short external strings to the global object.
+ v8::Handle<v8::Object> global = env->Global();
+ global->Set(v8_str("external_ascii"), ascii_external_strings);
+ global->Set(v8_str("external_non_ascii"), non_ascii_external_strings);
+ global->Set(v8_str("max_length"), v8::Integer::New(kMaxLength));
+
+  // Add short external ascii and non-ascii strings, checking the results.
+ static const char* source =
+ "function test() {"
+ " var ascii_chars = 'aaaaaaaaaaaaaaaaaaaa';"
+ " var non_ascii_chars = '\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234';" //NOLINT
+ " if (ascii_chars.length != max_length) return 1;"
+ " if (non_ascii_chars.length != max_length) return 2;"
+ " var ascii = Array(max_length + 1);"
+ " var non_ascii = Array(max_length + 1);"
+ " for (var i = 0; i <= max_length; i++) {"
+ " ascii[i] = ascii_chars.substring(0, i);"
+ " non_ascii[i] = non_ascii_chars.substring(0, i);"
+ " };"
+ " for (var i = 0; i <= max_length; i++) {"
+ " if (ascii[i] != external_ascii[i]) return 3;"
+ " if (non_ascii[i] != external_non_ascii[i]) return 4;"
+ " for (var j = 0; j < i; j++) {"
+ " if (external_ascii[i] !="
+ " (external_ascii[j] + external_ascii[i - j])) return 5;"
+ " if (external_non_ascii[i] !="
+ " (external_non_ascii[j] + external_non_ascii[i - j])) return 6;"
+ " if (non_ascii[i] != (non_ascii[j] + non_ascii[i - j])) return 7;"
+ " if (ascii[i] != (ascii[j] + ascii[i - j])) return 8;"
+ " if (ascii[i] != (external_ascii[j] + ascii[i - j])) return 9;"
+ " if (ascii[i] != (ascii[j] + external_ascii[i - j])) return 10;"
+ " if (non_ascii[i] !="
+ " (external_non_ascii[j] + non_ascii[i - j])) return 11;"
+ " if (non_ascii[i] !="
+ " (non_ascii[j] + external_non_ascii[i - j])) return 12;"
+ " }"
+ " }"
+ " return 0;"
+ "};"
+ "test()";
+ CHECK_EQ(0,
+ v8::Script::Compile(v8::String::New(source))->Run()->Int32Value());
}
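AsciiResource (added near the top of this file) follows the standard recipe for handing V8 externally owned character data: subclass the resource interface, keep the bytes alive at least as long as the string, and wrap the resource with String::NewExternal. A minimal caller-side sketch against this same 2009-era API; the names here are illustrative:

// Hypothetical usage sketch. AsciiResource is a ZoneObject above, so a
// ZoneScope must be active, and kData must outlive the V8 string.
static const char kData[] = "hello";
AsciiResource* resource =
    new AsciiResource(Vector<const char>(kData, 5));  // no trailing '\0' needed
v8::Local<v8::String> str = v8::String::NewExternal(resource);
CHECK_EQ(5, str->Length());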
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index 552f49df..1e8102ec 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -82,14 +82,30 @@ v8::Handle<v8::Value> DoLoop(const v8::Arguments& args) {
}
+v8::Handle<v8::Value> DoLoopNoCall(const v8::Arguments& args) {
+ v8::TryCatch try_catch;
+ v8::Script::Compile(v8::String::New("var term = true;"
+ "while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ "}"))->Run();
+ CHECK(try_catch.HasCaught());
+ CHECK(try_catch.Exception()->IsNull());
+ CHECK(try_catch.Message().IsEmpty());
+ CHECK(!try_catch.CanContinue());
+ return v8::Undefined();
+}
+
+
v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
- v8::InvocationCallback terminate) {
+ v8::InvocationCallback terminate,
+ v8::InvocationCallback doloop) {
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
global->Set(v8::String::New("terminate"),
v8::FunctionTemplate::New(terminate));
global->Set(v8::String::New("fail"), v8::FunctionTemplate::New(Fail));
global->Set(v8::String::New("loop"), v8::FunctionTemplate::New(Loop));
- global->Set(v8::String::New("doloop"), v8::FunctionTemplate::New(DoLoop));
+ global->Set(v8::String::New("doloop"), v8::FunctionTemplate::New(doloop));
return global;
}
@@ -99,7 +115,25 @@ v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
TEST(TerminateOnlyV8ThreadFromThreadItself) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> global =
- CreateGlobalTemplate(TerminateCurrentThread);
+ CreateGlobalTemplate(TerminateCurrentThread, DoLoop);
+ v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Context::Scope context_scope(context);
+ // Run a loop that will be infinite if thread termination does not work.
+ v8::Handle<v8::String> source =
+ v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ v8::Script::Compile(source)->Run();
+ // Test that we can run the code again after thread termination.
+ v8::Script::Compile(source)->Run();
+ context.Dispose();
+}
+
+
+// Test that a single thread of JavaScript execution can terminate
+// itself in a loop that performs no calls.
+TEST(TerminateOnlyV8ThreadFromThreadItselfNoLoop) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> global =
+ CreateGlobalTemplate(TerminateCurrentThread, DoLoopNoCall);
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
v8::Context::Scope context_scope(context);
// Run a loop that will be infinite if thread termination does not work.
@@ -128,7 +162,7 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
thread.Start();
v8::HandleScope scope;
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(Signal);
+ v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(Signal, DoLoop);
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
v8::Context::Scope context_scope(context);
// Run a loop that will be infinite if thread termination does not work.
@@ -149,7 +183,8 @@ class LoopingThread : public v8::internal::Thread {
v8::Locker locker;
v8::HandleScope scope;
v8_thread_id_ = v8::V8::GetCurrentThreadId();
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(Signal);
+ v8::Handle<v8::ObjectTemplate> global =
+ CreateGlobalTemplate(Signal, DoLoop);
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
v8::Context::Scope context_scope(context);
// Run a loop that will be infinite if thread termination does not work.
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index ffcaf8ab..1d65e686 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -166,7 +166,7 @@ TEST(SNPrintF) {
// Make sure that strings that are truncated because of too small
// buffers are zero-terminated anyway.
const char* s = "the quick lazy .... oh forget it!";
- int length = strlen(s);
+ int length = StrLength(s);
for (int i = 1; i < length * 2; i++) {
static const char kMarker = static_cast<char>(42);
Vector<char> buffer = Vector<char>::New(i + 1);
@@ -177,9 +177,9 @@ TEST(SNPrintF) {
CHECK_EQ(0, strncmp(buffer.start(), s, i - 1));
CHECK_EQ(kMarker, buffer[i]);
if (i <= length) {
- CHECK_EQ(i - 1, strlen(buffer.start()));
+ CHECK_EQ(i - 1, StrLength(buffer.start()));
} else {
- CHECK_EQ(length, strlen(buffer.start()));
+ CHECK_EQ(length, StrLength(buffer.start()));
}
buffer.Dispose();
}
diff --git a/test/mjsunit/arguments-read-and-assignment.js b/test/mjsunit/arguments-read-and-assignment.js
new file mode 100644
index 00000000..c5d34bfa
--- /dev/null
+++ b/test/mjsunit/arguments-read-and-assignment.js
@@ -0,0 +1,164 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Testing basic functionality of the arguments object.
+// Introduced to ensure that the fast compiler does the right thing.
+// The arguments object itself.
+assertEquals(42, function(){ return arguments;}(42)[0],
+ "return arguments value");
+assertEquals(42, function(){ return arguments;}(42)[0],
+ "arguments in plain value context");
+assertEquals(42, function(){ arguments;return 42}(37),
+ "arguments in effect context");
+assertEquals(42, function(){ if(arguments)return 42;}(),
+ "arguments in a boolean context");
+assertEquals(42, function(){ return arguments || true;}(42)[0],
+ "arguments in a short-circuit boolean context - or");
+assertEquals(true, function(){ return arguments && [true];}(42)[0],
+ "arguments in a short-circuit boolean context - and");
+assertEquals(42, function(){ arguments = 42; return 42;}(),
+ "arguments assignment");
+// Properties of the arguments object.
+assertEquals(42, function(){ return arguments[0]; }(42),
+ "args[0] value returned");
+assertEquals(42, function(){ arguments[0]; return 42}(),
+ "args[0] value ignored");
+assertEquals(42, function(){ if (arguments[0]) return 42; }(37),
+ "args[0] to boolean");
+assertEquals(42, function(){ return arguments[0] || "no"; }(42),
+ "args[0] short-circuit boolean or true");
+assertEquals(42, function(){ return arguments[0] || 42; }(0),
+ "args[0] short-circuit boolean or false");
+assertEquals(37, function(){ return arguments[0] && 37; }(42),
+ "args[0] short-circuit boolean and true");
+assertEquals(0, function(){ return arguments[0] && 42; }(0),
+ "args[0] short-circuit boolean and false");
+assertEquals(42, function(){ arguments[0] = 42; return arguments[0]; }(37),
+ "args[0] assignment");
+// Link between arguments and parameters.
+assertEquals(42, function(a) { arguments[0] = 42; return a; }(37),
+ "assign args[0]->a");
+assertEquals(42, function(a) { a = 42; return arguments[0]; }(37),
+ "assign a->args[0]");
+assertEquals(54, function(a, b) { arguments[1] = 54; return b; }(42, 37),
+ "assign args[1]->b:b");
+assertEquals(54, function(a, b) { b = 54; return arguments[1]; }(42, 47),
+ "assign b->args[1]:b");
+assertEquals(42, function(a, b) { arguments[1] = 54; return a; }(42, 37),
+ "assign args[1]->b:a");
+assertEquals(42, function(a, b) { b = 54; return arguments[0]; }(42, 47),
+ "assign b->args[1]:a");
+
+// Capture parameters in nested contexts.
+assertEquals(33,
+ function(a,b) {
+ return a + arguments[0] +
+ function(b){ return a + b + arguments[0]; }(b); }(7,6),
+ "captured parameters");
+assertEquals(42, function(a) {
+ arguments[0] = 42;
+ return function(b){ return a; }();
+ }(37),
+ "capture value returned");
+assertEquals(42,
+ function(a) {
+ arguments[0] = 26;
+ return function(b){ a; return 42; }();
+ }(37),
+ "capture value ignored");
+assertEquals(42,
+ function(a) {
+ arguments[0] = 26;
+ return function(b){ if (a) return 42; }();
+ }(37),
+ "capture to boolean");
+assertEquals(42,
+ function(a) {
+ arguments[0] = 42;
+ return function(b){ return a || "no"; }();
+ }(37),
+ "capture short-circuit boolean or true");
+assertEquals(0,
+ function(a) {
+ arguments[0] = 0;
+ return function(b){ return a && 42; }();
+ }(37),
+ "capture short-circuit boolean and false");
+// Deeply nested.
+assertEquals(42,
+ function(a,b) {
+ return arguments[2] +
+ function(){
+ return b +
+ function() {
+ return a;
+ }();
+ }();
+ }(7,14,21),
+ "deep nested capture");
+
+// Assignment to captured parameters.
+assertEquals(42, function(a,b) {
+ arguments[1] = 11;
+ return a + function(){ a = b; return a; }() + a;
+ }(20, 37), "captured assignment");
+
+// Inside non-function scopes.
+assertEquals(42,
+ function(a) {
+ arguments[0] = 20;
+ with ({ b : 22 }) { return a + b; }
+ }(37),
+ "a in with");
+assertEquals(42,
+ function(a) {
+ with ({ b : 22 }) { return arguments[0] + b; }
+ }(20),
+ "args in with");
+assertEquals(42,
+ function(a) {
+ arguments[0] = 20;
+ with ({ b : 22 }) {
+ return function() { return a; }() + b; }
+ }(37),
+ "captured a in with");
+assertEquals(42,
+ function(a) {
+ arguments[0] = 12;
+ with ({ b : 22 }) {
+ return function f() {
+ try { throw 8 } catch(e) { return e + a };
+ }() + b;
+ }
+ }(37),
+ "in a catch in a named function captured a in with ");
+// Escaping arguments.
+function weirdargs(a,b,c) { if (!a) return arguments;
+ return [b[2],c]; }
+var args1 = weirdargs(false, null, 40);
+var res = weirdargs(true, args1, 15);
+assertEquals(40, res[0], "return old args element");
+assertEquals(15, res[1], "return own args element");
\ No newline at end of file
diff --git a/src/location.h b/test/mjsunit/compiler/function-call.js
index 9702ce4e..b2e0702a 100644
--- a/src/location.h
+++ b/test/mjsunit/compiler/function-call.js
@@ -25,33 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_LOCATION_H_
-#define V8_LOCATION_H_
+// Test of function calls.
-#include "utils.h"
+function f(x) { return x; }
-namespace v8 {
-namespace internal {
+var a;
-class Location BASE_EMBEDDED {
- public:
- static Location Temporary() { return Location(TEMP); }
- static Location Nowhere() { return Location(NOWHERE); }
- static Location Constant() { return Location(CONSTANT); }
+// Call on global object.
+a = f(8);
+assertEquals(8, a);
- bool is_temporary() { return type_ == TEMP; }
- bool is_nowhere() { return type_ == NOWHERE; }
- bool is_constant() { return type_ == CONSTANT; }
+// Call on a named property.
+var b;
+b = {x:f};
+a = b.x(9);
+assertEquals(9, a);
- private:
- enum Type { TEMP, NOWHERE, CONSTANT };
+// Call on a keyed property.
+c = "x";
+a = b[c](10);
+assertEquals(10, a);
- explicit Location(Type type) : type_(type) {}
-
- Type type_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_LOCATION_H_
+// Call on a function expression.
+function g() { return f; }
+a = g()(8);
+assertEquals(8, a);
diff --git a/test/mjsunit/compiler/globals.js b/test/mjsunit/compiler/globals.js
index 066f9277..0abd5dd3 100644
--- a/test/mjsunit/compiler/globals.js
+++ b/test/mjsunit/compiler/globals.js
@@ -53,3 +53,13 @@ assertEquals("2", eval('g'));
// Test a second load.
g = 3;
assertEquals(3, eval('g'));
+
+// Test postfix count operation.
+var t;
+t = g++;
+assertEquals(3, t);
+assertEquals(4, g);
+
+code = "g--; 1";
+assertEquals(1, eval(code));
+assertEquals(3, g);
diff --git a/test/mjsunit/compiler/jsnatives.js b/test/mjsunit/compiler/jsnatives.js
new file mode 100644
index 00000000..f5d6ac46
--- /dev/null
+++ b/test/mjsunit/compiler/jsnatives.js
@@ -0,0 +1,33 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test call of JS runtime functions.
+
+var a = %GlobalParseInt("21", 16);
+assertEquals(33, a);
diff --git a/test/mjsunit/compiler/literals-assignment.js b/test/mjsunit/compiler/literals-assignment.js
index 932bfa7f..d2996c78 100644
--- a/test/mjsunit/compiler/literals-assignment.js
+++ b/test/mjsunit/compiler/literals-assignment.js
@@ -69,3 +69,36 @@ code = "(function() {\
})()";
assertEquals(8, eval(code));
+// Test object literals.
+var a, b;
+code = "a = {x:8}";
+eval(code);
+assertEquals(8, a.x);
+
+code = "b = {x:a, y:'abc'}";
+eval(code);
+assertEquals(a, b.x);
+assertEquals(8, b.x.x);
+assertEquals("abc", b.y);
+
+code = "({x:8, y:9}); 10";
+assertEquals(10, eval(code));
+
+code = "({x:8, y:9})";
+eval(code);
+assertEquals(9, eval(code+".y"));
+
+code = "a = {2:8, x:9}";
+eval(code);
+assertEquals(8, a[2]);
+assertEquals(8, a["2"]);
+assertEquals(9, a["x"]);
+
+// Test regexp literals.
+
+a = /abc/;
+
+assertEquals(/abc/, a);
+
+code = "/abc/; 8";
+assertEquals(8, eval(code));
diff --git a/test/mjsunit/compiler/loops.js b/test/mjsunit/compiler/loops.js
new file mode 100644
index 00000000..4de45e7f
--- /dev/null
+++ b/test/mjsunit/compiler/loops.js
@@ -0,0 +1,35 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test compilation of loops.
+
+var n = 1;
+for (var i = 1; (6 - i); i++) {
+ // Factorial!
+ n = n * i;
+}
+assertEquals(120, n);
diff --git a/test/mjsunit/compiler/objectliterals.js b/test/mjsunit/compiler/objectliterals.js
new file mode 100644
index 00000000..788acb48
--- /dev/null
+++ b/test/mjsunit/compiler/objectliterals.js
@@ -0,0 +1,57 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test object literals with getter, setter and prototype properties.
+
+var o = { x: 41, get bar() { return {x:42} } };
+
+assertEquals(41, o.x);
+assertEquals(42, o.bar.x);
+
+o = { f: function() { return 41 },
+ get bar() { return this.x },
+ x:0,
+ set bar(t) { this.x = t },
+ g: function() { return 43 }
+};
+o.bar = 7;
+assertEquals(7, o.bar);
+assertEquals(7, o.x);
+assertEquals(41, o.f());
+assertEquals(43, o.g());
+
+p = {x:42};
+o = {get foo() { return this.x; },
+ f: function() { return this.foo + 1 },
+ set bar(t) { this.x = t; },
+ __proto__: p,
+};
+assertEquals(42, o.x);
+assertEquals(42, o.foo);
+assertEquals(43, o.f());
+o.bar = 44;
+assertEquals(44, o.foo);
diff --git a/test/mjsunit/compiler/property-simple.js b/test/mjsunit/compiler/property-simple.js
new file mode 100644
index 00000000..b0f0ffa6
--- /dev/null
+++ b/test/mjsunit/compiler/property-simple.js
@@ -0,0 +1,39 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test for property access.
+
+var a;
+var b;
+
+code = "a = {x:8, y:9}; a.x";
+
+assertEquals(8, eval(code));
+
+code = "b = {z:a}; b.z.y";
+
+assertEquals(9, eval(code));
diff --git a/test/mjsunit/compiler/thisfunction.js b/test/mjsunit/compiler/thisfunction.js
new file mode 100644
index 00000000..2af846f3
--- /dev/null
+++ b/test/mjsunit/compiler/thisfunction.js
@@ -0,0 +1,35 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always_fast_compiler
+
+// Test reference to this-function.
+
+var g = (function f(x) {
+ if (x == 1) return 42; else return f(1);
+ })(0);
+assertEquals(42, g);
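
Aside: the assertion above depends on a named function expression binding its
own name inside its body only; a minimal sketch of that scoping rule, not part
of this commit (names illustrative):

  var value = (function fact(n) {
    // 'fact' resolves here even though no outer binding named fact exists.
    return n <= 1 ? 1 : n * fact(n - 1);
  })(5);
  // value is 120; referencing fact outside the expression throws ReferenceError.
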
diff --git a/test/mjsunit/cyrillic.js b/test/mjsunit/cyrillic.js
new file mode 100644
index 00000000..c5712e6f
--- /dev/null
+++ b/test/mjsunit/cyrillic.js
@@ -0,0 +1,199 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test Unicode character ranges in regexps.
+
+
+// Cyrillic.
+var cyrillic = {
+ FIRST: "\u0410", // A
+ first: "\u0430", // a
+ LAST: "\u042f", // YA
+ last: "\u044f", // ya
+ MIDDLE: "\u0427", // CHE
+ middle: "\u0447", // che
+ // Actually no characters are between the cases in Cyrillic.
+ BetweenCases: false};
+
+var SIGMA = "\u03a3";
+var sigma = "\u03c3";
+var alternative_sigma = "\u03c2";
+
+// Greek.
+var greek = {
+ FIRST: "\u0391", // ALPHA
+ first: "\u03b1", // alpha
+ LAST: "\u03a9", // OMEGA
+ last: "\u03c9", // omega
+ MIDDLE: SIGMA, // SIGMA
+ middle: sigma, // sigma
+ // Epsilon acute is between ALPHA-OMEGA and alpha-omega, i.e. it
+ // is between OMEGA and alpha.
+ BetweenCases: "\u03ad"};
+
+
+function Range(from, to, flags) {
+ return new RegExp("[" + from + "-" + to + "]", flags);
+}
+
+// Test Cyrillic and Greek separately.
+for (var lang = 0; lang < 2; lang++) {
+ var chars = (lang == 0) ? cyrillic : greek;
+
+ for (var i = 0; i < 2; i++) {
+ var lc = (i == 0); // Lower case.
+ var first = lc ? chars.first : chars.FIRST;
+ var middle = lc ? chars.middle : chars.MIDDLE;
+ var last = lc ? chars.last : chars.LAST;
+ var first_other_case = lc ? chars.FIRST : chars.first;
+ var middle_other_case = lc ? chars.MIDDLE : chars.middle;
+ var last_other_case = lc ? chars.LAST : chars.last;
+
+ assertTrue(Range(first, last).test(first), 1);
+ assertTrue(Range(first, last).test(middle), 2);
+ assertTrue(Range(first, last).test(last), 3);
+
+ assertFalse(Range(first, last).test(first_other_case), 4);
+ assertFalse(Range(first, last).test(middle_other_case), 5);
+ assertFalse(Range(first, last).test(last_other_case), 6);
+
+ assertTrue(Range(first, last, "i").test(first), 7);
+ assertTrue(Range(first, last, "i").test(middle), 8);
+ assertTrue(Range(first, last, "i").test(last), 9);
+
+ assertTrue(Range(first, last, "i").test(first_other_case), 10);
+ assertTrue(Range(first, last, "i").test(middle_other_case), 11);
+ assertTrue(Range(first, last, "i").test(last_other_case), 12);
+
+ if (chars.BetweenCases) {
+ assertFalse(Range(first, last).test(chars.BetweenCases), 13);
+ assertFalse(Range(first, last, "i").test(chars.BetweenCases), 14);
+ }
+ }
+ if (chars.BetweenCases) {
+ assertTrue(Range(chars.FIRST, chars.last).test(chars.BetweenCases), 15);
+ assertTrue(Range(chars.FIRST, chars.last, "i").test(chars.BetweenCases), 16);
+ }
+}
+
+// Test range that covers both greek and cyrillic characters.
+for (var key in greek) {
+ assertTrue(Range(greek.FIRST, cyrillic.last).test(greek[key]), 17 + key);
+ if (cyrillic[key]) {
+ assertTrue(Range(greek.FIRST, cyrillic.last).test(cyrillic[key]), 18 + key);
+ }
+}
+
+for (var i = 0; i < 2; i++) {
+ var ignore_case = (i == 0);
+ var flag = ignore_case ? "i" : "";
+ assertTrue(Range(greek.first, cyrillic.LAST, flag).test(greek.first), 19);
+ assertTrue(Range(greek.first, cyrillic.LAST, flag).test(greek.middle), 20);
+ assertTrue(Range(greek.first, cyrillic.LAST, flag).test(greek.last), 21);
+
+ assertTrue(Range(greek.first, cyrillic.LAST, flag).test(cyrillic.FIRST), 22);
+ assertTrue(Range(greek.first, cyrillic.LAST, flag).test(cyrillic.MIDDLE), 23);
+ assertTrue(Range(greek.first, cyrillic.LAST, flag).test(cyrillic.LAST), 24);
+
+ // A range that covers the lower case greek letters and the upper case cyrillic
+ // letters.
+ assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(greek.FIRST), 25);
+ assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(greek.MIDDLE), 26);
+ assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(greek.LAST), 27);
+
+ assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(cyrillic.first), 28);
+ assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(cyrillic.middle), 29);
+ assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(cyrillic.last), 30);
+}
+
+
+// Sigma is special because there are two lower case versions of the same upper
+// case character. JavaScript specifies that case-independent matching
+// canonicalizes characters to upper case, so the two sigma variants are equal
+// to each other in a case-independent comparison.
+for (var i = 0; i < 2; i++) {
+ var simple = (i != 0);
+ var name = simple ? "" : "[]";
+ var regex = simple ? SIGMA : "[" + SIGMA + "]";
+
+ assertFalse(new RegExp(regex).test(sigma), 31 + name);
+ assertFalse(new RegExp(regex).test(alternative_sigma), 32 + name);
+ assertTrue(new RegExp(regex).test(SIGMA), 33 + name);
+
+ assertTrue(new RegExp(regex, "i").test(sigma), 34 + name);
+ // JSC and Tracemonkey fail this one.
+ assertTrue(new RegExp(regex, "i").test(alternative_sigma), 35 + name);
+ assertTrue(new RegExp(regex, "i").test(SIGMA), 36 + name);
+
+ regex = simple ? sigma : "[" + sigma + "]";
+
+ assertTrue(new RegExp(regex).test(sigma), 41 + name);
+ assertFalse(new RegExp(regex).test(alternative_sigma), 42 + name);
+ assertFalse(new RegExp(regex).test(SIGMA), 43 + name);
+
+ assertTrue(new RegExp(regex, "i").test(sigma), 44 + name);
+ // JSC and Tracemonkey fail this one.
+ assertTrue(new RegExp(regex, "i").test(alternative_sigma), 45 + name);
+ assertTrue(new RegExp(regex, "i").test(SIGMA), 46 + name);
+
+ regex = simple ? alternative_sigma : "[" + alternative_sigma + "]";
+
+ assertFalse(new RegExp(regex).test(sigma), 51 + name);
+ assertTrue(new RegExp(regex).test(alternative_sigma), 52 + name);
+ assertFalse(new RegExp(regex).test(SIGMA), 53 + name);
+
+ // JSC and Tracemonkey fail this one.
+ assertTrue(new RegExp(regex, "i").test(sigma), 54 + name);
+ assertTrue(new RegExp(regex, "i").test(alternative_sigma), 55 + name);
+ // JSC and Tracemonkey fail this one.
+ assertTrue(new RegExp(regex, "i").test(SIGMA), 56 + name);
+}
+
+
+for (var add_non_ascii_character_to_subject = 0;
+ add_non_ascii_character_to_subject < 2;
+ add_non_ascii_character_to_subject++) {
+ var suffix = add_non_ascii_character_to_subject ? "\ufffe" : "";
+ // A range that covers both ASCII and non-ASCII.
+ for (var i = 0; i < 2; i++) {
+ var full = (i != 0);
+ var mixed = full ? "[a-\uffff]" : "[a-" + cyrillic.LAST + "]";
+ var f = full ? "f" : "c";
+ for (var j = 0; j < 2; j++) {
+ var ignore_case = (j == 0);
+ var flag = ignore_case ? "i" : "";
+ var re = new RegExp(mixed, flag);
+ assertEquals(ignore_case || (full && add_non_ascii_character_to_subject),
+ re.test("A" + suffix),
+ 58 + flag + f);
+ assertTrue(re.test("a" + suffix), 59 + flag + f);
+ assertTrue(re.test("~" + suffix), 60 + flag + f);
+ assertTrue(re.test(cyrillic.MIDDLE), 61 + flag + f);
+ assertEquals(ignore_case || full, re.test(cyrillic.middle), 62 + flag + f);
+ }
+ }
+}
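
Aside: a compact sketch of the sigma rule the loop above tests, not part of
this commit: lowercase sigma (U+03C3) and final sigma (U+03C2) both uppercase
to capital sigma (U+03A3), so the variants match each other only under /i:

  var S = "\u03a3", s1 = "\u03c3", s2 = "\u03c2";
  new RegExp(s1).test(s2);        // false: case-sensitive, distinct code points
  new RegExp(s1, "i").test(s2);   // true in V8: both canonicalize to U+03A3
  new RegExp(s2, "i").test(S);    // true in V8: same canonical form
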
diff --git a/test/mjsunit/debug-stepnext-do-while.js b/test/mjsunit/debug-stepnext-do-while.js
new file mode 100644
index 00000000..17058a7b
--- /dev/null
+++ b/test/mjsunit/debug-stepnext-do-while.js
@@ -0,0 +1,79 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var break_break_point_hit_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (break_break_point_hit_count == 0) {
+ assertEquals(' debugger;',
+ event_data.sourceLineText());
+ assertEquals('runDoWhile', event_data.func().name());
+ } else if (break_break_point_hit_count == 1) {
+ assertEquals(' } while(condition());',
+ event_data.sourceLineText());
+ assertEquals('runDoWhile', event_data.func().name());
+ }
+
+ break_break_point_hit_count++;
+ // Continue stepping until returned to bottom frame.
+ if (exec_state.frameCount() > 1) {
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ }
+
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function condition() {
+ return false;
+}
+
+function runDoWhile() {
+ do {
+ debugger;
+ } while(condition());
+};
+
+break_break_point_hit_count = 0;
+runDoWhile();
+assertNull(exception);
+assertEquals(4, break_break_point_hit_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/deep-recursion.js b/test/mjsunit/deep-recursion.js
index a8093eb6..588b5d61 100644
--- a/test/mjsunit/deep-recursion.js
+++ b/test/mjsunit/deep-recursion.js
@@ -30,9 +30,7 @@
* cause stack overflows.
*/
-var depth = 110000;
-
-function newdeep(start) {
+function newdeep(start, depth) {
var d = start;
for (var i = 0; i < depth; i++) {
d = d + "f";
@@ -40,23 +38,27 @@ function newdeep(start) {
return d;
}
-var deep = newdeep("foo");
+var default_depth = 110000;
+
+var deep = newdeep("foo", default_depth);
assertEquals('f', deep[0]);
-var cmp1 = newdeep("a");
-var cmp2 = newdeep("b");
+var cmp1 = newdeep("a", default_depth);
+var cmp2 = newdeep("b", default_depth);
assertEquals(-1, cmp1.localeCompare(cmp2), "ab");
-var cmp2empty = newdeep("c");
+var cmp2empty = newdeep("c", default_depth);
assertTrue(cmp2empty.localeCompare("") > 0, "c");
-var cmp3empty = newdeep("d");
+var cmp3empty = newdeep("d", default_depth);
assertTrue("".localeCompare(cmp3empty) < 0), "d";
-var slicer = newdeep("slice");
+var slicer_depth = 1100;
+
+var slicer = newdeep("slice", slicer_depth);
-for (i = 0; i < depth + 4; i += 2) {
+for (i = 0; i < slicer_depth + 4; i += 2) {
slicer = slicer.slice(1, -1);
}
diff --git a/test/mjsunit/eval-typeof-non-existing.js b/test/mjsunit/eval-typeof-non-existing.js
index 3513767d..8cc6d0bc 100644
--- a/test/mjsunit/eval-typeof-non-existing.js
+++ b/test/mjsunit/eval-typeof-non-existing.js
@@ -25,8 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Typeof expression must resolve to undefined when it used on a
+// Typeof expression must resolve to 'undefined' when it is used on a
// non-existing property. It is *not* allowed to throw a
// ReferenceError.
assertEquals('undefined', typeof xxx);
assertEquals('undefined', eval('typeof xxx'));
+
+assertThrows('typeof(true ? xxx : yyy)', ReferenceError);
+assertThrows('with ({}) { typeof(true ? xxx : yyy) }', ReferenceError);
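
Aside: the new assertions hinge on typeof guarding only a bare unresolved
name; once the name must be evaluated as a value inside a larger expression,
the lookup happens first and throws. A minimal sketch, not part of this commit:

  typeof missing;                 // "undefined": no lookup error for a bare name
  // typeof (true ? missing : other) throws ReferenceError: the conditional
  // has to evaluate 'missing' to a value before typeof ever sees it.
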
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index cdf58a55..f495c727 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -129,7 +129,9 @@ var knownProblems = {
"Log": true,
"DeclareGlobals": true,
- "CollectStackTrace": true
+ "CollectStackTrace": true,
+ "PromoteScheduledException": true,
+ "DeleteHandleScopeExtensions": true
};
var currentlyUncallable = {
diff --git a/test/mjsunit/math-min-max.js b/test/mjsunit/math-min-max.js
index 0ed99120..1a98d440 100644
--- a/test/mjsunit/math-min-max.js
+++ b/test/mjsunit/math-min-max.js
@@ -34,11 +34,15 @@ assertEquals(1, Math.min(2, 1));
assertEquals(1, Math.min(1, 2, 3));
assertEquals(1, Math.min(3, 2, 1));
assertEquals(1, Math.min(2, 3, 1));
+assertEquals(1.1, Math.min(1.1, 2.2, 3.3));
+assertEquals(1.1, Math.min(3.3, 2.2, 1.1));
+assertEquals(1.1, Math.min(2.2, 3.3, 1.1));
var o = {};
o.valueOf = function() { return 1; };
assertEquals(1, Math.min(2, 3, '1'));
assertEquals(1, Math.min(3, o, 2));
+assertEquals(1, Math.min(o, 2));
assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(-0, +0));
assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(+0, -0));
assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(+0, -0, 1));
@@ -46,7 +50,9 @@ assertEquals(-1, Math.min(+0, -0, -1));
assertEquals(-1, Math.min(-1, +0, -0));
assertEquals(-1, Math.min(+0, -1, -0));
assertEquals(-1, Math.min(-0, -1, +0));
-
+assertNaN(Math.min('oxen'));
+assertNaN(Math.min('oxen', 1));
+assertNaN(Math.min(1, 'oxen'));
// Test Math.max().
@@ -58,15 +64,22 @@ assertEquals(2, Math.max(2, 1));
assertEquals(3, Math.max(1, 2, 3));
assertEquals(3, Math.max(3, 2, 1));
assertEquals(3, Math.max(2, 3, 1));
+assertEquals(3.3, Math.max(1.1, 2.2, 3.3));
+assertEquals(3.3, Math.max(3.3, 2.2, 1.1));
+assertEquals(3.3, Math.max(2.2, 3.3, 1.1));
var o = {};
o.valueOf = function() { return 3; };
assertEquals(3, Math.max(2, '3', 1));
assertEquals(3, Math.max(1, o, 2));
+assertEquals(3, Math.max(o, 1));
assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(-0, +0));
assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(+0, -0));
assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(+0, -0, -1));
assertEquals(1, Math.max(+0, -0, +1));
assertEquals(1, Math.max(+1, +0, -0));
assertEquals(1, Math.max(+0, +1, -0));
-assertEquals(1, Math.max(-0, +1, +0)); \ No newline at end of file
+assertEquals(1, Math.max(-0, +1, +0));
+assertNaN(Math.max('oxen'));
+assertNaN(Math.max('oxen', 1));
+assertNaN(Math.max(1, 'oxen'));
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index 1fb3f02a..8ced0119 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -75,6 +75,9 @@ function deepEquals(a, b) {
if (typeof a == "number" && typeof b == "number" && isNaN(a) && isNaN(b)) {
return true;
}
+ if (a.constructor === RegExp || b.constructor === RegExp) {
+ return (a.constructor === b.constructor) && (a.toString() === b.toString());
+ }
if ((typeof a) !== 'object' || (typeof b) !== 'object' ||
(a === null) || (b === null))
return false;
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 0b069ccb..8eb59b7e 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -34,8 +34,18 @@ bugs: FAIL
# too long to run in debug mode on ARM.
fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
+# Issue 494: new snapshot code breaks mjsunit/apply on mac debug snapshot.
+apply: PASS, FAIL if ($system == macos && $mode == debug)
+
big-object-literal: PASS, SKIP if ($arch == arm)
+# Issue 488: this test sometimes times out.
+array-constructor: PASS || TIMEOUT
+
+# Very slow on ARM, contains no architecture dependent code.
+unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
+
+
[ $arch == arm ]
# Slow tests which time out in debug mode.
@@ -46,9 +56,9 @@ array-constructor: PASS, SKIP if $mode == debug
# Flaky test that can hit compilation-time stack overflow in debug mode.
unicode-test: PASS, (PASS || FAIL) if $mode == debug
-# Bug number 130 http://code.google.com/p/v8/issues/detail?id=130
-# Fails on real ARM hardware but not on the simulator.
-string-compare-alignment: PASS || FAIL
-
# Times out often in release mode on ARM.
array-splice: PASS || TIMEOUT
+
+# Skip long running test in debug mode on ARM.
+string-indexof-2: PASS, SKIP if $mode == debug
+
diff --git a/test/mjsunit/parse-int-float.js b/test/mjsunit/parse-int-float.js
index ad2275e6..b9620ff6 100644
--- a/test/mjsunit/parse-int-float.js
+++ b/test/mjsunit/parse-int-float.js
@@ -36,9 +36,12 @@ assertEquals(-63, parseInt(' -077'));
assertEquals(3, parseInt('11', 2));
assertEquals(4, parseInt('11', 3));
+assertEquals(4, parseInt('11', 3.8));
assertEquals(0x12, parseInt('0x12'));
assertEquals(0x12, parseInt('0x12', 16));
+assertEquals(0x12, parseInt('0x12', 16.1));
+assertEquals(0x12, parseInt('0x12', NaN));
assertEquals(12, parseInt('12aaa'));
diff --git a/test/mjsunit/regress/regress-124.js b/test/mjsunit/regress/regress-124.js
index 81526b0e..0b3aae53 100644
--- a/test/mjsunit/regress/regress-124.js
+++ b/test/mjsunit/regress/regress-124.js
@@ -48,9 +48,9 @@ function F(f) {
assertEquals("[object global]", eval("f()"));
// Receiver should be the arguments object here.
- assertEquals("[object Object]", eval("arguments[0]()"));
+ assertEquals("[object Arguments]", eval("arguments[0]()"));
with (arguments) {
- assertEquals("[object Object]", toString());
+ assertEquals("[object Arguments]", toString());
}
}
diff --git a/test/mjsunit/regress/regress-2249423.js b/test/mjsunit/regress/regress-2249423.js
new file mode 100644
index 00000000..a590f33f
--- /dev/null
+++ b/test/mjsunit/regress/regress-2249423.js
@@ -0,0 +1,40 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See http://code.google.com/p/chromium/issues/detail?id=27227
+// Regression test for stack corruption issue.
+
+function top() {
+ function g(a, b) {}
+ function t() {
+ for (var i=0; i<1; ++i) {
+ g(32768, g());
+ }
+ }
+ t();
+}
+top();
diff --git a/test/mjsunit/regress/regress-485.js b/test/mjsunit/regress/regress-485.js
new file mode 100755
index 00000000..62c6fb95
--- /dev/null
+++ b/test/mjsunit/regress/regress-485.js
@@ -0,0 +1,64 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=485
+
+// Ensure that we don't expose the builtins object when calling
+// builtin functions that use or return "this".
+
+var global = this;
+var global2 = (function(){return this;})();
+assertEquals(global, global2, "direct call to local function returns global");
+
+var builtin = Object.prototype.valueOf; // Builtin function that returns this.
+
+assertEquals(global, builtin(), "Direct call to builtin");
+
+assertEquals(global, builtin.call(), "call() to builtin");
+assertEquals(global, builtin.call(null), "call(null) to builtin");
+assertEquals(global, builtin.call(undefined), "call(undefined) to builtin");
+
+assertEquals(global, builtin.apply(), "apply() to builtin");
+assertEquals(global, builtin.apply(null), "apply(null) to builtin");
+assertEquals(global, builtin.apply(undefined), "apply(undefined) to builtin");
+
+assertEquals(global, builtin.call.call(builtin), "call.call() to builtin");
+assertEquals(global, builtin.call.apply(builtin), "call.apply() to builtin");
+assertEquals(global, builtin.apply.call(builtin), "apply.call() to builtin");
+assertEquals(global, builtin.apply.apply(builtin), "apply.apply() to builtin");
+
+
+// Builtin that depends on value of this to compute result.
+var builtin2 = Object.prototype.toString;
+
+// Global object has class "Object" according to Object.prototype.toString.
+// Builtins object displays as "[object builtins]".
+assertTrue("[object builtins]" != builtin2(), "Direct call to toString");
+assertTrue("[object builtins]" != builtin2.call(), "call() to toString");
+assertTrue("[object builtins]" != builtin2.apply(), "call() to toString");
+assertTrue("[object builtins]" != builtin2.call.call(builtin2),
+ "call.call() to toString");
diff --git a/test/mjsunit/regress/regress-486.js b/test/mjsunit/regress/regress-486.js
new file mode 100644
index 00000000..c1e29a63
--- /dev/null
+++ b/test/mjsunit/regress/regress-486.js
@@ -0,0 +1,30 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var st = "\u0422\u0435\u0441\u0442"; // Test in Cyrillic characters.
+var cyrillicMatch = /^[\u0430-\u044fa-z]+$/i.test(st); // a-ja a-z.
+assertTrue(cyrillicMatch);
diff --git a/test/mjsunit/regress/regress-490.js b/test/mjsunit/regress/regress-490.js
new file mode 100644
index 00000000..8dd89591
--- /dev/null
+++ b/test/mjsunit/regress/regress-490.js
@@ -0,0 +1,48 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=490
+
+var kXXX = 11;
+// Build a string longer than 2^11. See StringBuilderConcatHelper and
+// Runtime_StringBuilderConcat in runtime.cc and
+// ReplaceResultBuilder.prototype.addSpecialSlice in string.js.
+var a = '';
+while (a.length < (2 << kXXX)) { a += 'x'; }
+
+// Test specific for bug introduced in r3153.
+a.replace(/^(.*)/, '$1$1$1');
+
+// More generalized test.
+for (var i = 0; i < 10; i++) {
+ var b = '';
+ for (var j = 0; j < 10; j++) {
+ b += '$1';
+ a.replace(/^(.*)/, b);
+ }
+ a += a;
+}
diff --git a/test/mjsunit/regress/regress-491.js b/test/mjsunit/regress/regress-491.js
new file mode 100644
index 00000000..2cf5e20e
--- /dev/null
+++ b/test/mjsunit/regress/regress-491.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=491
+// This should not hit any asserts in debug mode on ARM.
+
+function function_with_n_strings(n) {
+ var source = '(function f(){';
+ for (var i = 0; i < n; i++) {
+ if (i != 0) source += ';';
+ source += '"x"';
+ }
+ source += '})()';
+ eval(source);
+}
+
+var i;
+for (i = 500; i < 600; i++) {
+ function_with_n_strings(i);
+}
+for (i = 1100; i < 1200; i++) {
+ function_with_n_strings(i);
+}
diff --git a/test/mjsunit/regress/regress-492.js b/test/mjsunit/regress/regress-492.js
new file mode 100644
index 00000000..a8b783b3
--- /dev/null
+++ b/test/mjsunit/regress/regress-492.js
@@ -0,0 +1,52 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=492
+// This should not hit any asserts in debug mode on ARM.
+
+function function_with_n_args(n) {
+ var source = '(function f(';
+ for (var arg = 0; arg < n; arg++) {
+ if (arg != 0) source += ',';
+ source += 'arg' + arg;
+ }
+ source += ') { })()';
+ eval(source);
+}
+
+var args;
+for (args = 250; args < 270; args++) {
+ function_with_n_args(args);
+}
+
+for (args = 500; args < 520; args++) {
+ function_with_n_args(args);
+}
+
+for (args = 1019; args < 1041; args++) {
+ function_with_n_args(args);
+}
diff --git a/test/mjsunit/regress/regress-496.js b/test/mjsunit/regress/regress-496.js
new file mode 100644
index 00000000..33c1a677
--- /dev/null
+++ b/test/mjsunit/regress/regress-496.js
@@ -0,0 +1,39 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for http://code.google.com/p/v8/issues/detail?id=496.
+//
+// Tests that we do not treat the unaliased eval call in g as an
+// aliased call to eval.
+
+function h() {
+ function f() { return eval; }
+ function g() { var x = 44; return eval("x"); }
+ assertEquals(44, g());
+}
+
+h();
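
Aside: for contrast with the unaliased call above, an aliased (indirect) eval
evaluates in global scope under ES5 semantics and would not see g's local x; a
minimal sketch, not part of this commit (names illustrative):

  var x = "global";
  function g2() {
    var x = 44;
    var aliased = eval;           // calling through a reference is indirect
    return [eval("x"), aliased("x")];
  }
  // g2() yields [44, "global"]: only the direct call sees the local x.
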
diff --git a/test/mjsunit/regress/regress-502.js b/test/mjsunit/regress/regress-502.js
new file mode 100644
index 00000000..d3c9381d
--- /dev/null
+++ b/test/mjsunit/regress/regress-502.js
@@ -0,0 +1,38 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for http://code.google.com/p/v8/issues/detail?id=502.
+//
+// Test that we do not generate an inlined version of the constructor
+// function C.
+
+var X = 'x';
+function C() { this[X] = 42; }
+var a = new C();
+var b = new C();
+assertEquals(42, a.x);
+assertEquals(42, b.x);
diff --git a/test/mjsunit/regress/regress-503.js b/test/mjsunit/regress/regress-503.js
new file mode 100644
index 00000000..5b156b27
--- /dev/null
+++ b/test/mjsunit/regress/regress-503.js
@@ -0,0 +1,63 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertTrue(undefined == undefined, 1);
+assertFalse(undefined <= undefined, 2);
+assertFalse(undefined >= undefined, 3);
+assertFalse(undefined < undefined, 4);
+assertFalse(undefined > undefined, 5);
+
+assertTrue(null == null, 6);
+assertTrue(null <= null, 7);
+assertTrue(null >= null, 8);
+assertFalse(null < null, 9);
+assertFalse(null > null, 10);
+
+assertTrue(void 0 == void 0, 11);
+assertFalse(void 0 <= void 0, 12);
+assertFalse(void 0 >= void 0, 13);
+assertFalse(void 0 < void 0, 14);
+assertFalse(void 0 > void 0, 15);
+
+var x = void 0;
+
+assertTrue(x == x, 16);
+assertFalse(x <= x, 17);
+assertFalse(x >= x, 18);
+assertFalse(x < x, 19);
+assertFalse(x > x, 20);
+
+var not_undefined = [null, 0, 1, 1/0, -1/0, "", true, false];
+for (var i = 0; i < not_undefined.length; i++) {
+ x = not_undefined[i];
+
+ assertTrue(x == x, "" + 21 + x);
+ assertTrue(x <= x, "" + 22 + x);
+ assertTrue(x >= x, "" + 23 + x);
+ assertFalse(x < x, "" + 24 + x);
+ assertFalse(x > x, "" + 25 + x);
+}
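
Aside: the pattern above follows from the abstract relational comparison
applying ToNumber while == special-cases undefined and null; a worked sketch,
not part of this commit:

  Number(undefined);      // NaN, so undefined <= undefined acts like NaN <= NaN: false
  Number(null);           // 0, so null <= null acts like 0 <= 0: true
  undefined == undefined; // true: equality never coerces undefined to a number
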
diff --git a/test/mjsunit/regress/regress-515.js b/test/mjsunit/regress/regress-515.js
new file mode 100644
index 00000000..7675fe19
--- /dev/null
+++ b/test/mjsunit/regress/regress-515.js
@@ -0,0 +1,40 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for http://code.google.com/p/v8/issues/detail?id=515.
+//
+// The test passes if it does not crash.
+
+var length = 2048;
+var s = "";
+for (var i = 0; i < length; i++) {
+ s += '.';
+}
+
+var string = s + 'x' + s + 'x' + s;
+
+string.replace(/x/g, "")
diff --git a/test/mjsunit/regress/regress-526.js b/test/mjsunit/regress/regress-526.js
new file mode 100644
index 00000000..0cae97aa
--- /dev/null
+++ b/test/mjsunit/regress/regress-526.js
@@ -0,0 +1,32 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test object literals with computed property and getter.
+
+var o = { foo: function() { }, get bar() { return {x:42} } };
+
+assertEquals(42, o.bar.x);
diff --git a/test/mjsunit/regress/regress-540.js b/test/mjsunit/regress/regress-540.js
new file mode 100644
index 00000000..c40fa2cb
--- /dev/null
+++ b/test/mjsunit/regress/regress-540.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test context slot declarations in the arguments object.
+// See http://code.google.com/p/v8/issues/detail?id=540
+
+function f(x, y) { eval(x); return y(); }
+var result = f("function y() { return 1; }", function () { return 0; })
+assertEquals(1, result);
+
+result =
+ (function (x) {
+ function x() { return 3; }
+ return x();
+ })(function () { return 2; });
+assertEquals(3, result);
+
+result =
+ (function (x) {
+ function x() { return 5; }
+ return arguments[0]();
+ })(function () { return 4; });
+assertEquals(5, result);
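
Aside: a minimal sketch of the binding rule under test, not part of this
commit: a body-level function declaration shadows a same-named parameter, and
the non-strict arguments object aliases that same slot (names illustrative):

  function demo(x) {
    function x() { return "declared"; }
    // The hoisted declaration overwrites the passed-in value, and
    // arguments[0] reflects the same binding.
    return [x(), arguments[0]()];
  }
  // demo(function () { return "passed"; }) yields ["declared", "declared"].
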
diff --git a/test/mjsunit/regress/regress-r3391.js b/test/mjsunit/regress/regress-r3391.js
new file mode 100644
index 00000000..d5572843
--- /dev/null
+++ b/test/mjsunit/regress/regress-r3391.js
@@ -0,0 +1,77 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check what we do if toLocaleString doesn't return a string when we are
+// calling Array.prototype.toLocaleString. The standard is somewhat
+// vague on this point. This test is now passed by both V8 and JSC.
+
+var evil_called = 0;
+var evil_locale_called = 0;
+var exception_thrown = 0;
+
+function evil_to_string() {
+ evil_called++;
+ return this;
+}
+
+function evil_to_locale_string() {
+ evil_locale_called++;
+ return this;
+}
+
+var o = {toString: evil_to_string, toLocaleString: evil_to_locale_string};
+
+try {
+ [o].toLocaleString();
+} catch(e) {
+ exception_thrown++;
+}
+
+assertEquals(1, evil_called, "evil1");
+assertEquals(1, evil_locale_called, "local1");
+assertEquals(1, exception_thrown, "exception1");
+
+try {
+ [o].toString();
+} catch(e) {
+ exception_thrown++;
+}
+
+assertEquals(2, evil_called, "evil2");
+assertEquals(1, evil_locale_called, "local2");
+assertEquals(2, exception_thrown, "exception2");
+
+try {
+ [o].join(o);
+} catch(e) {
+ exception_thrown++;
+}
+
+assertEquals(3, evil_called, "evil3");
+assertEquals(1, evil_locale_called, "local3");
+assertEquals(3, exception_thrown, "exception3");
+print("ok");
diff --git a/test/mjsunit/string-add.js b/test/mjsunit/string-add.js
index c42cf793..f226ca18 100644
--- a/test/mjsunit/string-add.js
+++ b/test/mjsunit/string-add.js
@@ -173,3 +173,23 @@ assertEquals(0, null + null, "uu");
assertEquals("42strz", reswz, "swwz");
assertEquals(84, resww, "swww");
})(1);
+
+// Generate ascii and non-ascii strings of lengths 0 to 20.
+var ascii = 'aaaaaaaaaaaaaaaaaaaa';
+var non_ascii = '\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234';
+assertEquals(20, ascii.length);
+assertEquals(20, non_ascii.length);
+var a = Array(21);
+var b = Array(21);
+for (var i = 0; i <= 20; i++) {
+ a[i] = ascii.substring(0, i);
+ b[i] = non_ascii.substring(0, i);
+}
+
+// Add ascii and non-ascii strings generating strings with length from 0 to 20.
+for (var i = 0; i <= 20; i++) {
+ for (var j = 0; j < i; j++) {
+ assertEquals(a[i], a[j] + a[i - j])
+ assertEquals(b[i], b[j] + b[i - j])
+ }
+}
diff --git a/test/mjsunit/string-charcodeat.js b/test/mjsunit/string-charcodeat.js
index f66dd3ef..39275577 100644
--- a/test/mjsunit/string-charcodeat.js
+++ b/test/mjsunit/string-charcodeat.js
@@ -30,7 +30,7 @@
*/
function Cons() {
- return "Te" + "st";
+ return "Te" + "st testing 123";
}
@@ -38,22 +38,22 @@ function Deep() {
var a = "T";
a += "e";
a += "s";
- a += "t";
+ a += "ting testing 123";
return a;
}
function Slice() {
- return "testing Testing".substring(8, 12);
+ return "testing Testing testing 123456789012345".substring(8, 22);
}
function Flat() {
- return "Test";
+ return "Testing testing 123";
}
function Cons16() {
- return "Te" + "\u1234t";
+ return "Te" + "\u1234t testing 123";
}
@@ -61,18 +61,18 @@ function Deep16() {
var a = "T";
a += "e";
a += "\u1234";
- a += "t";
+ a += "ting testing 123";
return a;
}
function Slice16Beginning() {
- return "Te\u1234t test".substring(0, 4);
+ return "Te\u1234t testing testing 123".substring(0, 14);
}
function Slice16Middle() {
- return "test Te\u1234t test".substring(5, 9);
+ return "test Te\u1234t testing testing 123".substring(5, 19);
}
@@ -82,7 +82,7 @@ function Slice16End() {
function Flat16() {
- return "Te\u1234t";
+ return "Te\u1234ting testing 123";
}
@@ -108,32 +108,35 @@ function NotAString16() {
function TestStringType(generator, sixteen) {
var g = generator;
- assertTrue(isNaN(g().charCodeAt(-1e19)));
- assertTrue(isNaN(g().charCodeAt(-0x80000001)));
- assertTrue(isNaN(g().charCodeAt(-0x80000000)));
- assertTrue(isNaN(g().charCodeAt(-0x40000000)));
- assertTrue(isNaN(g().charCodeAt(-1)));
- assertTrue(isNaN(g().charCodeAt(4)));
- assertTrue(isNaN(g().charCodeAt(5)));
- assertTrue(isNaN(g().charCodeAt(0x3fffffff)));
- assertTrue(isNaN(g().charCodeAt(0x7fffffff)));
- assertTrue(isNaN(g().charCodeAt(0x80000000)));
- assertTrue(isNaN(g().charCodeAt(1e9)));
- assertEquals(84, g().charCodeAt(0));
- assertEquals(84, g().charCodeAt("test"));
- assertEquals(84, g().charCodeAt(""));
- assertEquals(84, g().charCodeAt(null));
- assertEquals(84, g().charCodeAt(undefined));
- assertEquals(84, g().charCodeAt());
- assertEquals(84, g().charCodeAt(void 0));
- assertEquals(84, g().charCodeAt(false));
- assertEquals(101, g().charCodeAt(true));
- assertEquals(101, g().charCodeAt(1));
- assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2));
- assertEquals(116, g().charCodeAt(3));
- assertEquals(101, g().charCodeAt(1.1));
- assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2.1718));
- assertEquals(116, g().charCodeAt(3.14159));
+ var len = g().toString().length;
+ var t = sixteen ? "t" : "f"
+ t += generator.name;
+ assertTrue(isNaN(g().charCodeAt(-1e19)), 1 + t);
+ assertTrue(isNaN(g().charCodeAt(-0x80000001)), 2 + t);
+ assertTrue(isNaN(g().charCodeAt(-0x80000000)), 3 + t);
+ assertTrue(isNaN(g().charCodeAt(-0x40000000)), 4 + t);
+ assertTrue(isNaN(g().charCodeAt(-1)), 5 + t);
+ assertTrue(isNaN(g().charCodeAt(len)), 6 + t);
+ assertTrue(isNaN(g().charCodeAt(len + 1)), 7 + t);
+ assertTrue(isNaN(g().charCodeAt(0x3fffffff)), 8 + t);
+ assertTrue(isNaN(g().charCodeAt(0x7fffffff)), 9 + t);
+ assertTrue(isNaN(g().charCodeAt(0x80000000)), 10 + t);
+ assertTrue(isNaN(g().charCodeAt(1e9)), 11 + t);
+ assertEquals(84, g().charCodeAt(0), 12 + t);
+ assertEquals(84, g().charCodeAt("test"), 13 + t);
+ assertEquals(84, g().charCodeAt(""), 14 + t);
+ assertEquals(84, g().charCodeAt(null), 15 + t);
+ assertEquals(84, g().charCodeAt(undefined), 16 + t);
+ assertEquals(84, g().charCodeAt(), 17 + t);
+ assertEquals(84, g().charCodeAt(void 0), 18 + t);
+ assertEquals(84, g().charCodeAt(false), 19 + t);
+ assertEquals(101, g().charCodeAt(true), 20 + t);
+ assertEquals(101, g().charCodeAt(1), 21 + t);
+ assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2), 22 + t);
+ assertEquals(116, g().charCodeAt(3), 23 + t);
+ assertEquals(101, g().charCodeAt(1.1), 24 + t);
+ assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2.1718), 25 + t);
+ assertEquals(116, g().charCodeAt(3.14159), 26 + t);
}
@@ -157,10 +160,10 @@ function StupidThing() {
this.charCodeAt = String.prototype.charCodeAt;
}
-assertEquals(52, new StupidThing().charCodeAt(0));
-assertEquals(50, new StupidThing().charCodeAt(1));
-assertTrue(isNaN(new StupidThing().charCodeAt(2)));
-assertTrue(isNaN(new StupidThing().charCodeAt(-1)));
+assertEquals(52, new StupidThing().charCodeAt(0), 27);
+assertEquals(50, new StupidThing().charCodeAt(1), 28);
+assertTrue(isNaN(new StupidThing().charCodeAt(2)), 29);
+assertTrue(isNaN(new StupidThing().charCodeAt(-1)), 30);
// Medium (>255) and long (>65535) strings.
@@ -178,12 +181,12 @@ long += long + long + long; // 4096.
long += long + long + long; // 16384.
long += long + long + long; // 65536.
-assertTrue(isNaN(medium.charCodeAt(-1)));
-assertEquals(49, medium.charCodeAt(0));
-assertEquals(56, medium.charCodeAt(255));
-assertTrue(isNaN(medium.charCodeAt(256)));
+assertTrue(isNaN(medium.charCodeAt(-1)), 31);
+assertEquals(49, medium.charCodeAt(0), 32);
+assertEquals(56, medium.charCodeAt(255), 33);
+assertTrue(isNaN(medium.charCodeAt(256)), 34);
-assertTrue(isNaN(long.charCodeAt(-1)));
-assertEquals(49, long.charCodeAt(0));
-assertEquals(56, long.charCodeAt(65535));
-assertTrue(isNaN(long.charCodeAt(65536)));
+assertTrue(isNaN(long.charCodeAt(-1)), 35);
+assertEquals(49, long.charCodeAt(0), 36);
+assertEquals(56, long.charCodeAt(65535), 37);
+assertTrue(isNaN(long.charCodeAt(65536)), 38);
diff --git a/test/mjsunit/string-indexof.js b/test/mjsunit/string-indexof-1.js
index 2018da72..c7dcdb83 100644
--- a/test/mjsunit/string-indexof.js
+++ b/test/mjsunit/string-indexof-1.js
@@ -97,46 +97,3 @@ assertEquals(1534, long.indexOf("AJABACA", 511), "Long AJABACA, Second J");
pattern = "JABACABADABACABA";
assertEquals(511, long.indexOf(pattern), "Long JABACABA..., First J");
assertEquals(1535, long.indexOf(pattern, 512), "Long JABACABA..., Second J");
-
-
-var lipsum = "lorem ipsum per se esse fugiendum. itaque aiunt hanc quasi "
- + "naturalem atque insitam in animis nostris inesse notionem, ut "
- + "alterum esse appetendum, alterum aspernandum sentiamus. Alii autem,"
- + " quibus ego assentior, cum a philosophis compluribus permulta "
- + "dicantur, cur nec voluptas in bonis sit numeranda nec in malis "
- + "dolor, non existimant oportere nimium nos causae confidere, sed et"
- + " argumentandum et accurate disserendum et rationibus conquisitis de"
- + " voluptate et dolore disputandum putant.\n"
- + "Sed ut perspiciatis, unde omnis iste natus error sit voluptatem "
- + "accusantium doloremque laudantium, totam rem aperiam eaque ipsa,"
- + "quae ab illo inventore veritatis et quasi architecto beatae vitae "
- + "dicta sunt, explicabo. nemo enim ipsam voluptatem, quia voluptas"
- + "sit, aspernatur aut odit aut fugit, sed quia consequuntur magni"
- + " dolores eos, qui ratione voluptatem sequi nesciunt, neque porro"
- + " quisquam est, qui dolorem ipsum, quia dolor sit, amet, "
- + "consectetur, adipisci velit, sed quia non numquam eius modi"
- + " tempora incidunt, ut labore et dolore magnam aliquam quaerat "
- + "voluptatem. ut enim ad minima veniam, quis nostrum exercitationem "
- + "ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi "
- + "consequatur? quis autem vel eum iure reprehenderit, qui in ea "
- + "voluptate velit esse, quam nihil molestiae consequatur, vel illum, "
- + "qui dolorem eum fugiat, quo voluptas nulla pariatur?\n";
-
-assertEquals(893, lipsum.indexOf("lorem ipsum, quia dolor sit, amet"),
- "Lipsum");
-// test a lot of substrings of differing length and start-position.
-for(var i = 0; i < lipsum.length; i += 3) {
- for(var len = 1; i + len < lipsum.length; len += 7) {
- var substring = lipsum.substring(i, i + len);
- var index = -1;
- do {
- index = lipsum.indexOf(substring, index + 1);
- assertTrue(index != -1,
- "Lipsum substring " + i + ".." + (i + len-1) + " not found");
- assertEquals(lipsum.substring(index, index + len), substring,
- "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" +
- index + ".." + (index + len - 1));
- } while (index >= 0 && index < i);
- assertEquals(i, index, "Lipsum match at " + i + ".." + (i + len - 1));
- }
-}
diff --git a/test/mjsunit/string-indexof-2.js b/test/mjsunit/string-indexof-2.js
new file mode 100644
index 00000000..a7c3f600
--- /dev/null
+++ b/test/mjsunit/string-indexof-2.js
@@ -0,0 +1,68 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var lipsum = "lorem ipsum per se esse fugiendum. itaque aiunt hanc quasi "
+ + "naturalem atque insitam in animis nostris inesse notionem, ut "
+ + "alterum esse appetendum, alterum aspernandum sentiamus. Alii autem,"
+ + " quibus ego assentior, cum a philosophis compluribus permulta "
+ + "dicantur, cur nec voluptas in bonis sit numeranda nec in malis "
+ + "dolor, non existimant oportere nimium nos causae confidere, sed et"
+ + " argumentandum et accurate disserendum et rationibus conquisitis de"
+ + " voluptate et dolore disputandum putant.\n"
+ + "Sed ut perspiciatis, unde omnis iste natus error sit voluptatem "
+ + "accusantium doloremque laudantium, totam rem aperiam eaque ipsa,"
+ + "quae ab illo inventore veritatis et quasi architecto beatae vitae "
+ + "dicta sunt, explicabo. nemo enim ipsam voluptatem, quia voluptas"
+ + "sit, aspernatur aut odit aut fugit, sed quia consequuntur magni"
+ + " dolores eos, qui ratione voluptatem sequi nesciunt, neque porro"
+ + " quisquam est, qui dolorem ipsum, quia dolor sit, amet, "
+ + "consectetur, adipisci velit, sed quia non numquam eius modi"
+ + " tempora incidunt, ut labore et dolore magnam aliquam quaerat "
+ + "voluptatem. ut enim ad minima veniam, quis nostrum exercitationem "
+ + "ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi "
+ + "consequatur? quis autem vel eum iure reprehenderit, qui in ea "
+ + "voluptate velit esse, quam nihil molestiae consequatur, vel illum, "
+ + "qui dolorem eum fugiat, quo voluptas nulla pariatur?\n";
+
+assertEquals(893, lipsum.indexOf("lorem ipsum, quia dolor sit, amet"),
+ "Lipsum");
+// Test many substrings of differing lengths and start positions.
+for(var i = 0; i < lipsum.length; i += 3) {
+ for(var len = 1; i + len < lipsum.length; len += 7) {
+ var substring = lipsum.substring(i, i + len);
+ var index = -1;
+ do {
+ index = lipsum.indexOf(substring, index + 1);
+ assertTrue(index != -1,
+ "Lipsum substring " + i + ".." + (i + len-1) + " not found");
+ assertEquals(lipsum.substring(index, index + len), substring,
+ "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" +
+ index + ".." + (index + len - 1));
+ } while (index >= 0 && index < i);
+ assertEquals(i, index, "Lipsum match at " + i + ".." + (i + len - 1));
+ }
+}
diff --git a/test/mjsunit/typeof.js b/test/mjsunit/typeof.js
new file mode 100644
index 00000000..b460fbba
--- /dev/null
+++ b/test/mjsunit/typeof.js
@@ -0,0 +1,40 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nofast-compiler
+
+// The type of a regular expression should be 'function', including in
+// the context of string equality comparisons.
+
+var r = new RegExp;
+assertEquals('function', typeof r);
+assertTrue(typeof r == 'function');
+
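+// Compare through a helper so the engine cannot special-case a literal
+// "typeof x == 'string'" comparison.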
+function test(x, y) { return x == y; }
+assertFalse(test('object', typeof r));
+
+assertFalse(typeof r == 'object');
diff --git a/test/mjsunit/unicode-case-overoptimization.js b/test/mjsunit/unicode-case-overoptimization.js
new file mode 100644
index 00000000..bfda48c7
--- /dev/null
+++ b/test/mjsunit/unicode-case-overoptimization.js
@@ -0,0 +1,35 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test all non-ASCII characters individually to ensure that our optimizations
+// didn't break anything.
+for (var i = 0x80; i <= 0xfffe; i++) {
+ var c = String.fromCharCode(i);
+ var c2 = String.fromCharCode(i + 1);
+ var re = new RegExp("[" + c + "-" + c2 + "]", "i");
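+  // A case-insensitive two-character range must match its own lower bound.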
+ assertTrue(re.test(c), i);
+}
diff --git a/test/mozilla/testcfg.py b/test/mozilla/testcfg.py
index 477b2b2f..d1c1767a 100644
--- a/test/mozilla/testcfg.py
+++ b/test/mozilla/testcfg.py
@@ -103,6 +103,7 @@ class MozillaTestConfiguration(test.TestConfiguration):
for excluded in EXCLUDED:
if excluded in dirs:
dirs.remove(excluded)
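+      # Sort so the walk visits subdirectories in a deterministic order.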
+ dirs.sort()
root_path = root[len(self.root):].split(os.path.sep)
root_path = current_path + [x for x in root_path if x]
framework = []
@@ -113,6 +114,7 @@ class MozillaTestConfiguration(test.TestConfiguration):
if exists(script):
framework.append(script)
framework.reverse()
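+      # Likewise, process the test files in a deterministic order.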
+ files.sort()
for file in files:
if (not file in FRAMEWORK) and file.endswith('.js'):
full_path = root_path + [file[:-3]]
diff --git a/test/sputnik/README b/test/sputnik/README
new file mode 100644
index 00000000..3d39a67e
--- /dev/null
+++ b/test/sputnik/README
@@ -0,0 +1,6 @@
+To run the Sputnik tests you must first check out the test suite
+from googlecode.com. The test expectations currently correspond to
+revision 28. To get the tests, run the following command within
+v8/test/sputnik/:
+
+ svn co http://sputniktests.googlecode.com/svn/trunk/ -r28 sputniktests
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
new file mode 100644
index 00000000..16a44c51
--- /dev/null
+++ b/test/sputnik/sputnik.status
@@ -0,0 +1,318 @@
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+prefix sputnik
+def FAIL_OK = FAIL, OKAY
+
+##################### DELIBERATE INCOMPATIBILITIES #####################
+
+# 900066: Deleting elements in .arguments should disconnect the
+# element from the actual arguments. Implementing this is nontrivial
+# and we have no indication that anything on the web depends on this
+# feature.
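+# (That is, after "delete arguments[i]" the element should no longer be
+# linked to the corresponding formal parameter.)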
+S13_A13_T1: FAIL_OK
+S13_A13_T2: FAIL_OK
+S13_A13_T3: FAIL_OK
+
+# These tests check the precision of trigonometric functions. We're
+# slightly off from the implementation in libc (~1e-17), but it's not
+# clear whether we or they are closer to the right answer, or if it
+# even matters.
+S15.8.2.16_A7: PASS || FAIL_OK
+S15.8.2.18_A7: PASS || FAIL_OK
+S15.8.2.13_A23: PASS || FAIL_OK
+
+# We allow calls to regexp exec() with no arguments to fail for
+# compatibility reasons.
+S15.10.6.2_A1_T16: FAIL_OK
+S15.10.6.3_A1_T16: FAIL_OK
+
+# We allow regexps to be called as functions for compatibility reasons.
+S15.10.7_A1_T1: FAIL_OK
+S15.10.7_A1_T2: FAIL_OK
+
+# We allow construct calls to built-in functions, and we allow built-in
+# functions to have prototypes.
+S15.1.2.1_A4.6: FAIL_OK
+S15.1.2.1_A4.7: FAIL_OK
+S15.1.2.2_A9.6: FAIL_OK
+S15.1.2.2_A9.7: FAIL_OK
+S15.1.2.3_A7.6: FAIL_OK
+S15.1.2.3_A7.7: FAIL_OK
+S15.1.2.4_A2.6: FAIL_OK
+S15.1.2.4_A2.7: FAIL_OK
+S15.1.2.5_A2.6: FAIL_OK
+S15.1.2.5_A2.7: FAIL_OK
+S15.1.3.1_A5.6: FAIL_OK
+S15.1.3.1_A5.7: FAIL_OK
+S15.1.3.2_A5.6: FAIL_OK
+S15.1.3.2_A5.7: FAIL_OK
+S15.1.3.3_A5.6: FAIL_OK
+S15.1.3.3_A5.7: FAIL_OK
+S15.1.3.4_A5.6: FAIL_OK
+S15.1.3.4_A5.7: FAIL_OK
+S15.10.6.2_A6: FAIL_OK
+S15.10.6.3_A6: FAIL_OK
+S15.10.6.4_A6: FAIL_OK
+S15.10.6.4_A7: FAIL_OK
+S15.2.4.2_A6: FAIL_OK
+S15.2.4.3_A6: FAIL_OK
+S15.2.4.4_A6: FAIL_OK
+S15.2.4.5_A6: FAIL_OK
+S15.2.4.6_A6: FAIL_OK
+S15.2.4.7_A6: FAIL_OK
+S15.3.4.2_A6: FAIL_OK
+S15.4.4.10_A5.6: FAIL_OK
+S15.4.4.10_A5.7: FAIL_OK
+S15.4.4.11_A7.6: FAIL_OK
+S15.4.4.11_A7.7: FAIL_OK
+S15.4.4.12_A5.6: FAIL_OK
+S15.4.4.12_A5.7: FAIL_OK
+S15.4.4.13_A5.6: FAIL_OK
+S15.4.4.13_A5.7: FAIL_OK
+S15.4.4.2_A4.6: FAIL_OK
+S15.4.4.3_A4.6: FAIL_OK
+S15.4.4.4_A4.6: FAIL_OK
+S15.4.4.4_A4.7: FAIL_OK
+S15.4.4.5_A6.6: FAIL_OK
+S15.4.4.5_A6.7: FAIL_OK
+S15.4.4.6_A5.6: FAIL_OK
+S15.4.4.6_A5.7: FAIL_OK
+S15.4.4.7_A6.6: FAIL_OK
+S15.4.4.7_A6.7: FAIL_OK
+S15.4.4.8_A5.6: FAIL_OK
+S15.4.4.8_A5.7: FAIL_OK
+S15.4.4.9_A5.6: FAIL_OK
+S15.4.4.9_A5.7: FAIL_OK
+S15.5.4.10_A6: FAIL_OK
+S15.5.4.11_A6: FAIL_OK
+S15.5.4.12_A6: FAIL_OK
+S15.5.4.13_A6: FAIL_OK
+S15.5.4.14_A6: FAIL_OK
+S15.5.4.15_A6: FAIL_OK
+S15.5.4.16_A6: FAIL_OK
+S15.5.4.17_A6: FAIL_OK
+S15.5.4.18_A6: FAIL_OK
+S15.5.4.19_A6: FAIL_OK
+S15.5.4.4_A6: FAIL_OK
+S15.5.4.5_A6: FAIL_OK
+S15.5.4.6_A6: FAIL_OK
+S15.5.4.7_A6: FAIL_OK
+S15.5.4.9_A6: FAIL_OK
+S15.3.4.3_A12: FAIL_OK
+S15.3.4.4_A12: FAIL_OK
+S15.5.4.8_A6: FAIL_OK
+
+# We are silent in some regexp cases where the spec wants us to give
+# errors, for compatibility.
+S15.10.2.11_A1_T2: FAIL
+S15.10.2.11_A1_T3: FAIL
+S15.10.4.1_A5_T1: FAIL
+S15.10.4.1_A5_T2: FAIL
+S15.10.4.1_A5_T3: FAIL
+S15.10.4.1_A5_T4: FAIL
+S15.10.4.1_A5_T5: FAIL
+S15.10.4.1_A5_T6: FAIL
+S15.10.4.1_A5_T7: FAIL
+S15.10.4.1_A5_T8: FAIL
+S15.10.4.1_A5_T9: FAIL
+
+# We are more lenient in which string character escapes we allow than
+# the spec (7.8.4 p. 19) wants us to be. This is for compatibility.
+S7.8.4_A4.3_T2: FAIL_OK
+S7.8.4_A6.2_T2: FAIL_OK
+S7.8.4_A6.1_T4: FAIL_OK
+S7.8.4_A4.3_T4: FAIL_OK
+S7.8.4_A7.2_T2: FAIL_OK
+S7.8.4_A7.1_T4: FAIL_OK
+S7.8.4_A6.4_T2: FAIL_OK
+S7.8.4_A7.4_T2: FAIL_OK
+S7.8.4_A7.2_T4: FAIL_OK
+S7.8.4_A4.3_T6: FAIL_OK
+S7.8.4_A7.2_T6: FAIL_OK
+S7.8.4_A4.3_T1: FAIL_OK
+S7.8.4_A6.2_T1: FAIL_OK
+S7.8.4_A4.3_T3: FAIL_OK
+S7.8.4_A7.2_T1: FAIL_OK
+S7.8.4_A6.4_T1: FAIL_OK
+S7.8.4_A7.2_T3: FAIL_OK
+S7.8.4_A7.4_T1: FAIL_OK
+S7.8.4_A4.3_T5: FAIL_OK
+S7.8.4_A7.2_T5: FAIL_OK
+
+# We allow some keywords to be used as identifiers
+S7.5.3_A1.17: FAIL_OK
+S7.5.3_A1.26: FAIL_OK
+S7.5.3_A1.18: FAIL_OK
+S7.5.3_A1.27: FAIL_OK
+S7.5.3_A1.28: FAIL_OK
+S7.5.3_A1.19: FAIL_OK
+S7.5.3_A1.29: FAIL_OK
+S7.5.3_A1.1: FAIL_OK
+S7.5.3_A1.2: FAIL_OK
+S7.5.3_A1.3: FAIL_OK
+S7.5.3_A1.4: FAIL_OK
+S7.5.3_A1.5: FAIL_OK
+S7.5.3_A1.8: FAIL_OK
+S7.5.3_A1.9: FAIL_OK
+S7.5.3_A1.10: FAIL_OK
+S7.5.3_A1.11: FAIL_OK
+S7.5.3_A1.21: FAIL_OK
+S7.5.3_A1.12: FAIL_OK
+S7.5.3_A1.30: FAIL_OK
+S7.5.3_A1.31: FAIL_OK
+S7.5.3_A1.13: FAIL_OK
+S7.5.3_A1.22: FAIL_OK
+S7.5.3_A1.23: FAIL_OK
+S7.5.3_A1.14: FAIL_OK
+S7.5.3_A1.15: FAIL_OK
+S7.5.3_A1.24: FAIL_OK
+S7.5.3_A1.25: FAIL_OK
+S7.5.3_A1.16: FAIL_OK
+
+# This checks for non-262 behavior
+S12.6.4_A14_T1: PASS || FAIL_OK
+S12.6.4_R1: PASS || FAIL_OK
+S12.6.4_R2: PASS || FAIL_OK
+S8.4_D2.1: PASS || FAIL_OK
+S8.4_D2.2: PASS || FAIL_OK
+S8.4_D2.3: PASS || FAIL_OK
+S8.4_D2.4: PASS || FAIL_OK
+S8.4_D2.5: PASS || FAIL_OK
+S8.4_D2.6: PASS || FAIL_OK
+S8.4_D2.7: PASS || FAIL_OK
+S8.4_D1.1: PASS || FAIL_OK
+S13.2_D1.2: PASS || FAIL_OK
+S11.4.3_D1.2: PASS || FAIL_OK
+S7.6_D1: PASS || FAIL_OK
+S7.6_D2: PASS || FAIL_OK
+S15.1.2.2_D1.2: PASS || FAIL_OK
+S13_D1_T1: PASS || FAIL_OK
+S14_D4_T3: PASS || FAIL_OK
+S14_D7: PASS || FAIL_OK
+S15.5.4.11_D1.1_T2: PASS || FAIL_OK
+S15.5.4.11_D1.1_T4: PASS || FAIL_OK
+S15.5.2_D2: PASS || FAIL_OK
+S15.5.4.11_D1.1_T1: PASS || FAIL_OK
+S15.5.4.11_D1.1_T3: PASS || FAIL_OK
+S12.6.4_D1: PASS || FAIL_OK
+
+# We deliberately don't throw type errors when iterating through the
+# undefined object
+S9.9_A1: FAIL_OK
+S9.9_A2: FAIL_OK
+
+# We allow function declarations within statements
+S12.5_A9_T1: FAIL_OK
+S12.5_A9_T2: FAIL_OK
+# S12.6.2_A13_T3: FAIL_OK
+# S12.5_A9_T3: FAIL_OK
+# S12.6.1_A13_T3: FAIL_OK
+S12.1_A1: FAIL_OK
+S12.6.2_A13_T1: FAIL_OK
+S12.6.2_A13_T2: FAIL_OK
+S12.6.1_A13_T1: FAIL_OK
+S12.6.1_A13_T2: FAIL_OK
+S12.6.4_A13_T1: FAIL_OK
+S12.6.4_A13_T2: FAIL_OK
+# S12.6.4_A13_T3: FAIL_OK
+S15.3.4.2_A1_T1: FAIL_OK
+
+# Linux and Mac default to the extended 80-bit floating point format in the FPU.
+# We follow the other major JS engines by keeping this default.
+S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos
+S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos
+
+##################### SKIPPED TESTS #####################
+
+# These tests take a looong time to run in debug mode.
+S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
+S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
+
+
+# These tests fail because we had to add bugs to be compatible with JSC. See
+# http://code.google.com/p/chromium/issues/detail?id=1717
+S15.4.4_A1.1_T2: FAIL_OK
+S15.5.4.1_A1_T2: FAIL_OK
+S15.5.4_A1: FAIL_OK
+S15.5.4_A3: FAIL_OK
+S15.9.5.10_A1_T2: FAIL_OK
+S15.9.5.11_A1_T2: FAIL_OK
+S15.9.5.12_A1_T2: FAIL_OK
+S15.9.5.13_A1_T2: FAIL_OK
+S15.9.5.14_A1_T2: FAIL_OK
+S15.9.5.15_A1_T2: FAIL_OK
+S15.9.5.16_A1_T2: FAIL_OK
+S15.9.5.17_A1_T2: FAIL_OK
+S15.9.5.18_A1_T2: FAIL_OK
+S15.9.5.19_A1_T2: FAIL_OK
+S15.9.5.20_A1_T2: FAIL_OK
+S15.9.5.21_A1_T2: FAIL_OK
+S15.9.5.22_A1_T2: FAIL_OK
+S15.9.5.23_A1_T2: FAIL_OK
+S15.9.5.24_A1_T2: FAIL_OK
+S15.9.5.25_A1_T2: FAIL_OK
+S15.9.5.26_A1_T2: FAIL_OK
+S15.9.5.27_A1_T2: FAIL_OK
+S15.9.5.28_A1_T2: FAIL_OK
+S15.9.5.29_A1_T2: FAIL_OK
+S15.9.5.2_A1_T2: FAIL_OK
+S15.9.5.30_A1_T2: FAIL_OK
+S15.9.5.31_A1_T2: FAIL_OK
+S15.9.5.32_A1_T2: FAIL_OK
+S15.9.5.33_A1_T2: FAIL_OK
+S15.9.5.34_A1_T2: FAIL_OK
+S15.9.5.35_A1_T2: FAIL_OK
+S15.9.5.36_A1_T2: FAIL_OK
+S15.9.5.37_A1_T2: FAIL_OK
+S15.9.5.38_A1_T2: FAIL_OK
+S15.9.5.39_A1_T2: FAIL_OK
+S15.9.5.3_A1_T2: FAIL_OK
+S15.9.5.40_A1_T2: FAIL_OK
+S15.9.5.41_A1_T2: FAIL_OK
+S15.9.5.42_A1_T2: FAIL_OK
+S15.9.5.4_A1_T2: FAIL_OK
+S15.9.5.5_A1_T2: FAIL_OK
+S15.9.5.6_A1_T2: FAIL_OK
+S15.9.5.7_A1_T2: FAIL_OK
+S15.9.5.8_A1_T2: FAIL_OK
+S15.9.5.9_A1_T2: FAIL_OK
+
+# Regexps have type "function", not "object".
+S11.4.3_A3.6: FAIL_OK
+S15.10.7_A3_T2: FAIL_OK
+S15.10.7_A3_T1: FAIL_OK
diff --git a/test/sputnik/testcfg.py b/test/sputnik/testcfg.py
new file mode 100644
index 00000000..65923822
--- /dev/null
+++ b/test/sputnik/testcfg.py
@@ -0,0 +1,112 @@
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import os
+from os.path import join, exists
+import sys
+import test
+import time
+
+
+class SputnikTestCase(test.TestCase):
+
+ def __init__(self, case, path, context, mode):
+ super(SputnikTestCase, self).__init__(context, path)
+ self.case = case
+ self.mode = mode
+ self.tmpfile = None
+ self.source = None
+
+ def IsNegative(self):
+ return '@negative' in self.GetSource()
+
+ def IsFailureOutput(self, output):
+ if output.exit_code != 0:
+ return True
+ out = output.stdout
+ return "SputnikError" in out
+
+ def BeforeRun(self):
+ self.tmpfile = sputnik.TempFile(suffix='.js', prefix='sputnik-', text=True)
+ self.tmpfile.Write(self.GetSource())
+ self.tmpfile.Close()
+
+ def AfterRun(self):
+ self.tmpfile.Dispose()
+ self.tmpfile = None
+
+ def GetCommand(self):
+ result = [self.context.GetVm(self.mode)]
+ result.append(self.tmpfile.name)
+ return result
+
+ def GetLabel(self):
+ return "%s sputnik %s" % (self.mode, self.GetName())
+
+ def GetName(self):
+ return self.path[-1]
+
+ def GetSource(self):
+ if not self.source:
+ self.source = self.case.GetSource()
+ return self.source
+
+class SputnikTestConfiguration(test.TestConfiguration):
+
+ def __init__(self, context, root):
+ super(SputnikTestConfiguration, self).__init__(context, root)
+
+ def ListTests(self, current_path, path, mode):
+ # Import the sputnik test runner script as a module
+ testroot = join(self.root, 'sputniktests')
+ modroot = join(testroot, 'tools')
+ sys.path.append(modroot)
+ import sputnik
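+    # Publish the module globally; SputnikTestCase.BeforeRun uses it later.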
+ globals()['sputnik'] = sputnik
+ test_suite = sputnik.TestSuite(testroot)
+ test_suite.Validate()
+ tests = test_suite.EnumerateTests([])
+ result = []
+ for test in tests:
+ full_path = current_path + [test.GetPath()[-1]]
+ if self.Contains(path, full_path):
+ case = SputnikTestCase(test, full_path, self.context, mode)
+ result.append(case)
+ return result
+
+ def GetBuildRequirements(self):
+ return ['sample', 'sample=shell']
+
+ def GetTestStatus(self, sections, defs):
+ status_file = join(self.root, 'sputnik.status')
+ if exists(status_file):
+ test.ReadConfigurationInto(status_file, sections, defs)
+
+
+def GetConfiguration(context, root):
+ return SputnikTestConfiguration(context, root)
diff --git a/tools/codemap.js b/tools/codemap.js
index 404127f2..af511f64 100644
--- a/tools/codemap.js
+++ b/tools/codemap.js
@@ -244,7 +244,7 @@ devtools.profiler.CodeMap.CodeEntry.prototype.toString = function() {
devtools.profiler.CodeMap.NameGenerator = function() {
- this.knownNames_ = [];
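+  // A plain object serves as a dictionary of names seen so far.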
+ this.knownNames_ = {};
};
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 5e2bb88e..ba7224b4 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -81,6 +81,7 @@
['OS=="linux"', {
'cflags!': [
'-O2',
+ '-Os',
],
'cflags': [
'-fomit-frame-pointer',
@@ -156,8 +157,8 @@
'target_name': 'v8_snapshot',
'type': '<(library)',
'dependencies': [
- 'mksnapshot',
- 'js2c',
+ 'mksnapshot#host',
+ 'js2c#host',
'v8_base',
],
'include_dirs+': [
@@ -183,8 +184,9 @@
{
'target_name': 'v8_nosnapshot',
'type': '<(library)',
+ 'toolsets': ['host', 'target'],
'dependencies': [
- 'js2c',
+ 'js2c#host',
'v8_base',
],
'include_dirs+': [
@@ -194,10 +196,21 @@
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'../../src/snapshot-empty.cc',
],
+ 'conditions': [
+ # The ARM assembler assumes the host is 32 bits, so force building
+ # 32-bit host tools.
+ # TODO(piman): This assumes that the host is ia32 or amd64. Fixing the
+      # code would be better.
+ ['target_arch=="arm" and _toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
+ }]
+ ]
},
{
'target_name': 'v8_base',
'type': '<(library)',
+ 'toolsets': ['host', 'target'],
'include_dirs+': [
'../../src',
],
@@ -293,7 +306,6 @@
'../../src/jsregexp.h',
'../../src/list-inl.h',
'../../src/list.h',
- '../../src/location.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
@@ -394,6 +406,7 @@
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
+ '../../src/arm/constants-arm.cc',
'../../src/arm/cpu-arm.cc',
'../../src/arm/debug-arm.cc',
'../../src/arm/disasm-arm.cc',
@@ -412,6 +425,16 @@
'../../src/arm/virtual-frame-arm.cc',
'../../src/arm/virtual-frame-arm.h',
],
+ 'conditions': [
+ # The ARM assembler assumes the host is 32 bits, so force building
+ # 32-bit host tools.
+ # TODO(piman): This assumes that the host is ia32 or amd64. Fixing
+          # the code would be better.
+ ['_toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
+ }]
+ ]
}],
['target_arch=="ia32"', {
'include_dirs+': [
@@ -483,6 +506,17 @@
],
}
],
+ ['OS=="openbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../../src/platform-openbsd.cc',
+ '../../src/platform-posix.cc'
+ ],
+ }
+ ],
['OS=="mac"', {
'sources': [
'../../src/platform-macos.cc',
@@ -508,6 +542,7 @@
{
'target_name': 'js2c',
'type': 'none',
+ 'toolsets': ['host'],
'variables': {
'library_files': [
'../../src/runtime.js',
@@ -550,6 +585,7 @@
{
'target_name': 'mksnapshot',
'type': 'executable',
+ 'toolsets': ['host'],
'dependencies': [
'v8_nosnapshot',
],
@@ -559,6 +595,16 @@
'sources': [
'../../src/mksnapshot.cc',
],
+ 'conditions': [
+ # The ARM assembler assumes the host is 32 bits, so force building
+ # 32-bit host tools.
+ # TODO(piman): This assumes that the host is ia32 or amd64. Fixing
+          # the code would be better.
+ ['target_arch=="arm" and _toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
+ }]
+ ]
},
{
'target_name': 'v8_shell',
diff --git a/tools/js2c.py b/tools/js2c.py
index 2b7dbdfb..b889530d 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -301,7 +301,7 @@ def JS2C(source, target, env):
else:
ids.append((id, len(lines)))
source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
- source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
+ source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
# Build delay support functions
get_index_cases = [ ]
diff --git a/tools/presubmit.py b/tools/presubmit.py
index c4f78536..3f27c001 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -28,9 +28,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import md5
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
+import pickle
import re
import sys
import subprocess
@@ -93,6 +95,50 @@ whitespace/todo
""".split()
+class FileContentsCache(object):
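+  """Caches md5 digests of file contents so unchanged files can be skipped."""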
+
+ def __init__(self, sums_file_name):
+ self.sums = {}
+ self.sums_file_name = sums_file_name
+
+ def Load(self):
+ try:
+ sums_file = None
+ try:
+ sums_file = open(self.sums_file_name, 'r')
+ self.sums = pickle.load(sums_file)
+ except IOError:
+ # File might not exist, this is OK.
+ pass
+ finally:
+ if sums_file:
+ sums_file.close()
+
+ def Save(self):
+ try:
+ sums_file = open(self.sums_file_name, 'w')
+ pickle.dump(self.sums, sums_file)
+ finally:
+ sums_file.close()
+
+ def FilterUnchangedFiles(self, files):
+ changed_or_new = []
+ for file in files:
+ try:
+ handle = open(file, "r")
+ file_sum = md5.new(handle.read()).digest()
+ if not file in self.sums or self.sums[file] != file_sum:
+ changed_or_new.append(file)
+ self.sums[file] = file_sum
+ finally:
+ handle.close()
+ return changed_or_new
+
+ def RemoveFile(self, file):
+ if file in self.sums:
+ self.sums.pop(file)
+
+
class SourceFileProcessor(object):
"""
Utility class that can run through a directory structure, find all relevant
@@ -108,7 +154,7 @@ class SourceFileProcessor(object):
return True
def IgnoreDir(self, name):
- return name.startswith('.') or name == 'data'
+ return name.startswith('.') or name == 'data' or name == 'sputniktests'
def IgnoreFile(self, name):
return name.startswith('.')
@@ -137,7 +183,7 @@ class CppLintProcessor(SourceFileProcessor):
or (name == 'third_party'))
IGNORE_LINT = ['flag-definitions.h']
-
+
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
or (name in CppLintProcessor.IGNORE_LINT))
@@ -146,13 +192,32 @@ class CppLintProcessor(SourceFileProcessor):
return ['src', 'public', 'samples', join('test', 'cctest')]
def ProcessFiles(self, files, path):
+ good_files_cache = FileContentsCache('.cpplint-cache')
+ good_files_cache.Load()
+ files = good_files_cache.FilterUnchangedFiles(files)
+ if len(files) == 0:
+ print 'No changes in files detected. Skipping cpplint check.'
+ return True
+
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
command = ['cpplint.py', '--filter', filt] + join(files)
local_cpplint = join(path, "tools", "cpplint.py")
if exists(local_cpplint):
command = ['python', local_cpplint, '--filter', filt] + join(files)
- process = subprocess.Popen(command)
- return process.wait() == 0
+
+ process = subprocess.Popen(command, stderr=subprocess.PIPE)
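+  # cpplint writes its findings to stderr as "<file>:<line>:" (or
+  # "<file>(<line>)"); the pattern below captures the offending file name.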
+ LINT_ERROR_PATTERN = re.compile(r'^(.+)[:(]\d+[:)]')
+ while True:
+ out_line = process.stderr.readline()
+    if out_line == '' and process.poll() is not None:
+ break
+ sys.stderr.write(out_line)
+ m = LINT_ERROR_PATTERN.match(out_line)
+ if m:
+ good_files_cache.RemoveFile(m.group(1))
+
+ good_files_cache.Save()
+ return process.returncode == 0
COPYRIGHT_HEADER_PATTERN = re.compile(
diff --git a/tools/process-heap-prof.py b/tools/process-heap-prof.py
index ff83952e..6a2c3978 100755
--- a/tools/process-heap-prof.py
+++ b/tools/process-heap-prof.py
@@ -40,9 +40,14 @@
# to get JS constructor profile
-import csv, sys, time
+import csv, sys, time, optparse
-def process_logfile(filename, itemname):
+def ProcessLogFile(filename, options):
+ if options.js_cons_profile:
+ itemname = 'heap-js-cons-item'
+ else:
+ itemname = 'heap-sample-item'
+
first_call_time = None
sample_time = 0.0
sampling = False
@@ -68,13 +73,48 @@ def process_logfile(filename, itemname):
print('END_SAMPLE %.2f' % sample_time)
sampling = False
elif row[0] == itemname and sampling:
- print('%s %d' % (row[1], int(row[3])))
+ print(row[1]),
+ if options.count:
+ print('%d' % (int(row[2]))),
+ if options.size:
+ print('%d' % (int(row[3]))),
+ print
finally:
logfile.close()
except:
sys.exit('can\'t open %s' % filename)
-if sys.argv[1] == '--js-cons-profile':
- process_logfile(sys.argv[2], 'heap-js-cons-item')
-else:
- process_logfile(sys.argv[1], 'heap-sample-item')
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("--js_cons_profile", help="Constructor profile",
+ default=False, action="store_true")
+ result.add_option("--size", help="Report object size",
+ default=False, action="store_true")
+ result.add_option("--count", help="Report object count",
+ default=False, action="store_true")
+ return result
+
+
+def ProcessOptions(options):
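+  # Default to reporting object sizes when neither flag is given.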
+ if not options.size and not options.count:
+ options.size = True
+ return True
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ if not ProcessOptions(options):
+ parser.print_help()
+    sys.exit()
+
+ if not args:
+ print "Missing logfile"
+    sys.exit()
+
+ ProcessLogFile(args[0], options)
+
+
+if __name__ == '__main__':
+ sys.exit(Main())
diff --git a/tools/test.py b/tools/test.py
index 586925ae..75b4f61f 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -359,8 +359,19 @@ class TestCase(object):
self.Cleanup()
return TestOutput(self, full_command, output)
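+  # Per-run hooks; subclasses (e.g. SputnikTestCase) override these to set
+  # up and tear down temporary state around a single run.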
+ def BeforeRun(self):
+ pass
+
+ def AfterRun(self):
+ pass
+
def Run(self):
- return self.RunCommand(self.GetCommand())
+ self.BeforeRun()
+ try:
+ result = self.RunCommand(self.GetCommand())
+ finally:
+ self.AfterRun()
+ return result
def Cleanup(self):
return
@@ -1094,6 +1105,8 @@ def BuildOptions():
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
+ result.add_option("--snapshot", help="Run the tests with snapshot turned on",
+ default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
@@ -1139,6 +1152,8 @@ def ProcessOptions(options):
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
+ if options.snapshot:
+ options.scons_flags.append("snapshot=on")
return True
diff --git a/tools/utils.py b/tools/utils.py
index 78d1e0d6..196bb055 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -55,6 +55,8 @@ def GuessOS():
return 'win32'
elif id == 'FreeBSD':
return 'freebsd'
+ elif id == 'OpenBSD':
+ return 'openbsd'
else:
return None
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index d2af6262..3ffd1829 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -214,6 +214,10 @@
9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
+ 9FBE03DE10BD409900F8BFBA /* fast-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */; };
+ 9FBE03DF10BD409900F8BFBA /* fast-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */; };
+ 9FBE03E210BD40EA00F8BFBA /* fast-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03E110BD40EA00F8BFBA /* fast-codegen-ia32.cc */; };
+ 9FBE03E510BD412600F8BFBA /* fast-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */; };
9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
9FC86ABE0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
/* End PBXBuildFile section */
@@ -550,6 +554,10 @@
9F4B7B880FCC877A00DC4117 /* log-utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-utils.h"; sourceTree = "<group>"; };
9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "func-name-inferrer.cc"; sourceTree = "<group>"; };
9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "func-name-inferrer.h"; sourceTree = "<group>"; };
+ 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-codegen.cc"; sourceTree = "<group>"; };
+ 9FBE03DD10BD409900F8BFBA /* fast-codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fast-codegen.h"; sourceTree = "<group>"; };
+ 9FBE03E110BD40EA00F8BFBA /* fast-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-ia32.cc"; path = "ia32/fast-codegen-ia32.cc"; sourceTree = "<group>"; };
+ 9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-arm.cc"; path = "arm/fast-codegen-arm.cc"; sourceTree = "<group>"; };
9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
/* End PBXFileReference section */
@@ -715,6 +723,10 @@
897FF1310E719B8F00D62E90 /* execution.h */,
897FF1320E719B8F00D62E90 /* factory.cc */,
897FF1330E719B8F00D62E90 /* factory.h */,
+ 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */,
+ 9FBE03DD10BD409900F8BFBA /* fast-codegen.h */,
+ 9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */,
+ 9FBE03E110BD40EA00F8BFBA /* fast-codegen-ia32.cc */,
89471C7F0EB23EE400B6874B /* flag-definitions.h */,
897FF1350E719B8F00D62E90 /* flags.cc */,
897FF1360E719B8F00D62E90 /* flags.h */,
@@ -1225,6 +1237,8 @@
9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */,
8981F6001010501900D1520E /* frame-element.cc in Sources */,
9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
+ 9FBE03DE10BD409900F8BFBA /* fast-codegen.cc in Sources */,
+ 9FBE03E210BD40EA00F8BFBA /* fast-codegen-ia32.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1332,6 +1346,8 @@
9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */,
8981F6011010502800D1520E /* frame-element.cc in Sources */,
9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
+ 9FBE03DF10BD409900F8BFBA /* fast-codegen.cc in Sources */,
+ 9FBE03E510BD412600F8BFBA /* fast-codegen-arm.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index fc7402ae..6b473597 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -557,10 +557,6 @@
>
</File>
<File
- RelativePath="..\..\src\location.h"
- >
- </File>
- <File
RelativePath="..\..\src\log.cc"
>
</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index fca4a960..afb4f74b 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -561,10 +561,6 @@
>
</File>
<File
- RelativePath="..\..\src\location.h"
- >
- </File>
- <File
RelativePath="..\..\src\log.cc"
>
</File>