From e0cee9b3ed82e2391fd85d118aeaa4ea361c687d Mon Sep 17 00:00:00 2001 From: Ben Murdoch Date: Wed, 25 May 2011 10:26:03 +0100 Subject: Update V8 to r7079 as required by WebKit r80534. Change-Id: I487c152e485d5a40b68997d7c0d2f1fba5da0834 --- .gitignore | 5 + Android.v8common.mk | 1 + ChangeLog | 62 + SConstruct | 8 +- V8_MERGE_REVISION | 5 +- copy-new-sources | 1 - samples/shell.cc | 3 + src/SConscript | 5 + src/accessors.cc | 15 + src/api.cc | 9 +- src/arguments.h | 2 +- src/arm/assembler-arm.cc | 110 +- src/arm/assembler-arm.h | 29 +- src/arm/builtins-arm.cc | 27 +- src/arm/code-stubs-arm.cc | 1271 ++++++++-- src/arm/code-stubs-arm.h | 89 +- src/arm/codegen-arm.cc | 65 +- src/arm/constants-arm.h | 8 + src/arm/cpu-arm.cc | 5 + src/arm/deoptimizer-arm.cc | 66 +- src/arm/full-codegen-arm.cc | 1082 +++++++-- src/arm/ic-arm.cc | 69 +- src/arm/lithium-arm.cc | 132 +- src/arm/lithium-arm.h | 102 +- src/arm/lithium-codegen-arm.cc | 711 +++--- src/arm/lithium-codegen-arm.h | 92 +- src/arm/lithium-gap-resolver-arm.cc | 303 +++ src/arm/lithium-gap-resolver-arm.h | 84 + src/arm/macro-assembler-arm.cc | 331 ++- src/arm/macro-assembler-arm.h | 94 +- src/arm/regexp-macro-assembler-arm.cc | 71 +- src/arm/regexp-macro-assembler-arm.h | 3 +- src/arm/simulator-arm.cc | 65 +- src/arm/simulator-arm.h | 15 +- src/arm/stub-cache-arm.cc | 118 +- src/arm/virtual-frame-arm.cc | 12 +- src/arm/virtual-frame-arm.h | 2 +- src/array.js | 65 +- src/assembler.cc | 43 +- src/assembler.h | 43 +- src/bootstrapper.cc | 38 + src/builtins.cc | 20 +- src/builtins.h | 217 +- src/code-stubs.h | 3 - src/compiler.cc | 85 +- src/compiler.h | 5 +- src/cpu-profiler-inl.h | 7 +- src/cpu-profiler.cc | 140 +- src/cpu-profiler.h | 48 +- src/d8.cc | 10 +- src/d8.js | 618 +++++ src/date.js | 7 +- src/debug-debugger.js | 121 + src/debug.cc | 11 +- src/execution.cc | 5 + src/extensions/experimental/break-iterator.cc | 249 ++ src/extensions/experimental/break-iterator.h | 89 + src/extensions/experimental/experimental.gyp | 2 + src/extensions/experimental/i18n-extension.cc | 20 + src/flag-definitions.h | 20 +- src/frame-element.h | 4 + src/full-codegen.cc | 20 +- src/full-codegen.h | 48 +- src/gdb-jit.cc | 139 +- src/handles-inl.h | 4 +- src/handles.cc | 71 +- src/handles.h | 69 +- src/heap-profiler.cc | 45 +- src/heap-profiler.h | 4 +- src/heap.cc | 213 +- src/heap.h | 91 +- src/hydrogen-instructions.cc | 370 +-- src/hydrogen-instructions.h | 1118 ++++----- src/hydrogen.cc | 1851 +++++++------- src/hydrogen.h | 329 ++- src/ia32/assembler-ia32.cc | 4 +- src/ia32/assembler-ia32.h | 16 +- src/ia32/builtins-ia32.cc | 23 +- src/ia32/code-stubs-ia32.cc | 214 +- src/ia32/code-stubs-ia32.h | 42 +- src/ia32/codegen-ia32.cc | 65 +- src/ia32/deoptimizer-ia32.cc | 72 +- src/ia32/full-codegen-ia32.cc | 532 ++-- src/ia32/ic-ia32.cc | 70 +- src/ia32/lithium-codegen-ia32.cc | 265 +- src/ia32/lithium-codegen-ia32.h | 21 +- src/ia32/lithium-ia32.cc | 78 +- src/ia32/lithium-ia32.h | 83 +- src/ia32/macro-assembler-ia32.cc | 115 +- src/ia32/macro-assembler-ia32.h | 20 +- src/ia32/simulator-ia32.h | 9 +- src/ia32/stub-cache-ia32.cc | 65 +- src/ia32/virtual-frame-ia32.cc | 27 +- src/ia32/virtual-frame-ia32.h | 6 +- src/ic-inl.h | 9 + src/ic.cc | 238 +- src/ic.h | 47 +- src/json.js | 20 +- src/jsregexp.cc | 18 +- src/lithium-allocator.cc | 22 +- src/lithium-allocator.h | 2 - src/lithium.h | 8 +- src/liveedit.cc | 45 +- src/liveobjectlist-inl.h | 90 + src/liveobjectlist.cc | 2538 +++++++++++++++++++- src/liveobjectlist.h | 280 ++- src/log-utils.cc | 2 + src/log.cc | 
151 +- src/log.h | 26 +- src/macro-assembler.h | 7 + src/mark-compact.cc | 19 +- src/messages.js | 10 +- src/objects-inl.h | 10 +- src/objects.cc | 232 +- src/objects.h | 88 +- src/parser.cc | 139 +- src/platform-cygwin.cc | 745 ++++++ src/platform-freebsd.cc | 2 +- src/platform-linux.cc | 7 +- src/platform-macos.cc | 2 +- src/platform-openbsd.cc | 2 +- src/platform-solaris.cc | 2 +- src/platform-win32.cc | 2 +- src/platform.h | 10 +- src/profile-generator-inl.h | 39 +- src/profile-generator.cc | 883 +++---- src/profile-generator.h | 196 +- src/regexp-macro-assembler.cc | 6 +- src/regexp.js | 6 +- src/runtime-profiler.cc | 71 +- src/runtime-profiler.h | 5 +- src/runtime.cc | 1312 +++++++--- src/runtime.h | 30 +- src/runtime.js | 4 +- src/spaces.h | 6 + src/string.js | 14 +- src/stub-cache.cc | 99 +- src/stub-cache.h | 36 +- src/top.cc | 89 +- src/top.h | 14 +- src/type-info.cc | 11 +- src/v8.cc | 5 + src/v8natives.js | 28 +- src/version.cc | 6 +- src/virtual-frame-heavy-inl.h | 6 +- src/x64/assembler-x64-inl.h | 2 +- src/x64/assembler-x64.cc | 38 +- src/x64/assembler-x64.h | 14 +- src/x64/builtins-x64.cc | 90 +- src/x64/code-stubs-x64.cc | 861 +++++-- src/x64/code-stubs-x64.h | 83 +- src/x64/codegen-x64-inl.h | 2 +- src/x64/codegen-x64.cc | 64 +- src/x64/codegen-x64.h | 2 +- src/x64/cpu-x64.cc | 2 +- src/x64/debug-x64.cc | 2 +- src/x64/deoptimizer-x64.cc | 206 +- src/x64/disasm-x64.cc | 12 +- src/x64/frames-x64.cc | 2 +- src/x64/frames-x64.h | 2 +- src/x64/full-codegen-x64.cc | 576 +++-- src/x64/ic-x64.cc | 69 +- src/x64/jump-target-x64.cc | 2 +- src/x64/lithium-codegen-x64.cc | 1270 +++++++++- src/x64/lithium-codegen-x64.h | 27 +- src/x64/lithium-x64.cc | 312 ++- src/x64/lithium-x64.h | 193 +- src/x64/macro-assembler-x64.cc | 220 +- src/x64/macro-assembler-x64.h | 81 +- src/x64/regexp-macro-assembler-x64.cc | 2 +- src/x64/regexp-macro-assembler-x64.h | 2 +- src/x64/simulator-x64.h | 9 +- src/x64/stub-cache-x64.cc | 62 +- src/x64/virtual-frame-x64.cc | 32 +- src/x64/virtual-frame-x64.h | 8 +- test/cctest/cctest.status | 21 - test/cctest/test-api.cc | 273 ++- test/cctest/test-compiler.cc | 2 +- test/cctest/test-cpu-profiler.cc | 10 +- test/cctest/test-debug.cc | 3 +- test/cctest/test-decls.cc | 8 +- test/cctest/test-heap.cc | 75 +- test/cctest/test-log-stack-tracer.cc | 32 +- test/cctest/test-log.cc | 23 +- test/cctest/test-mark-compact.cc | 11 +- test/cctest/test-parsing.cc | 11 +- test/cctest/test-profile-generator.cc | 6 +- test/cctest/test-serialize.cc | 5 +- test/es5conform/es5conform.status | 136 +- test/mjsunit/array-concat.js | 44 +- test/mjsunit/array-join.js | 25 + test/mjsunit/compiler/regress-valueof.js | 35 + test/mjsunit/fuzz-natives.js | 3 +- test/mjsunit/indexed-value-properties.js | 56 + test/mjsunit/mjsunit.js | 7 + test/mjsunit/mjsunit.status | 18 - test/mjsunit/override-eval-with-non-function.js | 36 + test/mjsunit/regexp.js | 14 + test/mjsunit/regress/regress-1105.js | 38 - test/mjsunit/regress/regress-1145.js | 54 + test/mjsunit/regress/regress-1146.js | 48 + test/mjsunit/regress/regress-1149.js | 39 + test/mjsunit/regress/regress-1151.js | 49 + test/mjsunit/regress/regress-1156.js | 49 + test/mjsunit/regress/regress-1160.js | 46 + test/mjsunit/regress/regress-1166.js | 35 + test/mjsunit/regress/regress-1167.js | 72 + test/mjsunit/regress/regress-1170.js | 66 + test/mjsunit/regress/regress-1172-bis.js | 37 + test/mjsunit/regress/regress-1172.js | 39 + test/mjsunit/regress/regress-1174.js | 43 + test/mjsunit/regress/regress-1176.js | 33 + 
test/mjsunit/regress/regress-1181.js | 54 + test/mjsunit/regress/regress-1184.js | 47 + test/mjsunit/regress/regress-1207.js | 35 + test/mjsunit/regress/regress-1209.js | 34 + test/mjsunit/regress/regress-1210.js | 48 + test/mjsunit/regress/regress-1213.js | 43 + test/mjsunit/regress/regress-1218.js | 29 + test/mjsunit/regress/regress-crbug-72736.js | 37 + test/mjsunit/strict-mode.js | 615 ++++- .../mjsunit/tools/tickprocessor-test-func-info.log | 10 +- test/sputnik/README | 4 +- test/sputnik/sputnik.status | 49 +- test/sputnik/testcfg.py | 3 +- tools/disasm.py | 92 + tools/grokdump.py | 840 +++++++ tools/gyp/v8.gyp | 2 + tools/linux-tick-processor.py | 78 - tools/ll_prof.py | 82 +- tools/profile.js | 141 +- tools/splaytree.py | 226 -- tools/tickprocessor.js | 75 +- tools/tickprocessor.py | 571 ----- tools/utils.py | 2 + tools/v8.xcodeproj/project.pbxproj | 6 + tools/visual_studio/x64.vsprops | 1 + tools/windows-tick-processor.py | 137 -- 238 files changed, 21525 insertions(+), 8165 deletions(-) delete mode 100755 copy-new-sources create mode 100644 src/arm/lithium-gap-resolver-arm.cc create mode 100644 src/arm/lithium-gap-resolver-arm.h create mode 100644 src/extensions/experimental/break-iterator.cc create mode 100644 src/extensions/experimental/break-iterator.h create mode 100644 src/platform-cygwin.cc create mode 100644 test/mjsunit/compiler/regress-valueof.js create mode 100644 test/mjsunit/indexed-value-properties.js create mode 100644 test/mjsunit/override-eval-with-non-function.js delete mode 100644 test/mjsunit/regress/regress-1105.js create mode 100644 test/mjsunit/regress/regress-1145.js create mode 100644 test/mjsunit/regress/regress-1146.js create mode 100644 test/mjsunit/regress/regress-1149.js create mode 100644 test/mjsunit/regress/regress-1151.js create mode 100644 test/mjsunit/regress/regress-1156.js create mode 100644 test/mjsunit/regress/regress-1160.js create mode 100644 test/mjsunit/regress/regress-1166.js create mode 100644 test/mjsunit/regress/regress-1167.js create mode 100644 test/mjsunit/regress/regress-1170.js create mode 100644 test/mjsunit/regress/regress-1172-bis.js create mode 100644 test/mjsunit/regress/regress-1172.js create mode 100644 test/mjsunit/regress/regress-1174.js create mode 100644 test/mjsunit/regress/regress-1176.js create mode 100644 test/mjsunit/regress/regress-1181.js create mode 100644 test/mjsunit/regress/regress-1184.js create mode 100644 test/mjsunit/regress/regress-1207.js create mode 100644 test/mjsunit/regress/regress-1209.js create mode 100644 test/mjsunit/regress/regress-1210.js create mode 100644 test/mjsunit/regress/regress-1213.js create mode 100644 test/mjsunit/regress/regress-1218.js create mode 100644 test/mjsunit/regress/regress-crbug-72736.js create mode 100644 tools/disasm.py create mode 100755 tools/grokdump.py delete mode 100755 tools/linux-tick-processor.py delete mode 100644 tools/splaytree.py delete mode 100644 tools/tickprocessor.py delete mode 100755 tools/windows-tick-processor.py diff --git a/.gitignore b/.gitignore index 974628d8..db57d1bb 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,11 @@ d8_g shell shell_g /obj/ +/test/es5conform/data/ +/test/mozilla/data/ +/test/sputnik/sputniktests/ +/tools/oom_dump/oom_dump +/tools/oom_dump/oom_dump.o /tools/visual_studio/Debug /tools/visual_studio/Release /xcodebuild/ diff --git a/Android.v8common.mk b/Android.v8common.mk index a976a486..0a57ce6e 100644 --- a/Android.v8common.mk +++ b/Android.v8common.mk @@ -114,6 +114,7 @@ ifeq ($(TARGET_ARCH),arm) 
src/arm/jump-target-arm.cc \ src/arm/lithium-arm.cc \ src/arm/lithium-codegen-arm.cc \ + src/arm/lithium-gap-resolver-arm.cc \ src/arm/macro-assembler-arm.cc \ src/arm/regexp-macro-assembler-arm.cc \ src/arm/register-allocator-arm.cc \ diff --git a/ChangeLog b/ChangeLog index 5d2af9c5..44935caf 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,65 @@ +2011-03-07: Version 3.2.0 + + Fixed a number of crash bugs. + + Turned on Crankshaft by default on x64 and ARM. + + Improved Crankshaft for x64 and ARM. + + Implemented more of EcmaScript 5 strict mode. + + +2011-03-02: Version 3.1.8 + + Fixed a number of crash bugs. + + Improved Crankshaft for x64 and ARM. + + Implemented more of EcmaScript 5 strict mode. + + Fixed issue with unaligned reads and writes on ARM. + + Improved heap profiler support. + + +2011-02-28: Version 3.1.7 + + Fixed a number of crash bugs. + + Improved Crankshaft for x64 and ARM. + + Fixed implementation of indexOf/lastIndexOf for sparse + arrays (http://crbug.com/73940). + + Fixed bug in map space compaction (http://crbug.com/59688). + + Added support for direct getter accessors calls on ARM. + + +2011-02-24: Version 3.1.6 + + Fixed a number of crash bugs. + + Added support for Cygwin (issue 64). + + Improved Crankshaft for x64 and ARM. + + Added Crankshaft support for stores to pixel arrays. + + Fixed issue in CPU profiler with Crankshaft. + + +2011-02-16: Version 3.1.5 + + Change RegExp parsing to disallow /(*)/. + + Added GDB JIT support for ARM. + + Fixed several crash bugs. + + Performance improvements on the IA32 platform. + + 2011-02-14: Version 3.1.4 Fixed incorrect compare of prototypes of the global object (issue diff --git a/SConstruct b/SConstruct index 017bcad3..84707e98 100644 --- a/SConstruct +++ b/SConstruct @@ -663,8 +663,8 @@ def GuessToolchain(os): def GuessVisibility(os, toolchain): - if os == 'win32' and toolchain == 'gcc': - # MinGW can't do it. + if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc': + # MinGW / Cygwin can't do it. return 'default' elif os == 'solaris': return 'default' @@ -685,7 +685,7 @@ SIMPLE_OPTIONS = { 'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS }, 'os': { - 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'], + 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'], 'default': OS_GUESS, 'help': 'the os to build for (%s)' % OS_GUESS }, @@ -890,7 +890,7 @@ def VerifyOptions(env): return False if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on': Abort("Profiling on windows only supported for static library.") - if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')): + if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')): Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.") if env['os'] == 'win32' and env['soname'] == 'on': Abort("Shared Object soname not applicable for Windows.") diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION index 07693bd6..120fd679 100644 --- a/V8_MERGE_REVISION +++ b/V8_MERGE_REVISION @@ -1,6 +1,5 @@ We use a V8 revision that has been used for a Chromium release. -http://src.chromium.org/svn/releases/11.0.672.0/DEPS -http://v8.googlecode.com/svn/trunk@6768 plus a partial cherry-pick to fix Android build ... 
-- r7077 - CreateThread() in src/platform-linux.cc +http://src.chromium.org/svn/releases/11.0.696.0/DEPS +http://v8.googlecode.com/svn/trunk@7079 diff --git a/copy-new-sources b/copy-new-sources deleted file mode 100755 index 84fc6843..00000000 --- a/copy-new-sources +++ /dev/null @@ -1 +0,0 @@ -cp -r AUTHORS ChangeLog LICENSE SConstruct benchmarks include samples src test tools ../android/master/external/v8/ diff --git a/samples/shell.cc b/samples/shell.cc index 6b67df6c..64f78f02 100644 --- a/samples/shell.cc +++ b/samples/shell.cc @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -290,11 +291,13 @@ bool ExecuteString(v8::Handle source, } else { v8::Handle result = script->Run(); if (result.IsEmpty()) { + assert(try_catch.HasCaught()); // Print errors that happened during execution. if (report_exceptions) ReportException(&try_catch); return false; } else { + assert(!try_catch.HasCaught()); if (print_result && !result->IsUndefined()) { // If all went well and the result wasn't undefined then print // the returned value. diff --git a/src/SConscript b/src/SConscript index c3561be3..34ca91ca 100755 --- a/src/SConscript +++ b/src/SConscript @@ -153,6 +153,7 @@ SOURCES = { arm/jump-target-arm.cc arm/lithium-arm.cc arm/lithium-codegen-arm.cc + arm/lithium-gap-resolver-arm.cc arm/macro-assembler-arm.cc arm/regexp-macro-assembler-arm.cc arm/register-allocator-arm.cc @@ -233,6 +234,7 @@ SOURCES = { 'os:android': ['platform-linux.cc', 'platform-posix.cc'], 'os:macos': ['platform-macos.cc', 'platform-posix.cc'], 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'], + 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'], 'os:nullos': ['platform-nullos.cc'], 'os:win32': ['platform-win32.cc'], 'mode:release': [], @@ -264,6 +266,9 @@ D8_FILES = { 'os:solaris': [ 'd8-posix.cc' ], + 'os:cygwin': [ + 'd8-posix.cc' + ], 'os:win32': [ 'd8-windows.cc' ], diff --git a/src/accessors.cc b/src/accessors.cc index 2b205d5d..18264254 100644 --- a/src/accessors.cc +++ b/src/accessors.cc @@ -446,6 +446,14 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { bool found_it = false; JSFunction* function = FindInPrototypeChain(object, &found_it); if (!found_it) return Heap::undefined_value(); + while (!function->should_have_prototype()) { + found_it = false; + function = FindInPrototypeChain(object->GetPrototype(), + &found_it); + // There has to be one because we hit the getter. + ASSERT(found_it); + } + if (!function->has_prototype()) { Object* prototype; { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function); @@ -466,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, bool found_it = false; JSFunction* function = FindInPrototypeChain(object, &found_it); if (!found_it) return Heap::undefined_value(); + if (!function->should_have_prototype()) { + // Since we hit this accessor, object will have no prototype property. + return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(), + value, + NONE); + } + if (function->has_initial_map()) { // If the function has allocated the initial map // replace it with a copy containing the new prototype. 
diff --git a/src/api.cc b/src/api.cc index d718c887..555af843 100644 --- a/src/api.cc +++ b/src/api.cc @@ -2286,7 +2286,8 @@ bool v8::Object::Set(v8::Handle key, v8::Handle value, self, key_obj, value_obj, - static_cast(attribs)); + static_cast(attribs), + i::kNonStrictMode); has_pending_exception = obj.is_null(); EXCEPTION_BAILOUT_CHECK(false); return true; @@ -2303,7 +2304,8 @@ bool v8::Object::Set(uint32_t index, v8::Handle value) { i::Handle obj = i::SetElement( self, index, - value_obj); + value_obj, + i::kNonStrictMode); has_pending_exception = obj.is_null(); EXCEPTION_BAILOUT_CHECK(false); return true; @@ -2711,7 +2713,8 @@ bool v8::Object::SetHiddenValue(v8::Handle key, hidden_props, key_obj, value_obj, - static_cast(None)); + static_cast(None), + i::kNonStrictMode); has_pending_exception = obj.is_null(); EXCEPTION_BAILOUT_CHECK(false); return true; diff --git a/src/arguments.h b/src/arguments.h index d51c9e4c..5cf8deaa 100644 --- a/src/arguments.h +++ b/src/arguments.h @@ -78,7 +78,7 @@ class Arguments BASE_EMBEDDED { class CustomArguments : public Relocatable { public: inline CustomArguments(Object* data, - JSObject* self, + Object* self, JSObject* holder) { values_[2] = self; values_[1] = holder; diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index fb9bb488..c91d4ba2 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vldr(const DwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; dst.split_code(&sd, &d); + ASSERT(offset >= 0); + + if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. 
+ ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vldr(const SwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vstr(const DwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vstr(src, operand.rn(), operand.offset(), cond); } @@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; src.split_code(&sd, &d); - emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | - 0xA*B8 | ((offset / 4) & 255)); + ASSERT(offset >= 0); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | + 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vstr(const SwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(src, operand.rn(), operand.offset(), cond); } diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 3941c84b..f5eb5075 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -284,6 +284,7 @@ const SwVfpRegister s29 = { 29 }; const SwVfpRegister s30 = { 30 }; const SwVfpRegister s31 = { 31 }; +const DwVfpRegister no_dreg = { -1 }; const DwVfpRegister d0 = { 0 }; const DwVfpRegister d1 = { 1 }; const DwVfpRegister d2 = { 2 }; @@ -387,7 +388,7 @@ class Operand BASE_EMBEDDED { // Return true if this is a register operand. INLINE(bool is_reg() const); - // Return true of this operand fits in one instruction so that no + // Return true if this operand fits in one instruction so that no // 2-instruction solution with a load into the ip register is necessary. 
bool is_single_instruction() const; bool must_use_constant_pool() const; @@ -439,7 +440,7 @@ class MemOperand BASE_EMBEDDED { offset_ = offset; } - uint32_t offset() { + uint32_t offset() const { ASSERT(rm_.is(no_reg)); return offset_; } @@ -447,6 +448,10 @@ class MemOperand BASE_EMBEDDED { Register rn() const { return rn_; } Register rm() const { return rm_; } + bool OffsetIsUint12Encodable() const { + return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_); + } + private: Register rn_; // base Register rm_; // register offset @@ -902,22 +907,34 @@ class Assembler : public Malloced { void vldr(const DwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const DwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vldr(const SwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const SwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vstr(const DwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const DwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vstr(const SwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const SwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vmov(const DwVfpRegister dst, diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index f14d77af..961d3ce5 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -428,7 +428,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { GenerateLoadArrayFunction(masm, r1); if (FLAG_debug_code) { - // Initial map for the builtin Array function shoud be a map. + // Initial map for the builtin Array functions should be maps. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ tst(r2, Operand(kSmiTagMask)); __ Assert(ne, "Unexpected initial map for Array function"); @@ -458,11 +458,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { Label generic_constructor; if (FLAG_debug_code) { - // The array construct code is only set for the builtin Array function which - // always have a map. - GenerateLoadArrayFunction(masm, r2); - __ cmp(r1, r2); - __ Assert(eq, "Unexpected Array function"); + // The array construct code is only set for the builtin and internal + // Array functions which always have a map. // Initial map for the builtin Array function should be a map. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ tst(r2, Operand(kSmiTagMask)); @@ -1231,6 +1228,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Change context eagerly in case we need the global receiver. __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Do not transform the receiver for strict mode functions. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &shift_arguments); + + // Compute the receiver in non-strict mode. 
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2)); __ ldr(r2, MemOperand(r2, -kPointerSize)); // r0: actual number of arguments @@ -1394,10 +1399,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Change context eagerly to get the right global object if necessary. __ ldr(r0, MemOperand(fp, kFunctionOffset)); __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r0. + __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); // Compute the receiver. Label call_to_object, use_global_receiver, push_receiver; __ ldr(r0, MemOperand(fp, kRecvOffset)); + + // Do not transform the receiver for strict mode functions. + __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Compute the receiver in non-strict mode. __ tst(r0, Operand(kSmiTagMask)); __ b(eq, &call_to_object); __ LoadRoot(r1, Heap::kNullValueRootIndex); diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 1e7d5589..1c6d709f 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -398,8 +398,11 @@ class FloatingPointHelper : public AllStatic { Label* not_number); // Loads the number from object into dst as a 32-bit integer if possible. If - // the object is not a 32-bit integer control continues at the label - // not_int32. If VFP is supported double_scratch is used but not scratch2. + // the object cannot be converted to a 32-bit integer control continues at + // the label not_int32. If VFP is supported double_scratch is used + // but not scratch2. + // Floating point value in the 32-bit integer range will be rounded + // to an integer. static void LoadNumberAsInteger(MacroAssembler* masm, Register object, Register dst, @@ -409,6 +412,76 @@ class FloatingPointHelper : public AllStatic { DwVfpRegister double_scratch, Label* not_int32); + // Load the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be loaded. + static void LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + Destination destination, + DwVfpRegister double_dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + SwVfpRegister single_scratch, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be converted. + // scratch3 is not used when VFP3 is supported. + static void LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_int32); + + // Generate non VFP3 code to check if a double can be exactly represented by a + // 32-bit integer. This does not check for 0 or -0, which need + // to be checked for separately. + // Control jumps to not_int32 if the value is not a 32-bit integer, and falls + // through otherwise. + // src1 and src2 will be cloberred. + // + // Expected input: + // - src1: higher (exponent) part of the double value. 
+ // - src2: lower (mantissa) part of the double value. + // Output status: + // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) + // - src2: contains 1. + // - other registers are clobbered. + static void DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32); + + // Generates code to call a C function to do a double operation using core + // registers. (Used when VFP3 is not supported.) + // This code never falls through, but returns with a heap number containing + // the result in r0. + // Register heapnumber_result must be a heap number in which the + // result of the operation will be stored. + // Requires the following layout on entry: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + static void CallCCodeForDoubleOperation(MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch); + private: static void LoadNumber(MacroAssembler* masm, FloatingPointHelper::Destination destination, @@ -560,6 +633,318 @@ void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm, } +void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + Destination destination, + DwVfpRegister double_dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + SwVfpRegister single_scratch, + Label* not_int32) { + ASSERT(!scratch1.is(object) && !scratch2.is(object)); + ASSERT(!scratch1.is(scratch2)); + ASSERT(!heap_number_map.is(object) && + !heap_number_map.is(scratch1) && + !heap_number_map.is(scratch2)); + + Label done, obj_is_not_smi; + + __ JumpIfNotSmi(object, &obj_is_not_smi); + __ SmiUntag(scratch1, object); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(single_scratch, scratch1); + __ vcvt_f64_s32(double_dst, single_scratch); + if (destination == kCoreRegisters) { + __ vmov(dst1, dst2, double_dst); + } + } else { + Label fewer_than_20_useful_bits; + // Expected output: + // | dst1 | dst2 | + // | s | exp | mantissa | + + // Check for zero. + __ cmp(scratch1, Operand(0)); + __ mov(dst1, scratch1); + __ mov(dst2, scratch1); + __ b(eq, &done); + + // Preload the sign of the value. + __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC); + // Get the absolute value of the object (as an unsigned integer). + __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); + + // Get mantisssa[51:20]. + + // Get the position of the first set bit. + __ CountLeadingZeros(dst2, scratch1, scratch2); + __ rsb(dst2, dst2, Operand(31)); + + // Set the exponent. + __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst1, scratch2, scratch2, + HeapNumber::kExponentShift, HeapNumber::kExponentBits); + + // Clear the first non null bit. + __ mov(scratch2, Operand(1)); + __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2)); + + __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); + // Get the number of bits to set in the lower part of the mantissa. + __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ b(mi, &fewer_than_20_useful_bits); + // Set the higher 20 bits of the mantissa. 
+ __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2)); + __ rsb(scratch2, scratch2, Operand(32)); + __ mov(dst2, Operand(scratch1, LSL, scratch2)); + __ b(&done); + + __ bind(&fewer_than_20_useful_bits); + __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ mov(scratch2, Operand(scratch1, LSL, scratch2)); + __ orr(dst1, dst1, scratch2); + // Set dst2 to 0. + __ mov(dst2, Operand(0)); + } + + __ b(&done); + + __ bind(&obj_is_not_smi); + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + // Load the number. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Load the double value. + __ sub(scratch1, object, Operand(kHeapObjectTag)); + __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); + + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_dst, + scratch1, + scratch2, + kCheckForInexactConversion); + + // Jump to not_int32 if the operation did not succeed. + __ b(ne, not_int32); + + if (destination == kCoreRegisters) { + __ vmov(dst1, dst2, double_dst); + } + + } else { + ASSERT(!scratch1.is(object) && !scratch2.is(object)); + // Load the double value in the destination registers.. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + + // Check for 0 and -0. + __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); + __ orr(scratch1, scratch1, Operand(dst2)); + __ cmp(scratch1, Operand(0)); + __ b(eq, &done); + + // Check that the value can be exactly represented by a 32-bit integer. + // Jump to not_int32 if that's not the case. + DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); + + // dst1 and dst2 were trashed. Reload the double value. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + } + + __ bind(&done); +} + + +void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_int32) { + ASSERT(!dst.is(object)); + ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); + ASSERT(!scratch1.is(scratch2) && + !scratch1.is(scratch3) && + !scratch2.is(scratch3)); + + Label done; + + // Untag the object into the destination register. + __ SmiUntag(dst, object); + // Just return if the object is a smi. + __ JumpIfSmi(object, &done); + + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + // Object is a heap number. + // Convert the floating point value to a 32-bit integer. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + SwVfpRegister single_scratch = double_scratch.low(); + // Load the double value. + __ sub(scratch1, object, Operand(kHeapObjectTag)); + __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); + + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_scratch, + scratch1, + scratch2, + kCheckForInexactConversion); + + // Jump to not_int32 if the operation did not succeed. + __ b(ne, not_int32); + // Get the result in the destination register. + __ vmov(dst, single_scratch); + + } else { + // Load the double value in the destination registers. 
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + + // Check for 0 and -0. + __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); + __ orr(dst, scratch2, Operand(dst)); + __ cmp(dst, Operand(0)); + __ b(eq, &done); + + DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); + + // Registers state after DoubleIs32BitInteger. + // dst: mantissa[51:20]. + // scratch2: 1 + + // Shift back the higher bits of the mantissa. + __ mov(dst, Operand(dst, LSR, scratch3)); + // Set the implicit first bit. + __ rsb(scratch3, scratch3, Operand(32)); + __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); + // Set the sign. + __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + __ rsb(dst, dst, Operand(0), LeaveCC, mi); + } + + __ bind(&done); +} + + +void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32) { + // Get exponent alone in scratch. + __ Ubfx(scratch, + src1, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + + // Substract the bias from the exponent. + __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); + + // src1: higher (exponent) part of the double value. + // src2: lower (mantissa) part of the double value. + // scratch: unbiased exponent. + + // Fast cases. Check for obvious non 32-bit integer values. + // Negative exponent cannot yield 32-bit integers. + __ b(mi, not_int32); + // Exponent greater than 31 cannot yield 32-bit integers. + // Also, a positive value with an exponent equal to 31 is outside of the + // signed 32-bit integer range. + // Another way to put it is that if (exponent - signbit) > 30 then the + // number cannot be represented as an int32. + Register tmp = dst; + __ sub(tmp, scratch, Operand(src1, LSR, 31)); + __ cmp(tmp, Operand(30)); + __ b(gt, not_int32); + // - Bits [21:0] in the mantissa are not null. + __ tst(src2, Operand(0x3fffff)); + __ b(ne, not_int32); + + // Otherwise the exponent needs to be big enough to shift left all the + // non zero bits left. So we need the (30 - exponent) last bits of the + // 31 higher bits of the mantissa to be null. + // Because bits [21:0] are null, we can check instead that the + // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null. + + // Get the 32 higher bits of the mantissa in dst. + __ Ubfx(dst, + src2, + HeapNumber::kMantissaBitsInTopWord, + 32 - HeapNumber::kMantissaBitsInTopWord); + __ orr(dst, + dst, + Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + + // Create the mask and test the lower bits (of the higher bits). + __ rsb(scratch, scratch, Operand(32)); + __ mov(src2, Operand(1)); + __ mov(src1, Operand(src2, LSL, scratch)); + __ sub(src1, src1, Operand(1)); + __ tst(dst, src1); + __ b(ne, not_int32); +} + + +void FloatingPointHelper::CallCCodeForDoubleOperation( + MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch) { + // Using core registers: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + + // Assert that heap_number_result is callee-saved. + // We currently always use r5 to pass it. 
+ ASSERT(heap_number_result.is(r5)); + + // Push the current return address before the C call. Return will be + // through pop(pc) below. + __ push(lr); + __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. + // Call C routine that may not cause GC or other trouble. + __ CallCFunction(ExternalReference::double_fp_operation(op), 4); + // Store answer in the overwritable heap number. +#if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to substract the tag from heap_number_result. + __ sub(scratch, heap_number_result, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset)); +#else + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(heap_number_result, + HeapNumber::kValueOffset)); +#endif + // Place heap_number_result in r0 and return to the pushed return address. + __ mov(r0, Operand(heap_number_result)); + __ pop(pc); +} + // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { @@ -1296,6 +1681,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // This stub does not handle the inlined cases (Smis, Booleans, undefined). // The stub returns zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub uses VFP3 instructions. + ASSERT(CpuFeatures::IsEnabled(VFP3)); + Label false_result; Label not_heap_number; Register scratch = r9.is(tos_) ? r7 : r9; @@ -2661,8 +3049,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. Register result = r5; - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); // Load the operands. if (smi_operands) { @@ -2704,33 +3092,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ add(r0, r0, Operand(kHeapObjectTag)); __ Ret(); } else { - // Using core registers: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - - // Push the current return address before the C call. Return will be - // through pop(pc) below. - __ push(lr); - __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. - // Call C routine that may not cause GC or other trouble. r5 is callee - // save. - __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); - // Store answer in the overwritable heap number. -#if !defined(USE_ARM_EABI) - // Double returned in fp coprocessor register 0 and 1, encoded as - // register cr8. Offsets must be divisible by 4 for coprocessor so we - // need to substract the tag from r5. - __ sub(scratch1, result, Operand(kHeapObjectTag)); - __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); -#else - // Double returned in registers 0 and 1. - __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); -#endif - // Plase result in r0 and return to the pushed return address. - __ mov(r0, Operand(result)); - __ pop(pc); + // Call the C function to handle the double operation. 
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm, + op_, + result, + scratch1); } break; } @@ -2776,7 +3142,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, break; case Token::SAR: // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); __ GetLeastBitsFromInt32(r2, r2, 5); __ mov(r2, Operand(r3, ASR, r2)); break; @@ -2811,8 +3176,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. __ bind(&result_not_a_smi); - __ AllocateHeapNumber( - r5, scratch1, scratch2, heap_number_map, gc_required); + Register result = r5; + if (smi_operands) { + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + } else { + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); + } // r2: Answer as signed int32. // r5: Heap number to write answer into. @@ -2915,7 +3286,288 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { ASSERT(operands_type_ == TRBinaryOpIC::INT32); - GenerateTypeTransition(masm); + Register left = r1; + Register right = r0; + Register scratch1 = r7; + Register scratch2 = r9; + DwVfpRegister double_scratch = d0; + SwVfpRegister single_scratch = s3; + + Register heap_number_result = no_reg; + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + Label call_runtime; + // Labels for type transition, used for wrong input or output types. + // Both label are currently actually bound to the same position. We use two + // different label to differentiate the cause leading to type transition. + Label transition; + + // Smi-smi fast case. + Label skip; + __ orr(scratch1, left, right); + __ JumpIfNotSmi(scratch1, &skip); + GenerateSmiSmiOperation(masm); + // Fall through if the result is not a smi. + __ bind(&skip); + + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: { + // Load both operands and check that they are 32-bit integer. + // Jump to type transition if they are not. The registers r0 and r1 (right + // and left) are preserved for the runtime call. + FloatingPointHelper::Destination destination = + CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? + FloatingPointHelper::kVFPRegisters : + FloatingPointHelper::kCoreRegisters; + + FloatingPointHelper::LoadNumberAsInt32Double(masm, + right, + destination, + d7, + r2, + r3, + heap_number_map, + scratch1, + scratch2, + s0, + &transition); + FloatingPointHelper::LoadNumberAsInt32Double(masm, + left, + destination, + d6, + r4, + r5, + heap_number_map, + scratch1, + scratch2, + s0, + &transition); + + if (destination == FloatingPointHelper::kVFPRegisters) { + CpuFeatures::Scope scope(VFP3); + Label return_heap_number; + switch (op_) { + case Token::ADD: + __ vadd(d5, d6, d7); + break; + case Token::SUB: + __ vsub(d5, d6, d7); + break; + case Token::MUL: + __ vmul(d5, d6, d7); + break; + case Token::DIV: + __ vdiv(d5, d6, d7); + break; + default: + UNREACHABLE(); + } + + if (op_ != Token::DIV) { + // These operations produce an integer result. + // Try to return a smi if we can. + // Otherwise return a heap number if allowed, or jump to type + // transition. 
+ + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + d5, + scratch1, + scratch2); + + if (result_type_ <= TRBinaryOpIC::INT32) { + // If the ne condition is set, result does + // not fit in a 32-bit integer. + __ b(ne, &transition); + } + + // Check if the result fits in a smi. + __ vmov(scratch1, single_scratch); + __ add(scratch2, scratch1, Operand(0x40000000), SetCC); + // If not try to return a heap number. + __ b(mi, &return_heap_number); + // Tag the result and return. + __ SmiTag(r0, scratch1); + __ Ret(); + } + + if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER + : TRBinaryOpIC::INT32) { + __ bind(&return_heap_number); + // We are using vfp registers so r5 is available. + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); + } + + // A DIV operation expecting an integer result falls through + // to type transition. + + } else { + // We preserved r0 and r1 to be able to call runtime. + // Save the left value on the stack. + __ Push(r5, r4); + + // Allocate a heap number to store the result. + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + + // Load the left value from the value saved on the stack. + __ Pop(r1, r0); + + // Call the C function to handle the double operation. + FloatingPointHelper::CallCCodeForDoubleOperation( + masm, op_, heap_number_result, scratch1); + } + + break; + } + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + Label return_heap_number; + Register scratch3 = r5; + // Convert operands to 32-bit integers. Right in r2 and left in r3. The + // registers r0 and r1 (right and left) are preserved for the runtime + // call. + FloatingPointHelper::LoadNumberAsInt32(masm, + left, + r3, + heap_number_map, + scratch1, + scratch2, + scratch3, + d0, + &transition); + FloatingPointHelper::LoadNumberAsInt32(masm, + right, + r2, + heap_number_map, + scratch1, + scratch2, + scratch3, + d0, + &transition); + + // The ECMA-262 standard specifies that, for shift operations, only the + // 5 least significant bits of the shift value should be used. + switch (op_) { + case Token::BIT_OR: + __ orr(r2, r3, Operand(r2)); + break; + case Token::BIT_XOR: + __ eor(r2, r3, Operand(r2)); + break; + case Token::BIT_AND: + __ and_(r2, r3, Operand(r2)); + break; + case Token::SAR: + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, ASR, r2)); + break; + case Token::SHR: + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSR, r2), SetCC); + // SHR is special because it is required to produce a positive answer. + // We only get a negative result if the shift value (r2) is 0. + // This result cannot be respresented as a signed 32-bit integer, try + // to return a heap number if we can. + // The non vfp3 code does not support this special case, so jump to + // runtime if we don't support it. + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, + (result_type_ <= TRBinaryOpIC::INT32) ? &transition + : &return_heap_number); + } else { + __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? 
&transition + : &call_runtime); + } + break; + case Token::SHL: + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSL, r2)); + break; + default: + UNREACHABLE(); + } + + // Check if the result fits in a smi. + __ add(scratch1, r2, Operand(0x40000000), SetCC); + // If not try to return a heap number. (We know the result is an int32.) + __ b(mi, &return_heap_number); + // Tag the result and return. + __ SmiTag(r0, r2); + __ Ret(); + + __ bind(&return_heap_number); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + + if (op_ != Token::SHR) { + // Convert the result to a floating point value. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_s32(double_scratch, double_scratch.low()); + } else { + // The result must be interpreted as an unsigned 32-bit integer. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_u32(double_scratch, double_scratch.low()); + } + + // Store the result. + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(double_scratch, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); + } else { + // Tail call that writes the int32 in r2 to the heap number in r0, using + // r3 as scratch. r0 is preserved and returned. + WriteInt32ToHeapNumberStub stub(r2, r0, r3); + __ TailCallStub(&stub); + } + + break; + } + + default: + UNREACHABLE(); + } + + if (transition.is_linked()) { + __ bind(&transition); + GenerateTypeTransition(masm); + } + + __ bind(&call_runtime); + GenerateCallRuntime(masm); } @@ -2934,45 +3586,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime; + Label call_runtime, call_string_add_or_runtime; GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); - // If all else fails, use the runtime system to get the correct - // result. - __ bind(&call_runtime); + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); - // Try to add strings before calling runtime. + __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { GenerateAddStrings(masm); } - GenericBinaryOpStub stub(op_, mode_, r1, r0); - __ TailCallStub(&stub); + __ bind(&call_runtime); + GenerateCallRuntime(masm); } void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { ASSERT(op_ == Token::ADD); + Label left_not_string, call_runtime; Register left = r1; Register right = r0; - Label call_runtime; - // Check if first argument is a string. - __ JumpIfSmi(left, &call_runtime); + // Check if left argument is a string. + __ JumpIfSmi(left, &left_not_string); __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); + __ b(ge, &left_not_string); + + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_left_stub); - // First argument is a a string, test second. + // Left operand is not a string, test right. + __ bind(&left_not_string); __ JumpIfSmi(right, &call_runtime); __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &call_runtime); - // First and second argument are strings. 
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); + __ TailCallStub(&string_add_right_stub); // At least one argument is not a string. __ bind(&call_runtime); @@ -3061,32 +3715,47 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Argument is a number and is on stack and in r0. - Label runtime_call; + // Untagged case: double input in d2, double result goes + // into d2. + // Tagged case: tagged input on top of stack and in r0, + // tagged result (heap number) goes into r0. + Label input_not_smi; Label loaded; + Label calculate; + Label invalid_cache; + const Register scratch0 = r9; + const Register scratch1 = r7; + const Register cache_entry = r0; + const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(VFP3)) { - // Load argument and check if it is a smi. - __ JumpIfNotSmi(r0, &input_not_smi); - CpuFeatures::Scope scope(VFP3); - // Input is a smi. Convert to double and load the low and high words - // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); - __ b(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ CheckMap(r0, - r1, - Heap::kHeapNumberMapRootIndex, - &runtime_call, - true); - // Input is a HeapNumber. Load it to a double register and store the - // low and high words into r2, r3. - __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); - + if (tagged) { + // Argument is a number and is on stack and in r0. + // Load argument and check if it is a smi. + __ JumpIfNotSmi(r0, &input_not_smi); + + // Input is a smi. Convert to double and load the low and high words + // of the double into r2, r3. + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r0, + r1, + Heap::kHeapNumberMapRootIndex, + &calculate, + true); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r2, r3. + __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ vmov(r2, r3, d0); + } else { + // Input is untagged double in d2. Output goes to d2. + __ vmov(r2, r3, d2); + } __ bind(&loaded); // r2 = low 32 bits of double value // r3 = high 32 bits of double value @@ -3101,14 +3770,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // r2 = low 32 bits of double value. // r3 = high 32 bits of double value. // r1 = TranscendentalCache::hash(double value). - __ mov(r0, + __ mov(cache_entry, Operand(ExternalReference::transcendental_cache_array_address())); // r0 points to cache array. - __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); + __ ldr(cache_entry, MemOperand(cache_entry, + type_ * sizeof(TranscendentalCache::caches_[0]))); // r0 points to the cache for the type type_. // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ cmp(r0, Operand(0, RelocInfo::NONE)); - __ b(eq, &runtime_call); + __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); + __ b(eq, &invalid_cache); #ifdef DEBUG // Check that the layout of cache elements match expectations. @@ -3127,21 +3797,109 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. 
__ add(r1, r1, Operand(r1, LSL, 1)); - __ add(r0, r0, Operand(r1, LSL, 2)); + __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); // Check if cache matches: Double value is stored in uint32_t[2] array. - __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); + __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); __ cmp(r2, r4); - __ b(ne, &runtime_call); + __ b(ne, &calculate); __ cmp(r3, r5); - __ b(ne, &runtime_call); - // Cache hit. Load result, pop argument and return. - __ mov(r0, Operand(r6)); - __ pop(); + __ b(ne, &calculate); + // Cache hit. Load result, cleanup and return. + if (tagged) { + // Pop input value from stack and load result into r0. + __ pop(); + __ mov(r0, Operand(r6)); + } else { + // Load result into d2. + __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); + } + __ Ret(); + } // if (CpuFeatures::IsSupported(VFP3)) + + __ bind(&calculate); + if (tagged) { + __ bind(&invalid_cache); + __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); + } else { + if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE(); + CpuFeatures::Scope scope(VFP3); + + Label no_update; + Label skip_cache; + const Register heap_number_map = r5; + + // Call C function to calculate the result and update the cache. + // Register r0 holds precalculated cache entry address; preserve + // it on the stack and pop it into register cache_entry after the + // call. + __ push(cache_entry); + GenerateCallCFunction(masm, scratch0); + __ GetCFunctionDoubleResult(d2); + + // Try to update the cache. If we cannot allocate a + // heap number, we return the result without updating. + __ pop(cache_entry); + __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); + __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); + __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); + __ Ret(); + + __ bind(&invalid_cache); + // The cache is invalid. Call runtime which will recreate the + // cache. + __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); + __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + __ LeaveInternalFrame(); + __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ Ret(); + + __ bind(&skip_cache); + // Call C function to calculate the result and answer directly + // without updating the cache. + GenerateCallCFunction(masm, scratch0); + __ GetCFunctionDoubleResult(d2); + __ bind(&no_update); + + // We return the value in d2 without adding it to the cache, but + // we cause a scavenging GC so that future allocations will succeed. + __ EnterInternalFrame(); + + // Allocate an aligned object larger than a HeapNumber. 
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + __ LeaveInternalFrame(); __ Ret(); } +} - __ bind(&runtime_call); - __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); + +void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, + Register scratch) { + __ push(lr); + __ PrepareCallCFunction(2, scratch); + __ vmov(r0, r1, d2); + switch (type_) { + case TranscendentalCache::SIN: + __ CallCFunction(ExternalReference::math_sin_double_function(), 2); + break; + case TranscendentalCache::COS: + __ CallCFunction(ExternalReference::math_cos_double_function(), 2); + break; + case TranscendentalCache::LOG: + __ CallCFunction(ExternalReference::math_log_double_function(), 2); + break; + default: + UNIMPLEMENTED(); + break; + } + __ pop(lr); } @@ -3299,105 +4057,13 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // r0 holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Restore the next handler and frame pointer, discard handler state. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0, RelocInfo::NONE)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); + __ Throw(r0); } void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ ldr(r2, MemOperand(sp, kStateOffset)); - __ cmp(r2, Operand(StackHandler::ENTRY)); - __ b(eq, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ ldr(sp, MemOperand(sp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. 
- ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(r0, Operand(false, RelocInfo::NONE)); - __ mov(r2, Operand(external_caught)); - __ str(r0, MemOperand(r2)); - - // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ mov(r0, Operand(reinterpret_cast(out_of_memory))); - __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r0, MemOperand(r2)); - } - - // Stack layout at this point. See also StackHandlerConstants. - // sp -> state (ENTRY) - // fp - // lr - - // Discard handler state (r2 is not used) and restore frame pointer. - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0, RelocInfo::NONE)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); + __ ThrowUncatchable(type, r0); } @@ -3484,7 +4150,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // r0:r1: result // sp: stack pointer // fp: frame pointer - __ LeaveExitFrame(save_doubles_); + // Callee-saved register r4 still holds argc. + __ LeaveExitFrame(save_doubles_, r4); + __ mov(pc, lr); // check if we should retry or throw exception Label retry; @@ -3796,7 +4464,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // The offset was stored in r4 safepoint slot. // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) - __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); + __ LoadFromSafepointRegisterSlot(scratch, r4); __ sub(inline_site, lr, scratch); // Get the map location in scratch and patch it. __ GetRelocatedValueLocation(inline_site, scratch); @@ -4263,24 +4931,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); static const int kRegExpExecuteArguments = 7; - __ push(lr); - __ PrepareCallCFunction(kRegExpExecuteArguments, r0); + static const int kParameterRegisters = 4; + __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); + + // Stack pointer now points to cell where return address is to be written. + // Arguments are before that on the stack or in registers. - // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. + // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 2 * kPointerSize)); + __ str(r0, MemOperand(sp, 3 * kPointerSize)); - // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. + // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. __ mov(r0, Operand(address_of_regexp_stack_memory_address)); __ ldr(r0, MemOperand(r0, 0)); __ mov(r2, Operand(address_of_regexp_stack_memory_size)); __ ldr(r2, MemOperand(r2, 0)); __ add(r0, r0, Operand(r2)); - __ str(r0, MemOperand(sp, 1 * kPointerSize)); + __ str(r0, MemOperand(sp, 2 * kPointerSize)); - // Argument 5 (sp[0]): static offsets vector buffer. + // Argument 5 (sp[4]): static offsets vector buffer. 
__ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); - __ str(r0, MemOperand(sp, 0 * kPointerSize)); + __ str(r0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). @@ -4302,8 +4973,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Locate the code entry and call it. __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(r7, kRegExpExecuteArguments); - __ pop(lr); + DirectCEntryStub stub; + stub.GenerateCall(masm, r7); + + __ LeaveExitFrame(false, no_reg); // r0: result // subject: subject string (callee saved) @@ -4312,6 +4985,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); __ b(eq, &success); Label failure; @@ -4324,12 +4998,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r0, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); + __ mov(r1, Operand(ExternalReference::the_hole_value_location())); __ ldr(r1, MemOperand(r1, 0)); + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r0, MemOperand(r2, 0)); __ cmp(r0, r1); __ b(eq, &runtime); + + __ str(r1, MemOperand(r2, 0)); // Clear pending exception. + + // Check if the exception is a termination. If so, throw as uncatchable. + __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex); + __ cmp(r0, ip); + Label termination_exception; + __ b(eq, &termination_exception); + + __ Throw(r0); // Expects thrown value in r0. + + __ bind(&termination_exception); + __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. + __ bind(&failure); // For failure and exception return null. __ mov(r0, Operand(Factory::null_value())); @@ -5508,18 +6196,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; + // Stack on entry: - // sp[0]: second argument. - // sp[4]: first argument. + // sp[0]: second argument (right). + // sp[4]: first argument (left). // Load the two arguments. __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - STATIC_ASSERT(kSmiTag == 0); + if (flags_ == NO_STRING_ADD_FLAGS) { __ JumpIfEitherSmi(r0, r1, &string_add_runtime); // Load instance types. __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -5531,13 +6220,27 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kIsNotStringMask)); __ tst(r5, Operand(kIsNotStringMask), eq); __ b(ne, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. 
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } } // Both arguments are strings. // r0: first string // r1: second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) { Label strings_not_empty; // Check if either of the strings are empty. In that case return the other. @@ -5565,8 +6268,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // Look at the length of the result of adding the two strings. Label string_add_flat_result, longer_than_two; // Adding two lengths can't overflow. @@ -5578,7 +6281,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(ne, &longer_than_two); // Check that both strings are non-external ascii strings. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5626,7 +6329,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If result is not supposed to be flat, allocate a cons string object. // If both strings are ascii the result is an ascii cons string. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5674,11 +6377,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // r6: sum of lengths. __ bind(&string_add_flat_result); - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5776,6 +6479,60 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. 
__ bind(&string_add_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_JS); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow) { + // First check if the argument is already a string. + Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); + __ b(lt, &done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. + NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + scratch4, + false, + ¬_cached); + __ mov(arg, scratch1); + __ str(arg, MemOperand(sp, stack_offset)); + __ jmp(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CompareObjectType( + arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. + __ b(ne, slow); + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ and_(scratch2, + scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ cmp(scratch2, + Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ b(ne, slow); + __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); + __ str(arg, MemOperand(sp, stack_offset)); + + __ bind(&done); } @@ -5950,17 +6707,26 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) { void DirectCEntryStub::GenerateCall(MacroAssembler* masm, - ApiFunction *function) { + ExternalReference function) { __ mov(lr, Operand(reinterpret_cast(GetCode().location()), RelocInfo::CODE_TARGET)); + __ mov(r2, Operand(function)); // Push return address (accessible to GC through exit frame pc). - __ mov(r2, - Operand(ExternalReference(function, ExternalReference::DIRECT_CALL))); __ str(pc, MemOperand(sp, 0)); __ Jump(r2); // Call the api function. } +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + Register target) { + __ mov(lr, Operand(reinterpret_cast(GetCode().location()), + RelocInfo::CODE_TARGET)); + // Push return address (accessible to GC through exit frame pc). + __ str(pc, MemOperand(sp, 0)); + __ Jump(target); // Call the C++ function. +} + + void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -6028,6 +6794,91 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, } +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range) { + // Register use: + // receiver - holds the receiver and is unchanged unless the + // store succeeds. + // key - holds the key (must be a smi) and is unchanged. + // value - holds the value (must be a smi) and is unchanged. + // elements - holds the element object of the receiver on entry if + // load_elements_from_receiver is false, otherwise used + // internally to store the pixel arrays elements and + // external array pointer. + // elements_map - holds the map of the element object if + // load_elements_map_from_elements is false, otherwise + // loaded with the element map. 
+ // + Register external_pointer = elements; + Register untagged_key = scratch1; + Register untagged_value = scratch2; + + if (load_elements_from_receiver) { + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + } + + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + if (load_elements_map_from_elements) { + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + } + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ b(ne, not_pixel_array); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ Assert(eq, "Elements isn't a pixel array"); + } + } + + // Some callers already have verified that the key is a smi. key_not_smi is + // set to NULL as a sentinel for that case. Otherwise, add an explicit check + // to ensure the key is a smi must be added. + if (key_not_smi != NULL) { + __ JumpIfNotSmi(key, key_not_smi); + } else { + if (FLAG_debug_code) { + __ AbortIfNotSmi(key); + } + } + + __ SmiUntag(untagged_key, key); + + // Perform bounds check. + __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset)); + __ cmp(untagged_key, scratch2); + __ b(hs, out_of_range); // unsigned check handles negative keys. + + __ JumpIfNotSmi(value, value_not_smi); + __ SmiUntag(untagged_value, value); + + // Clamp the value to [0..255]. + __ Usat(untagged_value, 8, Operand(untagged_value)); + // Get the pointer to the external array. This clobbers elements. + __ ldr(external_pointer, + FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); + __ strb(untagged_value, MemOperand(external_pointer, untagged_key)); + __ Ret(); +} + + #undef __ } } // namespace v8::internal diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index bf7d6354..e3ef3391 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -38,13 +38,22 @@ namespace internal { // TranscendentalCache runtime function. class TranscendentalCacheStub: public CodeStub { public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} + enum ArgumentType { + TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, + UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits + }; + + TranscendentalCacheStub(TranscendentalCache::Type type, + ArgumentType argument_type) + : type_(type), argument_type_(argument_type) { } void Generate(MacroAssembler* masm); private: TranscendentalCache::Type type_; + ArgumentType argument_type_; + void GenerateCallCFunction(MacroAssembler* masm, Register scratch); + Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } + int MinorKey() { return type_ | argument_type_; } Runtime::FunctionId RuntimeFunction(); }; @@ -335,24 +344,36 @@ class TypeRecordingBinaryOpStub: public CodeStub { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. 
+ NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB }; class StringAddStub: public CodeStub { public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} private: Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } + int MinorKey() { return flags_; } void Generate(MacroAssembler* masm); - // Should the stub check whether arguments are strings? - bool string_check_; + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow); + + const StringAddFlags flags_; }; @@ -580,7 +601,8 @@ class DirectCEntryStub: public CodeStub { public: DirectCEntryStub() {} void Generate(MacroAssembler* masm); - void GenerateCall(MacroAssembler* masm, ApiFunction *function); + void GenerateCall(MacroAssembler* masm, ExternalReference function); + void GenerateCall(MacroAssembler* masm, Register target); private: Major MajorKey() { return DirectCEntry; } @@ -589,14 +611,14 @@ class DirectCEntryStub: public CodeStub { }; -// Generate code the to load an element from a pixel array. The receiver is -// assumed to not be a smi and to have elements, the caller must guarantee this -// precondition. If the receiver does not have elements that are pixel arrays, -// the generated code jumps to not_pixel_array. If key is not a smi, then the -// generated code branches to key_not_smi. Callers can specify NULL for -// key_not_smi to signal that a smi check has already been performed on key so -// that the smi check is not generated . If key is not a valid index within the -// bounds of the pixel array, the generated code jumps to out_of_range. +// Generate code to load an element from a pixel array. The receiver is assumed +// to not be a smi and to have elements, the caller must guarantee this +// precondition. If key is not a smi, then the generated code branches to +// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi +// check has already been performed on key so that the smi check is not +// generated. If key is not a valid index within the bounds of the pixel array, +// the generated code jumps to out_of_range. receiver, key and elements are +// unchanged throughout the generated code sequence. void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -609,6 +631,35 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, Label* key_not_smi, Label* out_of_range); +// Generate code to store an element into a pixel array, clamping values between +// [0..255]. The receiver is assumed to not be a smi and to have elements, the +// caller must guarantee this precondition. If key is not a smi, then the +// generated code branches to key_not_smi. Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If value is not a smi, the generated +// code will branch to value_not_smi. If the receiver doesn't have pixel array +// elements, the generated code will branch to not_pixel_array, unless +// not_pixel_array is NULL, in which case the caller must ensure that the +// receiver has pixel array elements. If key is not a valid index within the +// bounds of the pixel array, the generated code jumps to out_of_range. 
If +// load_elements_from_receiver is true, then the elements of receiver is loaded +// into elements, otherwise elements is assumed to already be the receiver's +// elements. If load_elements_map_from_elements is true, elements_map is loaded +// from elements, otherwise it is assumed to already contain the element map. +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range); } } // namespace v8::internal diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index c8271107..d32b0091 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -1938,8 +1938,9 @@ void CodeGenerator::DeclareGlobals(Handle pairs) { frame_->EmitPush(cp); frame_->EmitPush(Operand(pairs)); frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0))); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); - frame_->CallRuntime(Runtime::kDeclareGlobals, 3); + frame_->CallRuntime(Runtime::kDeclareGlobals, 4); // The result is discarded. } @@ -3287,7 +3288,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // context slot followed by initialization. frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); } else { - frame_->CallRuntime(Runtime::kStoreContextSlot, 3); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->CallRuntime(Runtime::kStoreContextSlot, 4); } // Storing a variable must keep the (new) value on the expression // stack. This is necessary for compiling assignment expressions. @@ -3637,7 +3639,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { Load(key); Load(value); if (property->emit_store()) { - frame_->CallRuntime(Runtime::kSetProperty, 3); + frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes + frame_->CallRuntime(Runtime::kSetProperty, 4); } else { frame_->Drop(3); } @@ -5170,11 +5173,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { // Set the bit in the map to indicate that it has been checked safe for // default valueOf and set true result. 
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); + __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); __ orr(scratch1_, scratch1_, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); - __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); + __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); __ mov(map_result_, Operand(1)); __ jmp(exit_label()); __ bind(&false_result); @@ -5656,7 +5659,8 @@ void CodeGenerator::GenerateMathSin(ZoneList* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); if (CpuFeatures::IsSupported(VFP3)) { - TranscendentalCacheStub stub(TranscendentalCache::SIN); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::TAGGED); frame_->SpillAllButCopyTOSToR0(); frame_->CallStub(&stub, 1); } else { @@ -5670,7 +5674,8 @@ void CodeGenerator::GenerateMathCos(ZoneList* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); if (CpuFeatures::IsSupported(VFP3)) { - TranscendentalCacheStub stub(TranscendentalCache::COS); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::TAGGED); frame_->SpillAllButCopyTOSToR0(); frame_->CallStub(&stub, 1); } else { @@ -5684,7 +5689,8 @@ void CodeGenerator::GenerateMathLog(ZoneList* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); if (CpuFeatures::IsSupported(VFP3)) { - TranscendentalCacheStub stub(TranscendentalCache::LOG); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::TAGGED); frame_->SpillAllButCopyTOSToR0(); frame_->CallStub(&stub, 1); } else { @@ -5844,15 +5850,20 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { if (property != NULL) { Load(property->obj()); Load(property->key()); - frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); frame_->EmitPush(r0); } else if (variable != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); frame_->EmitPush(Operand(variable->name())); - frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2); + frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); frame_->EmitPush(r0); } else if (slot != NULL && slot->type() == Slot::LOOKUP) { @@ -6669,8 +6680,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { public: DeferredReferenceSetKeyedValue(Register value, Register key, - Register receiver) - : value_(value), key_(key), receiver_(receiver) { + Register receiver, + StrictModeFlag strict_mode) + : value_(value), + key_(key), + receiver_(receiver), + strict_mode_(strict_mode) { set_comment("[ DeferredReferenceSetKeyedValue"); } @@ -6680,6 +6695,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { Register value_; Register key_; Register receiver_; + StrictModeFlag strict_mode_; }; @@ -6701,7 +6717,9 @@ void DeferredReferenceSetKeyedValue::Generate() { { Assembler::BlockConstPoolScope block_const_pool(masm_); // Call keyed store IC. It has the arguments value, key and receiver in r0, // r1 and r2. - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? 
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the // keyed store has been inlined. @@ -6719,8 +6737,12 @@ class DeferredReferenceSetNamedValue: public DeferredCode { public: DeferredReferenceSetNamedValue(Register value, Register receiver, - Handle name) - : value_(value), receiver_(receiver), name_(name) { + Handle name, + StrictModeFlag strict_mode) + : value_(value), + receiver_(receiver), + name_(name), + strict_mode_(strict_mode) { set_comment("[ DeferredReferenceSetNamedValue"); } @@ -6730,6 +6752,7 @@ class DeferredReferenceSetNamedValue: public DeferredCode { Register value_; Register receiver_; Handle name_; + StrictModeFlag strict_mode_; }; @@ -6749,7 +6772,9 @@ void DeferredReferenceSetNamedValue::Generate() { { Assembler::BlockConstPoolScope block_const_pool(masm_); // Call keyed store IC. It has the arguments value, key and receiver in r0, // r1 and r2. - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the // named store has been inlined. @@ -6938,7 +6963,8 @@ void CodeGenerator::EmitNamedStore(Handle name, bool is_contextual) { Register receiver = r1; DeferredReferenceSetNamedValue* deferred = - new DeferredReferenceSetNamedValue(value, receiver, name); + new DeferredReferenceSetNamedValue( + value, receiver, name, strict_mode_flag()); // Check that the receiver is a heap object. __ tst(receiver, Operand(kSmiTagMask)); @@ -7124,7 +7150,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type, // The deferred code expects value, key and receiver in registers. DeferredReferenceSetKeyedValue* deferred = - new DeferredReferenceSetKeyedValue(value, key, receiver); + new DeferredReferenceSetKeyedValue( + value, key, receiver, strict_mode_flag()); // Check that the value is a smi. As this inlined code does not set the // write barrier it is only possible to store smi values. @@ -7209,7 +7236,7 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type, deferred->BindExit(); } else { - frame()->CallKeyedStoreIC(); + frame()->CallKeyedStoreIC(strict_mode_flag()); } } diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h index 7ac38ed3..e6033a89 100644 --- a/src/arm/constants-arm.h +++ b/src/arm/constants-arm.h @@ -385,7 +385,10 @@ enum VFPConversionMode { kDefaultRoundToZero = 1 }; +// This mask does not include the "inexact" or "input denormal" cumulative +// exceptions flags, because we usually don't want to check for it. static const uint32_t kVFPExceptionMask = 0xf; +static const uint32_t kVFPInexactExceptionBit = 1 << 4; static const uint32_t kVFPFlushToZeroMask = 1 << 24; static const uint32_t kVFPInvalidExceptionBit = 1; @@ -411,6 +414,11 @@ enum VFPRoundingMode { static const uint32_t kVFPRoundingModeMask = 3 << 22; +enum CheckForInexactConversion { + kCheckForInexactConversion, + kDontCheckForInexactConversion +}; + // ----------------------------------------------------------------------------- // Hints. diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc index 507954d9..51c84b33 100644 --- a/src/arm/cpu-arm.cc +++ b/src/arm/cpu-arm.cc @@ -50,6 +50,11 @@ void CPU::Setup() { void CPU::FlushICache(void* start, size_t size) { + // Nothing to do flushing no instructions. 
+ if (size == 0) { + return; + } + #if defined (USE_SIMULATOR) // Not generating ARM instructions for C-code. This means that we are // building an ARM emulator based target. We should notify the simulator diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc index caec55af..9a5aa902 100644 --- a/src/arm/deoptimizer-arm.cc +++ b/src/arm/deoptimizer-arm.cc @@ -124,14 +124,62 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - UNIMPLEMENTED(); + const int kInstrSize = Assembler::kInstrSize; + // The call of the stack guard check has the following form: + // e1 5d 00 0c cmp sp, + // 2a 00 00 01 bcs ok + // e5 9f c? ?? ldr ip, [pc, ] + // e1 2f ff 3c blx ip + ASSERT(Memory::int32_at(pc_after - kInstrSize) == + (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code())); + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_after - 2 * kInstrSize))); + + // We patch the code to the following form: + // e1 5d 00 0c cmp sp, + // e1 a0 00 00 mov r0, r0 (NOP) + // e5 9f c? ?? ldr ip, [pc, ] + // e1 2f ff 3c blx ip + // and overwrite the constant containing the + // address of the stack check stub. + + // Replace conditional jump with NOP. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->nop(); + + // Replace the stack check address in the constant pool + // with the entry address of the replacement code. + uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - + 2 * kInstrSize) & 0xfff; + Address stack_check_address_pointer = pc_after + stack_check_address_offset; + ASSERT(Memory::uint32_at(stack_check_address_pointer) == + reinterpret_cast(check_code->entry())); + Memory::uint32_at(stack_check_address_pointer) = + reinterpret_cast(replacement_code->entry()); } void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - UNIMPLEMENTED(); + const int kInstrSize = Assembler::kInstrSize; + ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c); + ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5); + ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f); + + // Replace NOP with conditional jump. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->b(+4, cs); + + // Replace the stack check address in the constant pool + // with the entry address of the replacement code. + uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - + 2 * kInstrSize) & 0xfff; + Address stack_check_address_pointer = pc_after + stack_check_address_offset; + ASSERT(Memory::uint32_at(stack_check_address_pointer) == + reinterpret_cast(replacement_code->entry())); + Memory::uint32_at(stack_check_address_pointer) = + reinterpret_cast(check_code->entry()); } @@ -381,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. + // For the bottommost output frame the context can be gotten from the input + // frame. For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. 
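// ---------------------------------------------------------------------------
// Illustration (not part of the patch): how PatchStackCheckCodeAt and
// RevertStackCheckCodeAt above locate the constant-pool word loaded by
// "ldr ip, [pc, #offset]" and swap the stub entry address stored in it. The
// helper assumes the 12-bit pc-relative immediate described in the patch
// comments; its name and signature are illustrative only.
#include <cstdint>
#include <cstring>

static void ReplaceConstantPoolEntry(uint8_t* pc_after,
                                     uint32_t ldr_instruction,
                                     uint32_t new_entry) {
  uint32_t offset = ldr_instruction & 0xfffu;         // 12-bit immediate of the ldr
  uint8_t* slot = pc_after + offset;                  // pc-relative constant pool slot
  std::memcpy(slot, &new_entry, sizeof(new_entry));   // install the new stub address
}
// ---------------------------------------------------------------------------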
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) { output_frame->SetRegister(cp.code(), value); diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 2685fcb7..5f5de3a9 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { Move(dot_arguments_slot, r3, r1, r2); } - { Comment cmnt(masm_, "[ Declarations"); - // For named function expressions, declare the function name as a - // constant. - if (scope()->is_function_scope() && scope()->function() != NULL) { - EmitDeclaration(scope()->function(), Variable::CONST, NULL); - } - // Visit all the explicit declarations unless there is an illegal - // redeclaration. - if (scope()->HasIllegalRedeclaration()) { - scope()->VisitIllegalRedeclaration(this); - } else { - VisitDeclarations(scope()->declarations()); - } - } - if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); } - // Check the stack for overflow or break request. - { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailout(info->function(), NO_REGISTERS); - Label ok; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); - } + // Visit the declarations and body unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); - { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); - VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + } else { + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. + if (scope()->is_function_scope() && scope()->function() != NULL) { + EmitDeclaration(scope()->function(), Variable::CONST, NULL); + } + VisitDeclarations(scope()->declarations()); + } + + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailout(info->function(), NO_REGISTERS); + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + StackCheckStub stub; + __ CallStub(&stub); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); + VisitStatements(function()->body()); + ASSERT(loop_depth() == 0); + } } + // Always emit a 'return undefined' in case control fell off the end of + // the body. { Comment cmnt(masm_, "[ return ;"); - // Emit a 'return undefined' in case control fell off the end of the - // body. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); } EmitReturnSequence(); @@ -338,13 +339,6 @@ void FullCodeGenerator::EmitReturnSequence() { } -FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( - Token::Value op, Expression* left, Expression* right) { - ASSERT(ShouldInlineSmiCase(op)); - return kNoConstants; -} - - void FullCodeGenerator::EffectContext::Plug(Slot* slot) const { } @@ -563,13 +557,38 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const { void FullCodeGenerator::DoTest(Label* if_true, Label* if_false, Label* fall_through) { - // Call the runtime to find the boolean value of the source and then - // translate it into control flow to the pair of labels. 
- __ push(result_register()); - __ CallRuntime(Runtime::kToBool, 1); - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r0, ip); - Split(eq, if_true, if_false, fall_through); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Emit the inlined tests assumed by the stub. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_false); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_true); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_false); + STATIC_ASSERT(kSmiTag == 0); + __ tst(result_register(), result_register()); + __ b(eq, if_false); + __ JumpIfSmi(result_register(), if_true); + + // Call the ToBoolean stub for all other cases. + ToBooleanStub stub(result_register()); + __ CallStub(&stub); + __ tst(result_register(), result_register()); + } else { + // Call the runtime to find the boolean value of the source and then + // translate it into control flow to the pair of labels. + __ push(result_register()); + __ CallRuntime(Runtime::kToBool, 1); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(r0, ip); + } + + // The stub returns nonzero for true. + Split(ne, if_true, if_false, fall_through); } @@ -684,10 +703,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // We bypass the general EmitSlotSearch because we know more about // this specific context. - // The variable in the decl always resides in the current context. + // The variable in the decl always resides in the current function + // context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { - // Check if we have the correct context pointer. + // Check that we're not inside a 'with'. __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); __ cmp(r1, cp); __ Check(eq, "Unexpected declaration in current context."); @@ -756,7 +776,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, prop->key()->AsLiteral()->handle()->IsSmi()); __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle ic(Builtins::builtin(is_strict() + ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // Value in r0 is ignored (declarations are statements). } @@ -772,10 +794,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { void FullCodeGenerator::DeclareGlobals(Handle pairs) { // Call the runtime to declare the globals. // The context is the first argument. - __ mov(r1, Operand(pairs)); - __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0))); - __ Push(cp, r1, r0); - __ CallRuntime(Runtime::kDeclareGlobals, 3); + __ mov(r2, Operand(pairs)); + __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0))); + __ mov(r0, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(cp, r2, r1, r0); + __ CallRuntime(Runtime::kDeclareGlobals, 4); // Return value is ignored. } @@ -784,9 +807,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); SetStatementPosition(stmt); + // Keep the switch value on the stack until a case matches. 
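// ---------------------------------------------------------------------------
// Illustration (not part of the patch): the order of the truthiness checks
// DoTest now inlines above before falling back to the ToBoolean stub, written
// over a toy tagged-value struct. The struct and helper are ours, not V8's;
// only the check order is taken from the generated code.
#include <cstdint>

struct ToyValue {
  bool is_undefined;  // identical to the undefined root
  bool is_true;       // identical to the true root
  bool is_false;      // identical to the false root
  bool is_smi;        // low tag bit clear in the real representation
  int32_t smi_value;  // only meaningful when is_smi
};

// Returns true when the slow ToBoolean stub would be called; otherwise
// *result holds the branch decision the inlined checks produce.
static bool NeedsToBooleanStub(const ToyValue& v, bool* result) {
  if (v.is_undefined)               { *result = false; return false; }
  if (v.is_true)                    { *result = true;  return false; }
  if (v.is_false)                   { *result = false; return false; }
  if (v.is_smi && v.smi_value == 0) { *result = false; return false; }
  if (v.is_smi)                     { *result = true;  return false; }
  return true;  // any other heap object: let the stub decide
}
// ---------------------------------------------------------------------------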
VisitForStackValue(stmt->tag()); - PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); ZoneList* clauses = stmt->cases(); @@ -875,8 +898,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(r0, ip); __ b(eq, &exit); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r0, ip); + Register null_value = r5; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ cmp(r0, null_value); __ b(eq, &exit); // Convert the object to a JS object. @@ -890,12 +914,62 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(&done_convert); __ push(r0); - // BUG(867): Check cache validity in generated code. This is a fast - // case for the JSObject::IsSimpleEnum cache validity checks. If we - // cannot guarantee cache validity, call the runtime system to check - // cache validity or get the property names in a fixed array. + // Check cache validity in generated code. This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. + Label next, call_runtime; + // Preload a couple of values used in the loop. + Register empty_fixed_array_value = r6; + __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); + Register empty_descriptor_array_value = r7; + __ LoadRoot(empty_descriptor_array_value, + Heap::kEmptyDescriptorArrayRootIndex); + __ mov(r1, r0); + __ bind(&next); + + // Check that there are no elements. Register r1 contains the + // current JS object we've reached through the prototype chain. + __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ cmp(r2, empty_fixed_array_value); + __ b(ne, &call_runtime); + + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in r2 for the subsequent + // prototype load. + __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset)); + __ cmp(r3, empty_descriptor_array_value); + __ b(eq, &call_runtime); + + // Check that there is an enum cache in the non-empty instance + // descriptors (r3). This is the case if the next enumeration + // index field does not contain a smi. + __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset)); + __ JumpIfSmi(r3, &call_runtime); + + // For all objects but the receiver, check that the cache is empty. + Label check_prototype; + __ cmp(r1, r0); + __ b(eq, &check_prototype); + __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset)); + __ cmp(r3, empty_fixed_array_value); + __ b(ne, &call_runtime); + + // Load the prototype from the map and loop if non-null. + __ bind(&check_prototype); + __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset)); + __ cmp(r1, null_value); + __ b(ne, &next); + + // The enum cache is valid. Load the map of the object being + // iterated over and use the cache for the iteration. + Label use_cache; + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ b(&use_cache); // Get the set of properties to enumerate. + __ bind(&call_runtime); __ push(r0); // Duplicate the enumerable object on the stack. __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); @@ -910,6 +984,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ b(ne, &fixed_array); // We got a map in register r0. Get the enumeration cache from it. 
+ __ bind(&use_cache); __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset)); __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); @@ -998,8 +1073,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { void FullCodeGenerator::EmitNewClosure(Handle info, bool pretenure) { // Use the fast case closure allocation code that allocates in new - // space for nested functions that don't need literals cloning. - if (scope()->is_function_scope() && + // space for nested functions that don't need literals cloning. If + // we're running with the --always-opt or the --prepare-always-opt + // flag, we need to use the runtime function so that the new function + // we are creating here gets a chance to have its code optimized and + // doesn't just get a copy of the existing unoptimized code. + if (!FLAG_always_opt && + !FLAG_prepare_always_opt && + scope()->is_function_scope() && info->num_literals() == 0 && !pretenure) { FastNewClosureStub stub; @@ -1027,7 +1108,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( Slot* slot, Label* slow) { ASSERT(slot->type() == Slot::CONTEXT); - Register current = cp; + Register context = cp; Register next = r3; Register temp = r4; @@ -1035,22 +1116,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( if (s->num_heap_slots() > 0) { if (s->calls_eval()) { // Check that extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); } - __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX)); __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); // Walk the rest of the chain without clobbering cp. - current = next; + context = next; } } // Check that last extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); - __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX)); - return ContextOperand(temp, slot->index()); + + // This function is used only for loads, not stores, so it's safe to + // return an cp-based operand (the write barrier cannot be allowed to + // destroy the cp register). + return ContextOperand(context, slot->index()); } @@ -1250,18 +1334,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { Comment cmnt(masm_, "[ RegExpLiteral"); Label materialized; // Registers will be used as follows: + // r5 = materialized value (RegExp literal) // r4 = JS function, literals array // r3 = literal index // r2 = RegExp pattern // r1 = RegExp flags - // r0 = temp + materialized value (RegExp literal) + // r0 = RegExp literal clone __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); int literal_offset = FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; - __ ldr(r0, FieldMemOperand(r4, literal_offset)); + __ ldr(r5, FieldMemOperand(r4, literal_offset)); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r0, ip); + __ cmp(r5, ip); __ b(ne, &materialized); // Create regexp literal using runtime function. 
@@ -1271,20 +1356,27 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ mov(r1, Operand(expr->flags())); __ Push(r4, r3, r2, r1); __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); + __ mov(r5, r0); __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; - __ push(r0); + Label allocated, runtime_allocate; + __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ push(r5); __ mov(r0, Operand(Smi::FromInt(size))); __ push(r0); __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + __ pop(r5); + __ bind(&allocated); // After this, registers are used as follows: // r0: Newly allocated regexp. - // r1: Materialized regexp. + // r5: Materialized regexp. // r2: temp. - __ pop(r1); - __ CopyFields(r0, r1, r2.bit(), size / kPointerSize); + __ CopyFields(r0, r5, r2.bit(), size / kPointerSize); context()->Plug(r0); } @@ -1350,7 +1442,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ CallRuntime(Runtime::kSetProperty, 3); + __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ push(r0); + __ CallRuntime(Runtime::kSetProperty, 4); } else { __ Drop(3); } @@ -1528,14 +1622,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } Token::Value op = expr->binary_op(); - ConstantOperand constant = ShouldInlineSmiCase(op) - ? GetConstantOperand(op, expr->target(), expr->value()) - : kNoConstants; - ASSERT(constant == kRightConstant || constant == kNoConstants); - if (constant == kNoConstants) { - __ push(r0); // Left operand goes on the stack. - VisitForAccumulatorValue(expr->value()); - } + __ push(r0); // Left operand goes on the stack. + VisitForAccumulatorValue(expr->value()); OverwriteMode mode = expr->value()->ResultOverwriteAllowed() ? OVERWRITE_RIGHT @@ -1547,8 +1635,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { op, mode, expr->target(), - expr->value(), - constant); + expr->value()); } else { EmitBinaryOp(op, mode); } @@ -1601,11 +1688,99 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, Token::Value op, OverwriteMode mode, - Expression* left, - Expression* right, - ConstantOperand constant) { - ASSERT(constant == kNoConstants); // Only handled case. - EmitBinaryOp(op, mode); + Expression* left_expr, + Expression* right_expr) { + Label done, smi_case, stub_call; + + Register scratch1 = r2; + Register scratch2 = r3; + + // Get the arguments. + Register left = r1; + Register right = r0; + __ pop(left); + + // Perform combined smi check on both operands. + __ orr(scratch1, left, Operand(right)); + STATIC_ASSERT(kSmiTag == 0); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(scratch1, &smi_case); + + __ bind(&stub_call); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), &patch_site); + __ jmp(&done); + + __ bind(&smi_case); + // Smi case. This code works the same way as the smi-smi case in the type + // recording binary operation stub, see + // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments. 
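// ---------------------------------------------------------------------------
// Illustration (not part of the patch): the overflow and minus-zero checks the
// smi fast cases below rely on, in portable C++. The real code operates on
// tagged values (so the same 32-bit overflow test also bounds the smi range);
// the helpers here use raw int32_t operands and report whether the stub
// fallback would be taken.
#include <cstdint>

static bool AddWouldCallStub(int32_t left, int32_t right, int32_t* result) {
  int64_t sum = static_cast<int64_t>(left) + right;
  *result = static_cast<int32_t>(sum);
  return sum != *result;  // signed overflow, the "b(vs, &stub_call)" case
}

static bool MulWouldCallStub(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * right;
  *result = static_cast<int32_t>(product);
  if (product != *result) return true;  // high word is not the sign extension
  // A zero product whose operand sum is negative would have to be -0, which a
  // smi cannot represent, so that case also goes to the stub.
  return *result == 0 && static_cast<int64_t>(left) + right < 0;
}
// ---------------------------------------------------------------------------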
+ switch (op) { + case Token::SAR: + __ b(&stub_call); + __ GetLeastBitsFromSmi(scratch1, right, 5); + __ mov(right, Operand(left, ASR, scratch1)); + __ bic(right, right, Operand(kSmiTagMask)); + break; + case Token::SHL: { + __ b(&stub_call); + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSL, scratch2)); + __ add(scratch2, scratch1, Operand(0x40000000), SetCC); + __ b(mi, &stub_call); + __ SmiTag(right, scratch1); + break; + } + case Token::SHR: { + __ b(&stub_call); + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSR, scratch2)); + __ tst(scratch1, Operand(0xc0000000)); + __ b(ne, &stub_call); + __ SmiTag(right, scratch1); + break; + } + case Token::ADD: + __ add(scratch1, left, Operand(right), SetCC); + __ b(vs, &stub_call); + __ mov(right, scratch1); + break; + case Token::SUB: + __ sub(scratch1, left, Operand(right), SetCC); + __ b(vs, &stub_call); + __ mov(right, scratch1); + break; + case Token::MUL: { + __ SmiUntag(ip, right); + __ smull(scratch1, scratch2, left, ip); + __ mov(ip, Operand(scratch1, ASR, 31)); + __ cmp(ip, Operand(scratch2)); + __ b(ne, &stub_call); + __ tst(scratch1, Operand(scratch1)); + __ mov(right, Operand(scratch1), LeaveCC, ne); + __ b(ne, &done); + __ add(scratch2, right, Operand(left), SetCC); + __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ b(mi, &stub_call); + break; + } + case Token::BIT_OR: + __ orr(right, left, Operand(right)); + break; + case Token::BIT_AND: + __ and_(right, left, Operand(right)); + break; + case Token::BIT_XOR: + __ eor(right, left, Operand(right)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(r0); } @@ -1650,18 +1825,32 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { __ mov(r1, r0); __ pop(r0); // Restore value. __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } case KEYED_PROPERTY: { __ push(r0); // Preserve value. - VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ mov(r1, r0); - __ pop(r2); + if (prop->is_synthetic()) { + ASSERT(prop->obj()->AsVariableProxy() != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + { AccumulatorValueContext for_object(this); + EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + } + __ mov(r2, r0); + __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ mov(r1, r0); + __ pop(r2); + } __ pop(r0); // Restore value. - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } @@ -1685,39 +1874,65 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // r2, and the global object in r1. __ mov(r2, Operand(var->name())); __ ldr(r1, GlobalObjectOperand()); - Handle ic(Builtins::builtin(is_strict() - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? 
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); - } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { - // Perform the assignment for non-const variables and for initialization - // of const variables. Const assignments are simply skipped. - Label done; + } else if (op == Token::INIT_CONST) { + // Like var declarations, const declarations are hoisted to function + // scope. However, unlike var initializers, const initializers are able + // to drill a hole to that function context, even from inside a 'with' + // context. We thus bypass the normal static scope lookup. + Slot* slot = var->AsSlot(); + Label skip; + switch (slot->type()) { + case Slot::PARAMETER: + // No const parameters. + UNREACHABLE(); + break; + case Slot::LOCAL: + // Detect const reinitialization by checking for the hole value. + __ ldr(r1, MemOperand(fp, SlotOffset(slot))); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &skip); + __ str(result_register(), MemOperand(fp, SlotOffset(slot))); + break; + case Slot::CONTEXT: { + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ ldr(r2, ContextOperand(r1, slot->index())); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r2, ip); + __ b(ne, &skip); + __ str(r0, ContextOperand(r1, slot->index())); + int offset = Context::SlotOffset(slot->index()); + __ mov(r3, r0); // Preserve the stored value in r0. + __ RecordWrite(r1, Operand(offset), r3, r2); + break; + } + case Slot::LOOKUP: + __ push(r0); + __ mov(r0, Operand(slot->var()->name())); + __ Push(cp, r0); // Context and name. + __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); + break; + } + __ bind(&skip); + + } else if (var->mode() != Variable::CONST) { + // Perform the assignment for non-const variables. Const assignments + // are simply skipped. Slot* slot = var->AsSlot(); switch (slot->type()) { case Slot::PARAMETER: case Slot::LOCAL: - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r1, MemOperand(fp, SlotOffset(slot))); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r1, ip); - __ b(ne, &done); - } // Perform the assignment. __ str(result_register(), MemOperand(fp, SlotOffset(slot))); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, r1); - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r2, target); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r2, ip); - __ b(ne, &done); - } // Perform the assignment and issue the write barrier. __ str(result_register(), target); // RecordWrite may destroy all its register arguments. @@ -1728,20 +1943,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, } case Slot::LOOKUP: - // Call the runtime for the assignment. The runtime will ignore - // const reinitialization. + // Call the runtime for the assignment. __ push(r0); // Value. - __ mov(r0, Operand(slot->var()->name())); - __ Push(cp, r0); // Context and name. - if (op == Token::INIT_CONST) { - // The runtime will ignore const redeclaration. - __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); - } else { - __ CallRuntime(Runtime::kStoreContextSlot, 3); - } + __ mov(r1, Operand(slot->var()->name())); + __ mov(r0, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(cp, r1, r0); // Context, name, strict mode. 
+ __ CallRuntime(Runtime::kStoreContextSlot, 4); break; } - __ bind(&done); } } @@ -1774,7 +1983,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { __ pop(r1); } - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -1818,7 +2029,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { __ pop(r2); } - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -1933,6 +2146,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { } +void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, + int arg_count) { + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + } else { + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + } + __ push(r1); + + // Push the receiver of the enclosing function and do runtime call. + __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); + __ push(r1); + // Push the strict mode flag. + __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + __ push(r1); + + __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP + ? Runtime::kResolvePossiblyDirectEvalNoLookup + : Runtime::kResolvePossiblyDirectEval, 4); +} + + void FullCodeGenerator::VisitCall(Call* expr) { #ifdef DEBUG // We want to verify that RecordJSReturnSite gets called on all paths @@ -1962,26 +2198,31 @@ void FullCodeGenerator::VisitCall(Call* expr) { VisitForStackValue(args->at(i)); } - // Push copy of the function - found below the arguments. - __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ push(r1); - - // Push copy of the first argument or undefined if it doesn't exist. - if (arg_count > 0) { - __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); - __ push(r1); - } else { - __ push(r2); + // If we know that eval can only be shadowed by eval-introduced + // variables we attempt to load the global eval function directly + // in generated code. If we succeed, there is no need to perform a + // context lookup in the runtime system. + Label done; + if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { + Label slow; + EmitLoadGlobalSlotCheckExtensions(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow); + // Push the function and resolve eval. + __ push(r0); + EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count); + __ jmp(&done); + __ bind(&slow); } - // Push the receiver of the enclosing function and do runtime call. - __ ldr(r1, - MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); - __ push(r1); - // Push the strict mode flag. - __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + // Push copy of the function (found below the arguments) and + // resolve eval. + __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ push(r1); - __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); + EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count); + if (done.is_linked()) { + __ bind(&done); + } // The runtime call returns a pair of values in r0 (function) and // r1 (receiver). 
Touch up the stack with the right values. @@ -2796,37 +3037,43 @@ void FullCodeGenerator::EmitStringCompare(ZoneList* args) { void FullCodeGenerator::EmitMathSin(ZoneList* args) { - // Load the argument on the stack and call the runtime. + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sin, 1); + __ CallStub(&stub); context()->Plug(r0); } void FullCodeGenerator::EmitMathCos(ZoneList* args) { - // Load the argument on the stack and call the runtime. + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_cos, 1); + __ CallStub(&stub); context()->Plug(r0); } -void FullCodeGenerator::EmitMathSqrt(ZoneList* args) { - // Load the argument on the stack and call the runtime function. +void FullCodeGenerator::EmitMathLog(ZoneList* args) { + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); + __ CallStub(&stub); context()->Plug(r0); } -void FullCodeGenerator::EmitMathLog(ZoneList* args) { +void FullCodeGenerator::EmitMathSqrt(ZoneList* args) { // Load the argument on the stack and call the runtime function. ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); + __ CallRuntime(Runtime::kMath_sqrt, 1); context()->Plug(r0); } @@ -2866,7 +3113,79 @@ void FullCodeGenerator::EmitSwapElements(ZoneList* args) { VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); + Label done; + Label slow_case; + Register object = r0; + Register index1 = r1; + Register index2 = r2; + Register elements = r3; + Register scratch1 = r4; + Register scratch2 = r5; + + __ ldr(object, MemOperand(sp, 2 * kPointerSize)); + // Fetch the map and check if array is in fast case. + // Check that object doesn't require security checks and + // has no indexed interceptor. + __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow_case); + // Map is now in scratch1. + + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); + __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); + __ b(ne, &slow_case); + + // Check the object's elements are in fast case and writable. + __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset)); + __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(scratch1, ip); + __ b(ne, &slow_case); + + // Check that both indices are smis. + __ ldr(index1, MemOperand(sp, 1 * kPointerSize)); + __ ldr(index2, MemOperand(sp, 0)); + __ JumpIfNotBothSmi(index1, index2, &slow_case); + + // Check that both indices are valid. + __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset)); + __ cmp(scratch1, index1); + __ cmp(scratch1, index2, hi); + __ b(ls, &slow_case); + + // Bring the address of the elements into index1 and index2. 
+ __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(index1, + scratch1, + Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(index2, + scratch1, + Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize)); + + // Swap elements. + __ ldr(scratch1, MemOperand(index1, 0)); + __ ldr(scratch2, MemOperand(index2, 0)); + __ str(scratch1, MemOperand(index2, 0)); + __ str(scratch2, MemOperand(index1, 0)); + + Label new_space; + __ InNewSpace(elements, scratch1, eq, &new_space); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + __ mov(scratch1, elements); + __ RecordWriteHelper(elements, index1, scratch2); + __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. + + __ bind(&new_space); + // We are done. Drop elements from the stack, and return undefined. + __ Drop(3); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ jmp(&done); + + __ bind(&slow_case); __ CallRuntime(Runtime::kSwapElements, 3); + + __ bind(&done); context()->Plug(r0); } @@ -2985,16 +3304,248 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList* args) { void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList* args) { ASSERT(args->length() == 1); VisitForAccumulatorValue(args->at(0)); + + if (FLAG_debug_code) { + __ AbortIfNotString(r0); + } + __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); __ IndexFromHash(r0, r0); + context()->Plug(r0); } void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { + Label bailout, done, one_char_separator, long_separator, + non_trivial_array, not_size_one_array, loop, + empty_separator_loop, one_char_separator_loop, + one_char_separator_loop_entry, long_separator_loop; + + ASSERT(args->length() == 2); + VisitForStackValue(args->at(1)); + VisitForAccumulatorValue(args->at(0)); + + // All aliases of the same register have disjoint lifetimes. + Register array = r0; + Register elements = no_reg; // Will be r0. + Register result = no_reg; // Will be r0. + Register separator = r1; + Register array_length = r2; + Register result_pos = no_reg; // Will be r2 + Register string_length = r3; + Register string = r4; + Register element = r5; + Register elements_end = r6; + Register scratch1 = r7; + Register scratch2 = r9; + + // Separator operand is on the stack. + __ pop(separator); + + // Check that the array is a JSArray. + __ JumpIfSmi(array, &bailout); + __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE); + __ b(ne, &bailout); + + // Check that the array has fast elements. + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ tst(scratch2, Operand(1 << Map::kHasFastElements)); + __ b(eq, &bailout); + + // If the array has length zero, return the empty string. + __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); + __ SmiUntag(array_length, SetCC); + __ b(ne, &non_trivial_array); + __ LoadRoot(r0, Heap::kEmptyStringRootIndex); + __ b(&done); + + __ bind(&non_trivial_array); + + // Get the FixedArray containing array's elements. + elements = array; + __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset)); + array = no_reg; // End of array's live range. + + // Check that all array elements are sequential ASCII strings, and + // accumulate the sum of their lengths, as a smi-encoded value. 
+ __ mov(string_length, Operand(0)); + __ add(element, + elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); + // Loop condition: while (element < elements_end). + // Live values in registers: + // elements: Fixed array of strings. + // array_length: Length of the fixed array of strings (not smi) + // separator: Separator string + // string_length: Accumulated sum of string lengths (smi). + // element: Current array element. + // elements_end: Array end. + if (FLAG_debug_code) { + __ cmp(array_length, Operand(0)); + __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin"); + } + __ bind(&loop); + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ JumpIfSmi(string, &bailout); + __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); + __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset)); + __ add(string_length, string_length, Operand(scratch1)); + __ b(vs, &bailout); + __ cmp(element, elements_end); + __ b(lt, &loop); + + // If array_length is 1, return elements[0], a string. + __ cmp(array_length, Operand(1)); + __ b(ne, ¬_size_one_array); + __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); + __ b(&done); + + __ bind(¬_size_one_array); + + // Live values in registers: + // separator: Separator string + // array_length: Length of the array. + // string_length: Sum of string lengths (smi). + // elements: FixedArray of strings. + + // Check that the separator is a flat ASCII string. + __ JumpIfSmi(separator, &bailout); + __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); + __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); + + // Add (separator length times array_length) - separator length to the + // string_length to get the length of the result string. array_length is not + // smi but the other values are, so the result is a smi + __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ sub(string_length, string_length, Operand(scratch1)); + __ smull(scratch2, ip, array_length, scratch1); + // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are + // zero. + __ cmp(ip, Operand(0)); + __ b(ne, &bailout); + __ tst(scratch2, Operand(0x80000000)); + __ b(ne, &bailout); + __ add(string_length, string_length, Operand(scratch2)); + __ b(vs, &bailout); + __ SmiUntag(string_length); + + // Get first element in the array to free up the elements register to be used + // for the result. + __ add(element, + elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + result = elements; // End of live range for elements. + elements = no_reg; + // Live values in registers: + // element: First array element + // separator: Separator string + // string_length: Length of result string (not smi) + // array_length: Length of the array. + __ AllocateAsciiString(result, + string_length, + scratch1, + scratch2, + elements_end, + &bailout); + // Prepare for looping. Set up elements_end to end of the array. Set + // result_pos to the position of the result where to write the first + // character. + __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); + result_pos = array_length; // End of live range for array_length. 
+ array_length = no_reg; + __ add(result_pos, + result, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + + // Check the length of the separator. + __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ cmp(scratch1, Operand(Smi::FromInt(1))); + __ b(eq, &one_char_separator); + __ b(gt, &long_separator); + + // Empty separator case + __ bind(&empty_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + + // Copy next array element to the result. + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + __ cmp(element, elements_end); + __ b(lt, &empty_separator_loop); // End while (element < elements_end). + ASSERT(result.is(r0)); + __ b(&done); + + // One-character separator case + __ bind(&one_char_separator); + // Replace separator with its ascii character value. + __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize)); + // Jump into the loop after the code that copies the separator, so the first + // element is not preceded by a separator + __ jmp(&one_char_separator_loop_entry); + + __ bind(&one_char_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + // separator: Single separator ascii char (in lower byte). + + // Copy the separator character to the result. + __ strb(separator, MemOperand(result_pos, 1, PostIndex)); + + // Copy next array element to the result. + __ bind(&one_char_separator_loop_entry); + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + __ cmp(element, elements_end); + __ b(lt, &one_char_separator_loop); // End while (element < elements_end). + ASSERT(result.is(r0)); + __ b(&done); + + // Long separator case (separator is more than one character). Entry is at the + // label long_separator below. + __ bind(&long_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + // separator: Separator string. + + // Copy the separator to the result. + __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, + separator, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + + __ bind(&long_separator); + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + __ cmp(element, elements_end); + __ b(lt, &long_separator_loop); // End while (element < elements_end). 
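The three copy loops above are the last pieces of the join fast path. As a reference for the overall control flow, here is a rough scalar C++ model of what the generated code computes; the function name and the 2^30 length cap standing in for the smi-overflow bailouts are illustrative assumptions, not V8 definitions:

    #include <cstddef>
    #include <optional>
    #include <string>
    #include <vector>

    inline std::optional<std::string> FastAsciiJoin(const std::vector<std::string>& parts,
                                                    const std::string& sep) {
      if (parts.empty()) return std::string();       // empty array => empty string
      // Pass 1: sum the element lengths plus the separators, bailing out (nullopt)
      // where the generated code would overflow the smi range and jump to bailout.
      const size_t kMaxLength = size_t(1) << 30;
      size_t length = sep.size() * (parts.size() - 1);
      for (const std::string& s : parts) length += s.size();
      if (length > kMaxLength) return std::nullopt;
      if (parts.size() == 1) return parts[0];        // single element => that element
      // Pass 2: allocate the result once, then copy elements and separators in
      // place (the code above specializes empty, one-char and longer separators).
      std::string result;
      result.reserve(length);
      for (size_t i = 0; i < parts.size(); ++i) {
        if (i != 0) result += sep;
        result += parts[i];
      }
      return result;
    }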
+ ASSERT(result.is(r0)); + __ b(&done); + + __ bind(&bailout); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ bind(&done); context()->Plug(r0); - return; } @@ -3043,19 +3594,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); Property* prop = expr->expression()->AsProperty(); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - if (prop == NULL && var == NULL) { - // Result of deleting non-property, non-variable reference is true. - // The subexpression may have side effects. - VisitForEffect(expr->expression()); - context()->Plug(true); - } else if (var != NULL && - !var->is_global() && - var->AsSlot() != NULL && - var->AsSlot()->type() != Slot::LOOKUP) { - // Result of deleting non-global, non-dynamic variables is false. - // The subexpression does not have side effects. - context()->Plug(false); - } else if (prop != NULL) { + + if (prop != NULL) { if (prop->is_synthetic()) { // Result of deleting parameters is false, even when they rewrite // to accesses on the arguments object. @@ -3063,23 +3603,41 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } else { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); + __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + __ push(r1); __ InvokeBuiltin(Builtins::DELETE, CALL_JS); context()->Plug(r0); } - } else if (var->is_global()) { - __ ldr(r1, GlobalObjectOperand()); - __ mov(r0, Operand(var->name())); - __ Push(r1, r0); - __ InvokeBuiltin(Builtins::DELETE, CALL_JS); - context()->Plug(r0); + } else if (var != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); + if (var->is_global()) { + __ ldr(r2, GlobalObjectOperand()); + __ mov(r1, Operand(var->name())); + __ mov(r0, Operand(Smi::FromInt(kNonStrictMode))); + __ Push(r2, r1, r0); + __ InvokeBuiltin(Builtins::DELETE, CALL_JS); + context()->Plug(r0); + } else if (var->AsSlot() != NULL && + var->AsSlot()->type() != Slot::LOOKUP) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + context()->Plug(false); + } else { + // Non-global variable. Call the runtime to try to delete from the + // context where the variable was introduced. + __ push(context_register()); + __ mov(r2, Operand(var->name())); + __ push(r2); + __ CallRuntime(Runtime::kDeleteContextSlot, 2); + context()->Plug(r0); + } } else { - // Non-global variable. Call the runtime to try to delete from the - // context where the variable was introduced. - __ push(context_register()); - __ mov(r2, Operand(var->name())); - __ push(r2); - __ CallRuntime(Runtime::kDeleteContextSlot, 2); - context()->Plug(r0); + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + context()->Plug(true); } break; } @@ -3093,17 +3651,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - - // Notice that the labels are swapped. 
- context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + + // Notice that the labels are swapped. + context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } @@ -3135,9 +3699,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::SUB, - overwrite, - NO_UNARY_FLAGS); + GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS); // GenericUnaryOpStub expects the argument to be in the // accumulator register r0. VisitForAccumulatorValue(expr->expression()); @@ -3270,13 +3832,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Inline smi case if we are in a loop. Label stub_call, done; + JumpPatchSite patch_site(masm_); + int count_value = expr->op() == Token::INC ? 1 : -1; if (ShouldInlineSmiCase(expr->op())) { __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC); __ b(vs, &stub_call); // We could eliminate this smi check if we split the code at // the first smi check before calling ToNumber. - __ JumpIfSmi(r0, &done); + patch_site.EmitJumpIfSmi(r0, &done); + __ bind(&stub_call); // Call stub. Undo operation first. __ sub(r0, r0, Operand(Smi::FromInt(count_value))); @@ -3286,8 +3851,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Record position before stub call. SetSourcePosition(expr->position()); - GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0); - __ CallStub(&stub); + TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE); + EmitCallIC(stub.GetCode(), &patch_site); __ bind(&done); // Store the value returned in r0. @@ -3315,7 +3880,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case NAMED_PROPERTY: { __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); __ pop(r1); - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -3330,7 +3897,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case KEYED_PROPERTY: { __ pop(r1); // Key. __ pop(r2); // Receiver. - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle ic(Builtins::builtin( + is_strict() ? 
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -3408,71 +3977,52 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); if (check->Equals(Heap::number_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_true); + __ JumpIfSmi(r0, if_true); __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); } else if (check->Equals(Heap::string_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); + __ JumpIfSmi(r0, if_false); // Check for undetectable objects => false. - __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE); + __ b(ge, if_false); __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); - __ b(eq, if_false); - __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - __ cmp(r1, Operand(FIRST_NONSTRING_TYPE)); - Split(lt, if_true, if_false, fall_through); + __ tst(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); } else if (check->Equals(Heap::boolean_symbol())) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kTrueValueRootIndex); __ b(eq, if_true); - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kFalseValueRootIndex); Split(eq, if_true, if_false, fall_through); } else if (check->Equals(Heap::undefined_symbol())) { - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(eq, if_true); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); + __ JumpIfSmi(r0, if_false); // Check for undetectable objects => true. __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); - Split(eq, if_true, if_false, fall_through); + __ tst(r1, Operand(1 << Map::kIsUndetectable)); + Split(ne, if_true, if_false, fall_through); + } else if (check->Equals(Heap::function_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); - __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE); - __ b(eq, if_true); - // Regular expressions => 'function' (they are callable). - __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE); - Split(eq, if_true, if_false, fall_through); + __ JumpIfSmi(r0, if_false); + __ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE); + Split(ge, if_true, if_false, fall_through); + } else if (check->Equals(Heap::object_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r0, ip); + __ JumpIfSmi(r0, if_false); + __ CompareRoot(r0, Heap::kNullValueRootIndex); __ b(eq, if_true); - // Regular expressions => 'function', not 'object'. - __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE); - __ b(eq, if_false); - // Check for undetectable objects => false. 
- __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ cmp(r0, Operand(1 << Map::kIsUndetectable)); - __ b(eq, if_false); // Check for JS objects => true. - __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset)); - __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE)); - __ b(lt, if_false); - __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE)); - Split(le, if_true, if_false, fall_through); + __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); + __ b(lo, if_false); + __ CompareInstanceType(r0, r1, FIRST_FUNCTION_CLASS_TYPE); + __ b(hs, if_false); + // Check for undetectable objects => false. + __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); + __ tst(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); } else { if (if_false != fall_through) __ jmp(if_false); } @@ -3644,11 +4194,43 @@ Register FullCodeGenerator::context_register() { void FullCodeGenerator::EmitCallIC(Handle ic, RelocInfo::Mode mode) { ASSERT(mode == RelocInfo::CODE_TARGET || mode == RelocInfo::CODE_TARGET_CONTEXT); + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1, r1, r2); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1, r1, r2); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2); + default: + break; + } + __ Call(ic, mode); } void FullCodeGenerator::EmitCallIC(Handle ic, JumpPatchSite* patch_site) { + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1, r1, r2); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1, r1, r2); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2); + default: + break; + } + __ Call(ic, RelocInfo::CODE_TARGET); if (patch_site != NULL && patch_site->is_bound()) { patch_site->EmitPatchInfo(); diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc index 8c76458e..0fc68187 100644 --- a/src/arm/ic-arm.cc +++ b/src/arm/ic-arm.cc @@ -115,6 +115,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm, Register name, Register scratch1, Register scratch2) { + // Assert that name contains a string. + if (FLAG_debug_code) __ AbortIfNotString(name); + // Compute the capacity mask. const int kCapacityOffset = StringDictionary::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize; @@ -843,7 +846,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // -- lr : return address // ----------------------------------- + // Check if the name is a string. + Label miss; + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &miss); + __ IsObjectJSStringType(r2, r0, &miss); + GenerateCallNormal(masm, argc); + __ bind(&miss); GenerateMiss(masm, argc); } @@ -1390,7 +1400,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { +void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key @@ -1401,11 +1412,16 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { // Push receiver, key and value for runtime call. 
__ Push(r2, r1, r0); - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode. + __ Push(r1, r0); + + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } -void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { +void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key @@ -1460,29 +1476,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // r0: value. // r1: key. // r2: receiver. - GenerateRuntimeSetProperty(masm); + GenerateRuntimeSetProperty(masm, strict_mode); // Check whether the elements is a pixel array. // r4: elements map. __ bind(&check_pixel_array); - __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); - __ cmp(r4, ip); - __ b(ne, &slow); - // Check that the value is a smi. If a conversion is needed call into the - // runtime to convert and clamp. - __ JumpIfNotSmi(value, &slow); - __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key. - __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset)); - __ cmp(r4, Operand(ip)); - __ b(hs, &slow); - __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. - __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255]. - - // Get the pointer to the external array. This clobbers elements. - __ ldr(elements, - FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); - __ strb(r5, MemOperand(elements, r4)); // Elements is now external array. - __ Ret(); + GenerateFastPixelArrayStore(masm, + r2, + r1, + r0, + elements, + r4, + r5, + r6, + false, + false, + NULL, + &slow, + &slow, + &slow); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one @@ -1534,7 +1546,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { void StoreIC::GenerateMegamorphic(MacroAssembler* masm, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : receiver @@ -1546,7 +1558,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, NOT_IN_LOOP, MONOMORPHIC, - extra_ic_state); + strict_mode); StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); // Cache miss: Jump to runtime. @@ -1640,7 +1652,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { } -void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { +void StoreIC::GenerateGlobalProxy(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : receiver @@ -1650,8 +1663,12 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { __ Push(r1, r2, r0); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt(strict_mode))); + __ Push(r1, r0); + // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc index 82de5d3e..e79465cb 100644 --- a/src/arm/lithium-arm.cc +++ b/src/arm/lithium-arm.cc @@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -355,7 +355,16 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ -364,8 +373,18 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) { } -LChunk::LChunk(HGraph* graph) +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +LChunk::LChunk(CompilationInfo* info, HGraph* graph) : spill_slot_count_(0), + info_(info), graph_(graph), instructions_(32), pointer_maps_(8), @@ -456,7 +475,7 @@ int LChunk::GetParameterStackSlot(int index) const { // shift all parameter indexes down by the number of parameters, and // make sure they end up negative so they are distinguishable from // spill slots. - int result = index - graph()->info()->scope()->num_parameters() - 1; + int result = index - info()->scope()->num_parameters() - 1; ASSERT(result < 0); return result; } @@ -464,7 +483,7 @@ int LChunk::GetParameterStackSlot(int index) const { // A parameter relative to ebp in the arguments stub. int LChunk::ParameterAt(int index) { ASSERT(-1 <= index); // -1 is the receiver. - return (1 + graph()->info()->scope()->num_parameters() - index) * + return (1 + info()->scope()->num_parameters() - index) * kPointerSize; } @@ -503,7 +522,7 @@ Representation LChunk::LookupLiteralRepresentation( LChunk* LChunkBuilder::Build() { ASSERT(is_unused()); - chunk_ = new LChunk(graph()); + chunk_ = new LChunk(info(), graph()); HPhase phase("Building chunk", chunk_); status_ = BUILDING; const ZoneList* blocks = graph()->blocks(); @@ -520,8 +539,8 @@ LChunk* LChunkBuilder::Build() { void LChunkBuilder::Abort(const char* format, ...) 
{ if (FLAG_trace_bailout) { - SmartPointer debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LChunk building in @\"%s\": ", *debug_name); + SmartPointer name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LChunk building in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -855,6 +874,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); + ASSERT(op != Token::MOD); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); LArithmeticD* result = new LArithmeticD(op, left, right); @@ -1136,8 +1156,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( HInstanceOfKnownGlobal* instr) { LInstanceOfKnownGlobal* result = new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4)); - MarkAsSaveDoubles(result); - return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0))); + return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1193,34 +1212,30 @@ LInstruction* LChunkBuilder::DoCallConstantFunction( LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { BuiltinFunctionId op = instr->op(); - LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; - LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); - switch (op) { - case kMathAbs: - return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); - case kMathFloor: - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - case kMathSqrt: - return DefineSameAsFirst(result); - case kMathRound: - Abort("MathRound LUnaryMathOperation not implemented"); - return NULL; - case kMathPowHalf: - Abort("MathPowHalf LUnaryMathOperation not implemented"); - return NULL; - case kMathLog: - Abort("MathLog LUnaryMathOperation not implemented"); - return NULL; - case kMathCos: - Abort("MathCos LUnaryMathOperation not implemented"); - return NULL; - case kMathSin: - Abort("MathSin LUnaryMathOperation not implemented"); - return NULL; - default: - UNREACHABLE(); - return NULL; + if (op == kMathLog || op == kMathSin || op == kMathCos) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL); + return MarkAsCall(DefineFixedDouble(result, d2), instr); + } else { + LOperand* input = UseRegisterAtStart(instr->value()); + LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; + LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); + switch (op) { + case kMathAbs: + return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); + case kMathFloor: + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + case kMathSqrt: + return DefineSameAsFirst(result); + case kMathRound: + return AssignEnvironment(DefineAsRegister(result)); + case kMathPowHalf: + Abort("MathPowHalf LUnaryMathOperation not implemented"); + return NULL; + default: + UNREACHABLE(); + return NULL; + } } } @@ -1418,8 +1433,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LInstruction* LChunkBuilder::DoPower(HPower* instr) { - Abort("LPower instruction not implemented on ARM"); - return NULL; + ASSERT(instr->representation().IsDouble()); + // We call a C function for double power. It can't trigger a GC. 
+ // We need to use fixed result register for the call. + Representation exponent_type = instr->right()->representation(); + ASSERT(instr->left()->representation().IsDouble()); + LOperand* left = UseFixedDouble(instr->left(), d1); + LOperand* right = exponent_type.IsDouble() ? + UseFixedDouble(instr->right(), d2) : + UseFixed(instr->right(), r0); + LPower* result = new LPower(left, right); + return MarkAsCall(DefineFixedDouble(result, d3), + instr, + CAN_DEOPTIMIZE_EAGERLY); } @@ -1491,6 +1517,15 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { } +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LGetCachedArrayIndex(value)); +} + + LInstruction* LChunkBuilder::DoHasCachedArrayIndex( HHasCachedArrayIndex* instr) { ASSERT(instr->value()->representation().IsTagged()); @@ -1700,11 +1735,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context = UseTempRegister(instr->context()); + LOperand* context; LOperand* value; if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); value = UseTempRegister(instr->value()); } else { + context = UseRegister(instr->context()); value = UseRegister(instr->value()); } return new LStoreContextSlot(context, value); @@ -1797,6 +1834,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + Abort("DoStorePixelArrayElement not implemented"); + return NULL; +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* obj = UseFixed(instr->object(), r2); LOperand* key = UseFixed(instr->key(), r1); @@ -1902,8 +1946,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. 
return NULL; } diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h index 8d2573d9..9cbcc3b9 100644 --- a/src/arm/lithium-arm.h +++ b/src/arm/lithium-arm.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -94,6 +92,7 @@ class LCodeGen; V(FixedArrayLength) \ V(FunctionLiteral) \ V(Gap) \ + V(GetCachedArrayIndex) \ V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ @@ -134,6 +133,7 @@ class LCodeGen; V(OuterContext) \ V(Parameter) \ V(PixelArrayLength) \ + V(Power) \ V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ @@ -728,6 +728,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { }; +class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LHasCachedArrayIndex(LOperand* value) { @@ -1046,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> { }; +class LPower: public LTemplateInstruction<1, 2, 0> { + public: + LPower(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(Power, "power") + DECLARE_HYDROGEN_ACCESSOR(Power) +}; + + class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) @@ -1498,32 +1521,22 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 0> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamed(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val) { inputs_[0] = obj; inputs_[1] = val; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1531,25 +1544,35 @@ class LStoreNamedField: public LStoreNamed { }; -class LStoreNamedGeneric: public LStoreNamed { +class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamedGeneric(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } + LStoreNamedGeneric(LOperand* obj, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = val; + } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + Handle name() const { return hydrogen()->name(); } }; -class LStoreKeyed: public 
LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1559,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) -}; + LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = key; + inputs_[2] = val; + } + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") -class LStoreKeyedGeneric: public LStoreKeyed { - public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) { } + virtual void PrintDataTo(StringStream* stream); - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; @@ -1808,7 +1829,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> { class LChunkBuilder; class LChunk: public ZoneObject { public: - explicit LChunk(HGraph* graph); + explicit LChunk(CompilationInfo* info, HGraph* graph); void AddInstruction(LInstruction* instruction, HBasicBlock* block); LConstantOperand* DefineConstantOperand(HConstant* constant); @@ -1821,6 +1842,7 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList* instructions() const { return &instructions_; } void AddGapMove(int index, LOperand* from, LOperand* to); @@ -1857,6 +1879,7 @@ class LChunk: public ZoneObject { private: int spill_slot_count_; + CompilationInfo* info_; HGraph* const graph_; ZoneList instructions_; ZoneList pointer_maps_; @@ -1866,8 +1889,9 @@ class LChunk: public ZoneObject { class LChunkBuilder BASE_EMBEDDED { public: - LChunkBuilder(HGraph* graph, LAllocator* allocator) + LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) : chunk_(NULL), + info_(info), graph_(graph), status_(UNUSED), current_instruction_(NULL), @@ -1896,6 +1920,7 @@ class LChunkBuilder BASE_EMBEDDED { }; LChunk* chunk() const { return chunk_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } bool is_unused() const { return status_ == UNUSED; } @@ -2002,6 +2027,7 @@ class LChunkBuilder BASE_EMBEDDED { HArithmeticBinaryOperation* instr); LChunk* chunk_; + CompilationInfo* info_; HGraph* const graph_; Status status_; HInstruction* current_instruction_; diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 057ac241..afe90159 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED 
OF THE POSSIBILITY OF SUCH DAMAGE. #include "arm/lithium-codegen-arm.h" +#include "arm/lithium-gap-resolver-arm.h" #include "code-stubs.h" #include "stub-cache.h" @@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator { }; -class LGapNode: public ZoneObject { - public: - explicit LGapNode(LOperand* operand) - : operand_(operand), resolved_(false), visited_id_(-1) { } - - LOperand* operand() const { return operand_; } - bool IsResolved() const { return !IsAssigned() || resolved_; } - void MarkResolved() { - ASSERT(!IsResolved()); - resolved_ = true; - } - int visited_id() const { return visited_id_; } - void set_visited_id(int id) { - ASSERT(id > visited_id_); - visited_id_ = id; - } - - bool IsAssigned() const { return assigned_from_.is_set(); } - LGapNode* assigned_from() const { return assigned_from_.get(); } - void set_assigned_from(LGapNode* n) { assigned_from_.set(n); } - - private: - LOperand* operand_; - SetOncePointer assigned_from_; - bool resolved_; - int visited_id_; -}; - - -LGapResolver::LGapResolver() - : nodes_(32), - identified_cycles_(4), - result_(16), - next_visited_id_(0) { -} - - -const ZoneList* LGapResolver::Resolve( - const ZoneList* moves, - LOperand* marker_operand) { - nodes_.Rewind(0); - identified_cycles_.Rewind(0); - result_.Rewind(0); - next_visited_id_ = 0; - - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) RegisterMove(move); - } - - for (int i = 0; i < identified_cycles_.length(); ++i) { - ResolveCycle(identified_cycles_[i], marker_operand); - } - - int unresolved_nodes; - do { - unresolved_nodes = 0; - for (int j = 0; j < nodes_.length(); j++) { - LGapNode* node = nodes_[j]; - if (!node->IsResolved() && node->assigned_from()->IsResolved()) { - AddResultMove(node->assigned_from(), node); - node->MarkResolved(); - } - if (!node->IsResolved()) ++unresolved_nodes; - } - } while (unresolved_nodes > 0); - return &result_; -} - - -void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) { - AddResultMove(from->operand(), to->operand()); -} - - -void LGapResolver::AddResultMove(LOperand* from, LOperand* to) { - result_.Add(LMoveOperands(from, to)); -} - - -void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) { - ZoneList cycle_operands(8); - cycle_operands.Add(marker_operand); - LGapNode* cur = start; - do { - cur->MarkResolved(); - cycle_operands.Add(cur->operand()); - cur = cur->assigned_from(); - } while (cur != start); - cycle_operands.Add(marker_operand); - - for (int i = cycle_operands.length() - 1; i > 0; --i) { - LOperand* from = cycle_operands[i]; - LOperand* to = cycle_operands[i - 1]; - AddResultMove(from, to); - } -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) { - ASSERT(a != b); - LGapNode* cur = a; - while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) { - cur->set_visited_id(visited_id); - cur = cur->assigned_from(); - } - - return cur == b; -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) { - ASSERT(a != b); - return CanReach(a, b, next_visited_id_++); -} - - -void LGapResolver::RegisterMove(LMoveOperands move) { - if (move.source()->IsConstantOperand()) { - // Constant moves should be last in the machine code. Therefore add them - // first to the result set. 
- AddResultMove(move.source(), move.destination()); - } else { - LGapNode* from = LookupNode(move.source()); - LGapNode* to = LookupNode(move.destination()); - if (to->IsAssigned() && to->assigned_from() == from) { - move.Eliminate(); - return; - } - ASSERT(!to->IsAssigned()); - if (CanReach(from, to)) { - // This introduces a cycle. Save. - identified_cycles_.Add(from); - } - to->set_assigned_from(from); - } -} - - -LGapNode* LGapResolver::LookupNode(LOperand* operand) { - for (int i = 0; i < nodes_.length(); ++i) { - if (nodes_[i]->operand()->Equals(operand)) return nodes_[i]; - } - - // No node found => create a new one. - LGapNode* result = new LGapNode(operand); - nodes_.Add(result); - return result; -} - - #define __ masm()-> bool LCodeGen::GenerateCode() { @@ -230,8 +80,8 @@ void LCodeGen::FinishCode(Handle code) { void LCodeGen::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); + SmartPointer name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LCodeGen in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is in r1. + __ push(r1); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both r0 and cp. It replaces the context + // passed to us. It's saved in the stack and kept live in cp. + __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Copy any necessary parameters into the context. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ ldr(r0, MemOperand(fp, parameter_offset)); + // Store it in the context. + __ mov(r1, Operand(Context::SlotOffset(slot->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); @@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) { MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - // TODO(regis): Revisit. ASSERT(!op->IsRegister()); ASSERT(!op->IsDoubleRegister()); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); @@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const { } +MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { + ASSERT(op->IsDoubleStackSlot()); + int index = op->index(); + if (index >= 0) { + // Local or spill slot. Skip the frame pointer, function, context, + // and the first word of the double in the fixed part of the frame. 
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); + } else { + // Incoming parameter. Skip the return address and the first word of + // the double. + return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); + } +} + + void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation) { if (environment == NULL) return; @@ -671,7 +573,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle code) { Handle data = Factory::NewDeoptimizationInputData(length, TENURED); - data->SetTranslationByteArray(*translations_.CreateByteArray()); + Handle translations = translations_.CreateByteArray(); + data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); Handle literals = @@ -751,6 +654,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -787,116 +696,7 @@ void LCodeGen::DoLabel(LLabel* label) { void LCodeGen::DoParallelMove(LParallelMove* move) { - // d0 must always be a scratch register. - DoubleRegister dbl_scratch = d0; - LUnallocated marker_operand(LUnallocated::NONE); - - Register core_scratch = scratch0(); - bool destroys_core_scratch = false; - - const ZoneList* moves = - resolver_.Resolve(move->move_operands(), &marker_operand); - for (int i = moves->length() - 1; i >= 0; --i) { - LMoveOperands move = moves->at(i); - LOperand* from = move.source(); - LOperand* to = move.destination(); - ASSERT(!from->IsDoubleRegister() || - !ToDoubleRegister(from).is(dbl_scratch)); - ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch)); - ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch)); - ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch)); - if (from == &marker_operand) { - if (to->IsRegister()) { - __ mov(ToRegister(to), core_scratch); - ASSERT(destroys_core_scratch); - } else if (to->IsStackSlot()) { - __ str(core_scratch, ToMemOperand(to)); - ASSERT(destroys_core_scratch); - } else if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), dbl_scratch); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } else if (to == &marker_operand) { - if (from->IsRegister() || from->IsConstantOperand()) { - __ mov(core_scratch, ToOperand(from)); - destroys_core_scratch = true; - } else if (from->IsStackSlot()) { - __ ldr(core_scratch, ToMemOperand(from)); - destroys_core_scratch = true; - } else if (from->IsDoubleRegister()) { - __ vmov(dbl_scratch, ToDoubleRegister(from)); - } else { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? 
- // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - } - } else if (from->IsConstantOperand()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ mov(ip, ToOperand(from)); - __ str(ip, ToMemOperand(to)); - } - } else if (from->IsRegister()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ str(ToRegister(from), ToMemOperand(to)); - } - } else if (to->IsRegister()) { - ASSERT(from->IsStackSlot()); - __ ldr(ToRegister(to), ToMemOperand(from)); - } else if (from->IsStackSlot()) { - ASSERT(to->IsStackSlot()); - __ ldr(ip, ToMemOperand(from)); - __ str(ip, ToMemOperand(to)); - } else if (from->IsDoubleRegister()) { - if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), ToDoubleRegister(from)); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset()); - } - } else if (to->IsDoubleRegister()) { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(ToDoubleRegister(to), ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset()); - } else { - ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } - - if (destroys_core_scratch) { - __ ldr(core_scratch, MemOperand(fp, -kPointerSize)); - } - - LInstruction* next = GetNextInstruction(); - if (next != NULL && next->IsLazyBailout()) { - int pc = masm()->pc_offset(); - safepoints_.SetPcAfterGap(pc); - } + resolver_.Resolve(move); } @@ -966,7 +766,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) { } case CodeStub::TranscendentalCache: { __ ldr(r0, MemOperand(sp, 0)); - TranscendentalCacheStub stub(instr->transcendental_type()); + TranscendentalCacheStub stub(instr->transcendental_type(), + TranscendentalCacheStub::TAGGED); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } @@ -987,7 +788,7 @@ void LCodeGen::DoModI(LModI* instr) { DeferredModI(LCodeGen* codegen, LModI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD); + codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD); } private: LModI* instr_; @@ -1016,7 +817,7 @@ void LCodeGen::DoModI(LModI* instr) { __ bind(&ok); } - // Try a few common cases before using the generic stub. + // Try a few common cases before using the stub. Label call_stub; const int kUnfolds = 3; // Skip if either side is negative. @@ -1044,7 +845,7 @@ void LCodeGen::DoModI(LModI* instr) { __ and_(result, scratch, Operand(left)); __ bind(&call_stub); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. 
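The deoptimization here exists because not every int32 fits in a Smi. A minimal sketch (hypothetical helper, not V8 code) of the check that TrySmiTag performs on 32-bit targets, where a Smi is a 31-bit payload shifted left by one with a zero tag bit:

#include <cstdint>
#include <cstdio>

// Tagging is a doubling that can overflow; overflow means "not a Smi".
bool TrySmiTag(int32_t value, int32_t* tagged) {
  int64_t doubled = static_cast<int64_t>(value) * 2;             // value << 1, widened
  if (doubled < INT32_MIN || doubled > INT32_MAX) return false;  // would not fit
  *tagged = static_cast<int32_t>(doubled);                       // tag bit (LSB) is 0
  return true;
}

int main() {
  int32_t t;
  std::printf("%d\n", TrySmiTag(5, &t));        // 1, t == 10
  std::printf("%d\n", TrySmiTag(1 << 30, &t));  // 0: 2^30 is outside the Smi range
  return 0;
}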
DeferredModI* deferred = new DeferredModI(this, instr); __ TrySmiTag(left, &deoptimize, scratch); @@ -1070,7 +871,7 @@ void LCodeGen::DoDivI(LDivI* instr) { DeferredDivI(LCodeGen* codegen, LDivI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV); + codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } private: LDivI* instr_; @@ -1123,7 +924,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); __ b(eq, &done); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. DeferredDivI* deferred = new DeferredDivI(this, instr); @@ -1145,19 +946,33 @@ void LCodeGen::DoDivI(LDivI* instr) { template -void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op) { +void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op) { Register left = ToRegister(instr->InputAt(0)); Register right = ToRegister(instr->InputAt(1)); __ PushSafepointRegistersAndDoubles(); - GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); + // Move left to r1 and right to r0 for the stub call. + if (left.is(r1)) { + __ Move(r0, right); + } else if (left.is(r0) && right.is(r1)) { + __ Swap(r0, r1, r2); + } else if (left.is(r0)) { + ASSERT(!right.is(r1)); + __ mov(r1, r0); + __ mov(r0, right); + } else { + ASSERT(!left.is(r0) && !right.is(r0)); + __ mov(r0, right); + __ mov(r1, left); + } + TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT); __ CallStub(&stub); RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Overwrite the stored value of r0 with the result of the stub. - __ StoreToSafepointRegistersAndDoublesSlot(r0); + __ StoreToSafepointRegistersAndDoublesSlot(r0, r0); __ PopSafepointRegistersAndDoubles(); } @@ -1413,7 +1228,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ vmov(r2, r3, right); __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); // Move the result in the double result register. - __ vmov(ToDoubleRegister(instr->result()), r0, r1); + __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result())); // Restore r0-r3. 
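The vmov pairs above and GetCFunctionDoubleResult() assume the soft-float ARM EABI convention: a double travels to and from a C function in a core-register pair, low word first on little-endian targets. A small host-side illustration of that word split (a sketch assuming a little-endian host, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.5;
  uint32_t lo, hi;  // lo plays the role of r0, hi the role of r1
  std::memcpy(&lo, &d, sizeof(lo));
  std::memcpy(&hi, reinterpret_cast<const char*>(&d) + sizeof(lo), sizeof(hi));
  // On a little-endian host this prints lo=0x00000000 hi=0x3ff80000.
  std::printf("lo=0x%08x hi=0x%08x\n", lo, hi);
  return 0;
}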
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); @@ -1431,10 +1246,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->InputAt(1)).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); - // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current - // GenericBinaryOpStub: - // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); - GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0); + TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -1896,14 +1708,45 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { } +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + if (FLAG_debug_code) { + __ AbortIfNotString(input); + } + + __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(result, result); +} + + void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { - Abort("DoHasCachedArrayIndex unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); } void LCodeGen::DoHasCachedArrayIndexAndBranch( LHasCachedArrayIndexAndBranch* instr) { - Abort("DoHasCachedArrayIndexAndBranch unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register scratch = scratch0(); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + EmitBranch(true_block, false_block, eq); } @@ -2146,15 +1989,11 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ bind(&before_push_delta); __ BlockConstPoolFor(kAdditionalDelta); __ mov(temp, Operand(delta * kPointerSize)); - __ StoreToSafepointRegisterSlot(temp); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); - ASSERT_EQ(kAdditionalDelta, - masm_->InstructionsGeneratedSince(&before_push_delta)); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + __ StoreToSafepointRegisterSlot(temp, temp); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); // Put the result value into the result register slot and // restore all registers. 
- __ StoreToSafepointRegisterSlot(result); + __ StoreToSafepointRegisterSlot(result, result); __ PopSafepointRegisters(); } @@ -2274,17 +2113,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); - __ ldr(result, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); - __ ldr(result, ContextOperand(result, instr->slot_index())); + __ ldr(result, ContextOperand(context, instr->slot_index())); } void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - __ ldr(context, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); __ str(value, ContextOperand(context, instr->slot_index())); if (instr->needs_write_barrier()) { int offset = Context::SlotOffset(instr->slot_index()); @@ -2603,7 +2438,7 @@ void LCodeGen::CallKnownFunction(Handle function, LInstruction* instr) { // Change context if needed. bool change_context = - (graph()->info()->closure()->context() != function->context()) || + (info()->closure()->context() != function->context()) || scope()->contains_with() || (scope()->num_heap_slots() > 0); if (change_context) { @@ -2687,7 +2522,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { // Set the pointer to the new heap number in tmp. if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input); + __ LoadFromSafepointRegisterSlot(input, input); __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); __ bind(&allocated); @@ -2698,7 +2533,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - __ str(tmp1, masm()->SafepointRegisterSlot(input)); + __ StoreToSafepointRegisterSlot(tmp1, input); __ PopSafepointRegisters(); __ bind(&done); @@ -2752,41 +2587,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { } -// Truncates a double using a specific rounding mode. -// Clears the z flag (ne condition) if an overflow occurs. -void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode, - SwVfpRegister result, - DwVfpRegister double_input, - Register scratch1, - Register scratch2) { - Register prev_fpscr = scratch1; - Register scratch = scratch2; - - // Set custom FPCSR: - // - Set rounding mode. - // - Clear vfp cumulative exception flags. - // - Make sure Flush-to-zero mode control bit is unset. - __ vmrs(prev_fpscr); - __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask | - kVFPRoundingModeMask | - kVFPFlushToZeroMask)); - __ orr(scratch, scratch, Operand(rounding_mode)); - __ vmsr(scratch); - - // Convert the argument to an integer. - __ vcvt_s32_f64(result, - double_input, - kFPSCRRounding); - - // Retrieve FPSCR. - __ vmrs(scratch); - // Restore FPSCR. - __ vmsr(prev_fpscr); - // Check for vfp exceptions. 
- __ tst(scratch, Operand(kVFPExceptionMask)); -} - - void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); @@ -2794,11 +2594,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->TempAt(0)); - EmitVFPTruncate(kRoundToMinusInf, - single_scratch, - input, - scratch1, - scratch2); + __ EmitVFPTruncate(kRoundToMinusInf, + single_scratch, + input, + scratch1, + scratch2); DeoptimizeIf(ne, instr->environment()); // Move the result back to general purpose register r0. @@ -2815,6 +2615,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } +void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch1 = scratch0(); + Register scratch2 = result; + __ EmitVFPTruncate(kRoundToNearest, + double_scratch0().low(), + input, + scratch1, + scratch2); + DeoptimizeIf(ne, instr->environment()); + __ vmov(result, double_scratch0().low()); + + // Test for -0. + Label done; + __ cmp(result, Operand(0)); + __ b(ne, &done); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); +} + + void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input)); @@ -2822,6 +2646,88 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { } +void LCodeGen::DoPower(LPower* instr) { + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + Register scratch = scratch0(); + DoubleRegister result_reg = ToDoubleRegister(instr->result()); + Representation exponent_type = instr->hydrogen()->right()->representation(); + if (exponent_type.IsDouble()) { + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, ToDoubleRegister(right)); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } else if (exponent_type.IsInteger32()) { + ASSERT(ToRegister(right).is(r0)); + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ mov(r2, ToRegister(right)); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ CallCFunction(ExternalReference::power_double_int_function(), 4); + } else { + ASSERT(exponent_type.IsTagged()); + ASSERT(instr->hydrogen()->left()->representation().IsDouble()); + + Register right_reg = ToRegister(right); + + // Check for smi on the right hand side. + Label non_smi, call; + __ JumpIfNotSmi(right_reg, &non_smi); + + // Untag smi and convert it to a double. + __ SmiUntag(right_reg); + SwVfpRegister single_scratch = double_scratch0().low(); + __ vmov(single_scratch, right_reg); + __ vcvt_f64_s32(result_reg, single_scratch); + __ jmp(&call); + + // Heap number map check. + __ bind(&non_smi); + __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, Operand(ip)); + DeoptimizeIf(ne, instr->environment()); + int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag; + __ add(scratch, right_reg, Operand(value_offset)); + __ vldr(result_reg, scratch, 0); + + // Prepare arguments and call C function. 
+ __ bind(&call); + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, result_reg); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } + // Store the result in the result register. + __ GetCFunctionDoubleResult(result_reg); +} + + +void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { + ASSERT(ToDoubleRegister(instr->result()).is(d2)); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { + ASSERT(ToDoubleRegister(instr->result()).is(d2)); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { + ASSERT(ToDoubleRegister(instr->result()).is(d2)); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { switch (instr->op()) { case kMathAbs: @@ -2830,9 +2736,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { case kMathFloor: DoMathFloor(instr); break; + case kMathRound: + DoMathRound(instr); + break; case kMathSqrt: DoMathSqrt(instr); break; + case kMathCos: + DoMathCos(instr); + break; + case kMathSin: + DoMathSin(instr); + break; + case kMathLog: + DoMathLog(instr); + break; default: Abort("Unimplemented type of LUnaryMathOperation."); UNREACHABLE(); @@ -2944,9 +2862,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { // Name is always in r2. __ mov(r2, Operand(instr->name())); - Handle ic(Builtins::builtin(info_->is_strict() - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + info_->is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2988,7 +2906,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->key()).is(r1)); ASSERT(ToRegister(instr->value()).is(r0)); - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle ic(Builtins::builtin( + info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -3129,8 +3049,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ AbortIfNotSmi(r0); } __ SmiUntag(r0); - MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result); - __ str(r0, result_stack_slot); + __ StoreToSafepointRegisterSlot(r0, result); __ PopSafepointRegisters(); } @@ -3211,9 +3130,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // register is stored, as this register is in the pointer map, but contains an // integer value. 
__ mov(ip, Operand(0)); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize)); - + __ StoreToSafepointRegisterSlot(ip, reg); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); @@ -3224,7 +3141,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { __ bind(&done); __ sub(ip, reg, Operand(kHeapObjectTag)); __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); - __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(reg, reg); __ PopSafepointRegisters(); } @@ -3269,8 +3186,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(r0, reg); __ PopSafepointRegisters(); } @@ -3456,30 +3372,36 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->TempAt(0)); - VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf - : kRoundToNearest; + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_input, + scratch1, + scratch2); - EmitVFPTruncate(rounding_mode, - single_scratch, - double_input, - scratch1, - scratch2); // Deoptimize if we had a vfp invalid exception. DeoptimizeIf(ne, instr->environment()); + // Retrieve the result. __ vmov(result_reg, single_scratch); - if (instr->truncating() && - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ cmp(result_reg, Operand(0)); - __ b(ne, &done); - // Check for -0. - __ vmov(scratch1, double_input.high()); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); + if (!instr->truncating()) { + // Convert result back to double and compare with input + // to check if the conversion was exact. + __ vmov(single_scratch, result_reg); + __ vcvt_f64_s32(double_scratch0(), single_scratch); + __ VFPCompareAndSetFlags(double_scratch0(), double_input); DeoptimizeIf(ne, instr->environment()); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ cmp(result_reg, Operand(0)); + __ b(ne, &done); + // Check for -0. 
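Taken together, the exactness test above and the minus-zero test that continues below amount to the following standalone model (a sketch with a hypothetical DoubleToInt32 helper, not V8 code); a false return corresponds to a deoptimization:

#include <cstdint>
#include <cstdio>
#include <cstring>

bool DoubleToInt32(double input, int32_t* result) {
  int32_t truncated = static_cast<int32_t>(input);             // round toward zero
  if (static_cast<double>(truncated) != input) return false;   // conversion was inexact
  if (truncated == 0) {
    uint64_t bits;
    std::memcpy(&bits, &input, sizeof(bits));
    if (bits >> 63) return false;                              // input was -0
  }
  *result = truncated;
  return true;
}

int main() {
  int32_t r;
  std::printf("%d\n", DoubleToInt32(3.0, &r));   // 1
  std::printf("%d\n", DoubleToInt32(3.5, &r));   // 0: inexact
  std::printf("%d\n", DoubleToInt32(-0.0, &r));  // 0: minus zero
  return 0;
}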
+ __ vmov(scratch1, double_input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); - __ bind(&done); + __ bind(&done); + } } } @@ -3750,37 +3672,30 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); if (type_name->Equals(Heap::number_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, true_label); + __ JumpIfSmi(input, true_label); __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(input, Operand(ip)); final_branch_condition = eq; } else if (type_name->Equals(Heap::string_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); - __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); + __ JumpIfSmi(input, false_label); + __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE); + __ b(ge, false_label); __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); - __ b(ne, false_label); - __ CompareInstanceType(input, scratch, FIRST_NONSTRING_TYPE); - final_branch_condition = lo; + final_branch_condition = eq; } else if (type_name->Equals(Heap::boolean_symbol())) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(input, ip); + __ CompareRoot(input, Heap::kTrueValueRootIndex); __ b(eq, true_label); - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(input, ip); + __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = eq; } else if (type_name->Equals(Heap::undefined_symbol())) { - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input, ip); + __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ b(eq, true_label); - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); + __ JumpIfSmi(input, false_label); // Check for undetectable objects => true. __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); @@ -3788,32 +3703,22 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = ne; } else if (type_name->Equals(Heap::function_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); - __ CompareObjectType(input, input, scratch, JS_FUNCTION_TYPE); - __ b(eq, true_label); - // Regular expressions => 'function' (they are callable). - __ CompareInstanceType(input, scratch, JS_REGEXP_TYPE); - final_branch_condition = eq; + __ JumpIfSmi(input, false_label); + __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE); + final_branch_condition = ge; } else if (type_name->Equals(Heap::object_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(input, ip); + __ JumpIfSmi(input, false_label); + __ CompareRoot(input, Heap::kNullValueRootIndex); __ b(eq, true_label); - // Regular expressions => 'function', not 'object'. - __ CompareObjectType(input, input, scratch, JS_REGEXP_TYPE); - __ b(eq, false_label); + __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE); + __ b(lo, false_label); + __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE); + __ b(hs, false_label); // Check for undetectable objects => false. __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); - __ b(ne, false_label); - // Check for JS objects => true. 
- __ CompareInstanceType(input, scratch, FIRST_JS_OBJECT_TYPE); - __ b(lo, false_label); - __ CompareInstanceType(input, scratch, LAST_JS_OBJECT_TYPE); - final_branch_condition = ls; + final_branch_condition = eq; } else { final_branch_condition = ne; @@ -3888,7 +3793,9 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register object = ToRegister(instr->object()); Register key = ToRegister(instr->key()); - __ Push(object, key); + Register strict = scratch0(); + __ mov(strict, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(object, key, strict); ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); LPointerMap* pointers = instr->pointer_map(); LEnvironment* env = instr->deoptimization_environment(); diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h index 7bc6689f..23e0c44b 100644 --- a/src/arm/lithium-codegen-arm.h +++ b/src/arm/lithium-codegen-arm.h @@ -29,7 +29,7 @@ #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ #include "arm/lithium-arm.h" - +#include "arm/lithium-gap-resolver-arm.h" #include "deoptimizer.h" #include "safepoint-table.h" #include "scopes.h" @@ -39,31 +39,8 @@ namespace internal { // Forward declarations. class LDeferredCode; -class LGapNode; class SafepointGenerator; -class LGapResolver BASE_EMBEDDED { - public: - LGapResolver(); - const ZoneList* Resolve(const ZoneList* moves, - LOperand* marker_operand); - - private: - LGapNode* LookupNode(LOperand* operand); - bool CanReach(LGapNode* a, LGapNode* b, int visited_id); - bool CanReach(LGapNode* a, LGapNode* b); - void RegisterMove(LMoveOperands move); - void AddResultMove(LOperand* from, LOperand* to); - void AddResultMove(LGapNode* from, LGapNode* to); - void ResolveCycle(LGapNode* start, LOperand* marker_operand); - - ZoneList nodes_; - ZoneList identified_cycles_; - ZoneList result_; - int next_visited_id_; -}; - - class LCodeGen BASE_EMBEDDED { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) @@ -76,13 +53,39 @@ class LCodeGen BASE_EMBEDDED { deoptimizations_(4), deoptimization_literals_(8), inlined_function_count_(0), - scope_(chunk->graph()->info()->scope()), + scope_(info->scope()), status_(UNUSED), deferred_(8), - osr_pc_offset_(-1) { + osr_pc_offset_(-1), + resolver_(this) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } + + // Simple accessors. + MacroAssembler* masm() const { return masm_; } + CompilationInfo* info() const { return info_; } + + // Support for converting LOperands to assembler types. + // LOperand must be a register. + Register ToRegister(LOperand* op) const; + + // LOperand is loaded into scratch, unless already a register. + Register EmitLoadRegister(LOperand* op, Register scratch); + + // LOperand must be a double register. + DoubleRegister ToDoubleRegister(LOperand* op) const; + + // LOperand is loaded into dbl_scratch, unless already a double register. + DoubleRegister EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DoubleRegister dbl_scratch); + int ToInteger32(LConstantOperand* op) const; + Operand ToOperand(LOperand* op); + MemOperand ToMemOperand(LOperand* op) const; + // Returns a MemOperand pointing to the high word of a DoubleStackSlot. + MemOperand ToHighMemOperand(LOperand* op) const; + // Try to generate code for the entire chunk, but it may fail if the // chunk contains constructs we cannot handle. Returns true if the // code generation attempt succeeded. 
@@ -94,8 +97,8 @@ class LCodeGen BASE_EMBEDDED { // Deferred code support. template - void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op); + void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); void DoDeferredNumberTagI(LNumberTagI* instr); void DoDeferredTaggedToI(LTaggedToI* instr); @@ -129,10 +132,13 @@ class LCodeGen BASE_EMBEDDED { bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + int strict_mode_flag() const { + return info()->is_strict() ? kStrictMode : kNonStrictMode; + } + LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } - MacroAssembler* masm() const { return masm_; } Register scratch0() { return r9; } DwVfpRegister double_scratch0() { return d0; } @@ -198,34 +204,15 @@ class LCodeGen BASE_EMBEDDED { Register ToRegister(int index) const; DoubleRegister ToDoubleRegister(int index) const; - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register. - DoubleRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch); - - int ToInteger32(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); - void EmitVFPTruncate(VFPRoundingMode rounding_mode, - SwVfpRegister result, - DwVfpRegister double_input, - Register scratch1, - Register scratch2); void DoMathFloor(LUnaryMathOperation* instr); + void DoMathRound(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); + void DoMathLog(LUnaryMathOperation* instr); + void DoMathCos(LUnaryMathOperation* instr); + void DoMathSin(LUnaryMathOperation* instr); // Support for recording safepoint and position information. void RecordSafepoint(LPointerMap* pointers, @@ -233,6 +220,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc new file mode 100644 index 00000000..1a2326b7 --- /dev/null +++ b/src/arm/lithium-gap-resolver-arm.cc @@ -0,0 +1,303 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "arm/lithium-gap-resolver-arm.h" +#include "arm/lithium-codegen-arm.h" + +namespace v8 { +namespace internal { + +static const Register kSavedValueRegister = { 9 }; +static const DoubleRegister kSavedDoubleValueRegister = { 0 }; + +LGapResolver::LGapResolver(LCodeGen* owner) + : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false), + saved_destination_(NULL) { } + + +void LGapResolver::Resolve(LParallelMove* parallel_move) { + ASSERT(moves_.is_empty()); + // Build up a worklist of moves. + BuildInitialMoveList(parallel_move); + + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands move = moves_[i]; + // Skip constants to perform them last. They don't block other moves + // and skipping such moves with register destinations keeps those + // registers free for the whole algorithm. + if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { + root_index_ = i; // Any cycle is found when by reaching this move again. + PerformMove(i); + if (in_cycle_) { + RestoreValue(); + } + } + } + + // Perform the moves with constant sources. + for (int i = 0; i < moves_.length(); ++i) { + if (!moves_[i].IsEliminated()) { + ASSERT(moves_[i].source()->IsConstantOperand()); + EmitMove(i); + } + } + + moves_.Rewind(0); +} + + +void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { + // Perform a linear sweep of the moves to add them to the initial list of + // moves to perform, ignoring any move that is redundant (the source is + // the same as the destination, the destination is ignored and + // unallocated, or the move was already eliminated). + const ZoneList* moves = parallel_move->move_operands(); + for (int i = 0; i < moves->length(); ++i) { + LMoveOperands move = moves->at(i); + if (!move.IsRedundant()) moves_.Add(move); + } + Verify(); +} + + +void LGapResolver::PerformMove(int index) { + // Each call to this function performs a move and deletes it from the move + // graph. We first recursively perform any move blocking this one. We + // mark a move as "pending" on entry to PerformMove in order to detect + // cycles in the move graph. + + // We can only find a cycle, when doing a depth-first traversal of moves, + // be encountering the starting move again. So by spilling the source of + // the starting move, we break the cycle. All moves are then unblocked, + // and the starting move is completed by writing the spilled value to + // its destination. All other moves from the spilled source have been + // completed prior to breaking the cycle. 
+ // An additional complication is that moves to MemOperands with large + // offsets (more than 1K or 4K) require us to spill this spilled value to + // the stack, to free up the register. + ASSERT(!moves_[index].IsPending()); + ASSERT(!moves_[index].IsRedundant()); + + // Clear this move's destination to indicate a pending move. The actual + // destination is saved in a stack allocated local. Multiple moves can + // be pending because this function is recursive. + ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + LOperand* destination = moves_[index].destination(); + moves_[index].set_destination(NULL); + + // Perform a depth-first traversal of the move graph to resolve + // dependencies. Any unperformed, unpending move with a source the same + // as this one's destination blocks this one so recursively perform all + // such moves. + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(destination) && !other_move.IsPending()) { + PerformMove(i); + // If there is a blocking, pending move it must be moves_[root_index_] + // and all other moves with the same source as moves_[root_index_] are + // sucessfully executed (because they are cycle-free) by this loop. + } + } + + // We are about to resolve this move and don't need it marked as + // pending, so restore its destination. + moves_[index].set_destination(destination); + + // The move may be blocked on a pending move, which must be the starting move. + // In this case, we have a cycle, and we save the source of this move to + // a scratch register to break it. + LMoveOperands other_move = moves_[root_index_]; + if (other_move.Blocks(destination)) { + ASSERT(other_move.IsPending()); + BreakCycle(index); + return; + } + + // This move is no longer blocked. + EmitMove(index); +} + + +void LGapResolver::Verify() { +#ifdef ENABLE_SLOW_ASSERTS + // No operand should be the destination for more than one move. + for (int i = 0; i < moves_.length(); ++i) { + LOperand* destination = moves_[i].destination(); + for (int j = i + 1; j < moves_.length(); ++j) { + SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + } + } +#endif +} + +#define __ ACCESS_MASM(cgen_->masm()) + +void LGapResolver::BreakCycle(int index) { + // We save in a register the value that should end up in the source of + // moves_[root_index]. After performing all moves in the tree rooted + // in that move, we save the value to that source. + ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); + ASSERT(!in_cycle_); + in_cycle_ = true; + LOperand* source = moves_[index].source(); + saved_destination_ = moves_[index].destination(); + if (source->IsRegister()) { + __ mov(kSavedValueRegister, cgen_->ToRegister(source)); + } else if (source->IsStackSlot()) { + __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); + } else if (source->IsDoubleRegister()) { + __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source)); + } else if (source->IsDoubleStackSlot()) { + __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source)); + } else { + UNREACHABLE(); + } + // This move will be done by restoring the saved value to the destination. + moves_[index].Eliminate(); +} + + +void LGapResolver::RestoreValue() { + ASSERT(in_cycle_); + ASSERT(saved_destination_ != NULL); + + // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. 
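The worklist and cycle-breaking scheme implemented by PerformMove, BreakCycle and the RestoreValue step that follows can be modelled by a small standalone program (heavily simplified and hypothetical: locations are plain ints and a single scratch slot stands in for kSavedValueRegister):

#include <cstdio>
#include <vector>

// Locations are plain ints; a move copies loc[src] into loc[dst].
struct Move { int src; int dst; bool pending; bool done; };

static int loc[3] = {10, 20, 30};
static std::vector<Move> moves;
static int root = 0;        // index of the move the current traversal started at
static bool in_cycle = false;
static int scratch = 0;     // stands in for kSavedValueRegister
static int saved_dst = -1;  // stands in for saved_destination_

static void EmitMove(int i) {
  loc[moves[i].dst] = loc[moves[i].src];
  moves[i].done = true;
}

static void BreakCycle(int i) {
  // moves[i].dst aliases the root move's source: save the value this move
  // was about to write there and let RestoreValue() finish the job.
  in_cycle = true;
  scratch = loc[moves[i].src];
  saved_dst = moves[i].dst;
  moves[i].done = true;
}

static void PerformMove(int i) {
  moves[i].pending = true;
  int dst = moves[i].dst;
  // First perform every unperformed, non-pending move that reads dst.
  for (int j = 0; j < static_cast<int>(moves.size()); ++j) {
    if (!moves[j].done && !moves[j].pending && moves[j].src == dst) PerformMove(j);
  }
  moves[i].pending = false;
  // If the still-pending root move reads dst, we have found a cycle.
  if (i != root && !moves[root].done && moves[root].src == dst) {
    BreakCycle(i);
    return;
  }
  EmitMove(i);
}

int main() {
  // A three-way cycle: 0 -> 1 -> 2 -> 0.
  moves = {{0, 1, false, false}, {1, 2, false, false}, {2, 0, false, false}};
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    if (moves[i].done) continue;
    root = i;
    PerformMove(i);
    if (in_cycle) {           // RestoreValue()
      loc[saved_dst] = scratch;
      in_cycle = false;
    }
  }
  std::printf("%d %d %d\n", loc[0], loc[1], loc[2]);  // prints: 30 10 20
  return 0;
}

The three-move cycle is resolved with exactly one spill, matching the single scratch register used above.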
+ if (saved_destination_->IsRegister()) { + __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); + } else if (saved_destination_->IsStackSlot()) { + __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); + } else if (saved_destination_->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(saved_destination_), + kSavedDoubleValueRegister); + } else if (saved_destination_->IsDoubleStackSlot()) { + __ vstr(kSavedDoubleValueRegister, + cgen_->ToMemOperand(saved_destination_)); + } else { + UNREACHABLE(); + } + + in_cycle_ = false; + saved_destination_ = NULL; +} + + +void LGapResolver::EmitMove(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + + if (source->IsRegister()) { + Register source_register = cgen_->ToRegister(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_register); + } else { + ASSERT(destination->IsStackSlot()); + __ str(source_register, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsRegister()) { + __ ldr(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + if (!destination_operand.OffsetIsUint12Encodable()) { + // ip is overwritten while saving the value to the destination. + // Therefore we can't use ip. It is OK if the read from the source + // destroys ip, since that happens before the value is read. + __ vldr(kSavedDoubleValueRegister.low(), source_operand); + __ vstr(kSavedDoubleValueRegister.low(), destination_operand); + } else { + __ ldr(ip, source_operand); + __ str(ip, destination_operand); + } + } else { + __ ldr(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, destination_operand); + } + } + + } else if (source->IsConstantOperand()) { + Operand source_operand = cgen_->ToOperand(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. + MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ mov(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsDoubleRegister()) { + DoubleRegister source_register = cgen_->ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(destination), source_register); + } else { + ASSERT(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ vstr(source_register, destination_operand); + } + + } else if (source->IsDoubleStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsDoubleRegister()) { + __ vldr(cgen_->ToDoubleRegister(destination), source_operand); + } else { + ASSERT(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + // kSavedDoubleValueRegister was used to break the cycle, + // but kSavedValueRegister is free. 
+ MemOperand source_high_operand = + cgen_->ToHighMemOperand(source); + MemOperand destination_high_operand = + cgen_->ToHighMemOperand(destination); + __ ldr(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, destination_operand); + __ ldr(kSavedValueRegister, source_high_operand); + __ str(kSavedValueRegister, destination_high_operand); + } else { + __ vldr(kSavedDoubleValueRegister, source_operand); + __ vstr(kSavedDoubleValueRegister, destination_operand); + } + } + } else { + UNREACHABLE(); + } + + moves_[index].Eliminate(); +} + + +#undef __ + +} } // namespace v8::internal diff --git a/src/arm/lithium-gap-resolver-arm.h b/src/arm/lithium-gap-resolver-arm.h new file mode 100644 index 00000000..334d2920 --- /dev/null +++ b/src/arm/lithium-gap-resolver-arm.h @@ -0,0 +1,84 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ +#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ + +#include "v8.h" + +#include "lithium.h" + +namespace v8 { +namespace internal { + +class LCodeGen; +class LGapResolver; + +class LGapResolver BASE_EMBEDDED { + public: + + explicit LGapResolver(LCodeGen* owner); + + // Resolve a set of parallel moves, emitting assembler instructions. + void Resolve(LParallelMove* parallel_move); + + private: + // Build the initial list of moves. + void BuildInitialMoveList(LParallelMove* parallel_move); + + // Perform the move at the moves_ index in question (possibly requiring + // other moves to satisfy dependencies). + void PerformMove(int index); + + // If a cycle is found in the series of moves, save the blocking value to + // a scratch register. The cycle must be found by hitting the root of the + // depth-first search. + void BreakCycle(int index); + + // After a cycle has been resolved, restore the value from the scratch + // register to its proper destination. + void RestoreValue(); + + // Emit a move and remove it from the move graph. + void EmitMove(int index); + + // Verify the move list before performing moves. 
+ void Verify(); + + LCodeGen* cgen_; + + // List of moves not yet resolved. + ZoneList moves_; + + int root_index_; + bool in_cycle_; + LOperand* saved_destination_; +}; + +} } // namespace v8::internal + +#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index 5d8df1af..d431f6a9 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -271,6 +271,29 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, } +void MacroAssembler::Bfi(Register dst, + Register src, + Register scratch, + int lsb, + int width, + Condition cond) { + ASSERT(0 <= lsb && lsb < 32); + ASSERT(0 <= width && width < 32); + ASSERT(lsb + width < 32); + ASSERT(!scratch.is(dst)); + if (width == 0) return; + if (!CpuFeatures::IsSupported(ARMv7)) { + int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); + bic(dst, dst, Operand(mask)); + and_(scratch, src, Operand((1 << width) - 1)); + mov(scratch, Operand(scratch, LSL, lsb)); + orr(dst, dst, scratch); + } else { + bfi(dst, src, lsb, width, cond); + } +} + + void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) { ASSERT(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7)) { @@ -485,18 +508,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() { PopSafepointRegisters(); } -void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) { - str(reg, SafepointRegistersAndDoublesSlot(reg)); +void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, + Register dst) { + str(src, SafepointRegistersAndDoublesSlot(dst)); } -void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) { - str(reg, SafepointRegisterSlot(reg)); +void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { + str(src, SafepointRegisterSlot(dst)); } -void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) { - ldr(reg, SafepointRegisterSlot(reg)); +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + ldr(dst, SafepointRegisterSlot(src)); } @@ -714,7 +738,8 @@ int MacroAssembler::ActivationFrameAlignment() { } -void MacroAssembler::LeaveExitFrame(bool save_doubles) { +void MacroAssembler::LeaveExitFrame(bool save_doubles, + Register argument_count) { // Optionally restore all double registers. if (save_doubles) { for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { @@ -736,12 +761,20 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { str(r3, MemOperand(ip)); #endif - // Tear down the exit frame, pop the arguments, and return. Callee-saved - // register r4 still holds argc. + // Tear down the exit frame, pop the arguments, and return. mov(sp, Operand(fp)); ldm(ia_w, sp, fp.bit() | lr.bit()); - add(sp, sp, Operand(r4, LSL, kPointerSizeLog2)); - mov(pc, lr); + if (argument_count.is_valid()) { + add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); + } +} + +void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { +#if !defined(USE_ARM_EABI) + UNREACHABLE(); +#else + vmov(dst, r0, r1); +#endif } @@ -929,8 +962,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, void MacroAssembler::IsObjectJSStringType(Register object, - Register scratch, - Label* fail) { + Register scratch, + Label* fail) { ASSERT(kNotStringTag != 0); ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); @@ -1005,6 +1038,117 @@ void MacroAssembler::PopTryHandler() { } +void MacroAssembler::Throw(Register value) { + // r0 is expected to hold the exception. 
+ if (!value.is(r0)) { + mov(r0, value); + } + + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop the sp to the top of the handler. + mov(r3, Operand(ExternalReference(Top::k_handler_address))); + ldr(sp, MemOperand(r3)); + + // Restore the next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r2); + str(r2, MemOperand(r3)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. + + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + pop(pc); +} + + +void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, + Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // r0 is expected to hold the exception. + if (!value.is(r0)) { + mov(r0, value); + } + + // Drop sp to the top stack handler. + mov(r3, Operand(ExternalReference(Top::k_handler_address))); + ldr(sp, MemOperand(r3)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + ldr(r2, MemOperand(sp, kStateOffset)); + cmp(r2, Operand(StackHandler::ENTRY)); + b(eq, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + ldr(sp, MemOperand(sp, kNextOffset)); + jmp(&loop); + bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r2); + str(r2, MemOperand(r3)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + mov(r0, Operand(false, RelocInfo::NONE)); + mov(r2, Operand(external_caught)); + str(r0, MemOperand(r2)); + + // Set pending exception and r0 to out of memory exception. + Failure* out_of_memory = Failure::OutOfMemoryException(); + mov(r0, Operand(reinterpret_cast(out_of_memory))); + mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + str(r0, MemOperand(r2)); + } + + // Stack layout at this point. See also StackHandlerConstants. + // sp -> state (ENTRY) + // fp + // lr + + // Discard handler state (r2 is not used) and restore frame pointer. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. 
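The loop a few lines above that follows StackHandlerConstants::kNextOffset until it finds an ENTRY handler is essentially a linked-list walk; a toy model of that unwinding (not V8 code, the types are hypothetical):

#include <cstdio>

enum HandlerKind { TRY_CATCH, TRY_FINALLY, ENTRY };

struct StackHandler {
  StackHandler* next;  // models StackHandlerConstants::kNextOffset
  HandlerKind kind;    // models StackHandlerConstants::kStateOffset
};

StackHandler* UnwindToEntry(StackHandler* top) {
  while (top != nullptr && top->kind != ENTRY) top = top->next;
  return top;  // the handler an uncatchable exception is delivered to
}

int main() {
  StackHandler entry = {nullptr, ENTRY};
  StackHandler finally = {&entry, TRY_FINALLY};
  StackHandler try_catch = {&finally, TRY_CATCH};
  std::printf("%d\n", UnwindToEntry(&try_catch)->kind);  // prints 2 (ENTRY)
  return 0;
}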
+ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + pop(pc); +} + + void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { @@ -1102,6 +1246,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size, ASSERT(!result.is(scratch1)); ASSERT(!result.is(scratch2)); ASSERT(!scratch1.is(scratch2)); + ASSERT(!scratch1.is(ip)); + ASSERT(!scratch2.is(ip)); // Make object size into bytes. if ((flags & SIZE_IN_WORDS) != 0) { @@ -1391,6 +1537,14 @@ void MacroAssembler::CompareInstanceType(Register map, } +void MacroAssembler::CompareRoot(Register obj, + Heap::RootListIndex index) { + ASSERT(!obj.is(ip)); + LoadRoot(ip, index); + cmp(obj, ip); +} + + void MacroAssembler::CheckMap(Register obj, Register scratch, Handle map, @@ -1497,7 +1651,7 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( - ApiFunction* function, int stack_space) { + ExternalReference function, int stack_space) { ExternalReference next_address = ExternalReference::handle_scope_next_address(); const int kNextOffset = 0; @@ -1554,9 +1708,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( cmp(r4, r5); b(ne, &promote_scheduled_exception); - // LeaveExitFrame expects unwind space to be in r4. + // LeaveExitFrame expects unwind space to be in a register. mov(r4, Operand(stack_space)); - LeaveExitFrame(false); + LeaveExitFrame(false, r4); + mov(pc, lr); bind(&promote_scheduled_exception); MaybeObject* result = TryTailCallExternalReference( @@ -1696,9 +1851,9 @@ void MacroAssembler::ConvertToInt32(Register source, ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); // Get exponent alone in scratch2. Ubfx(scratch2, - scratch, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); + scratch, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); // Load dest with zero. We use this either for the final shift or // for the answer. mov(dest, Operand(0, RelocInfo::NONE)); @@ -1761,6 +1916,52 @@ void MacroAssembler::ConvertToInt32(Register source, } +void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, + SwVfpRegister result, + DwVfpRegister double_input, + Register scratch1, + Register scratch2, + CheckForInexactConversion check_inexact) { + ASSERT(CpuFeatures::IsSupported(VFP3)); + CpuFeatures::Scope scope(VFP3); + Register prev_fpscr = scratch1; + Register scratch = scratch2; + + int32_t check_inexact_conversion = + (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; + + // Set custom FPCSR: + // - Set rounding mode. + // - Clear vfp cumulative exception flags. + // - Make sure Flush-to-zero mode control bit is unset. + vmrs(prev_fpscr); + bic(scratch, + prev_fpscr, + Operand(kVFPExceptionMask | + check_inexact_conversion | + kVFPRoundingModeMask | + kVFPFlushToZeroMask)); + // 'Round To Nearest' is encoded by 0b00 so no bits need to be set. + if (rounding_mode != kRoundToNearest) { + orr(scratch, scratch, Operand(rounding_mode)); + } + vmsr(scratch); + + // Convert the argument to an integer. + vcvt_s32_f64(result, + double_input, + (rounding_mode == kRoundToZero) ? kDefaultRoundToZero + : kFPSCRRounding); + + // Retrieve FPSCR. + vmrs(scratch); + // Restore FPSCR. + vmsr(prev_fpscr); + // Check for vfp exceptions. 
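A rough user-space analogue of this FPSCR handling, using the standard C++ floating-point environment (a sketch only; fesetround/fetestexcept stand in for the vmrs/vmsr sequence and the sticky exception bits):

#include <cfenv>
#include <cmath>
#include <cstdio>

// Strictly conforming code would also declare '#pragma STDC FENV_ACCESS ON'.
int main() {
  int old_mode = std::fegetround();
  std::fesetround(FE_TOWARDZERO);        // like selecting kRoundToZero in the FPSCR
  std::feclearexcept(FE_ALL_EXCEPT);     // like clearing the cumulative flags
  long result = std::lrint(3.7);         // converts using the current rounding mode
  bool inexact = std::fetestexcept(FE_INEXACT) != 0;
  bool invalid = std::fetestexcept(FE_INVALID) != 0;
  std::fesetround(old_mode);             // like restoring prev_fpscr
  std::printf("result=%ld inexact=%d invalid=%d\n", result, inexact, invalid);
  return 0;  // expect: result=3 inexact=1 invalid=0
}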
+ tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion)); +} + + void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits) { @@ -2041,11 +2242,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) { ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); } - // The context may be an intermediate context, not a function context. - ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); - } else { // Slot is in the current function context. - // The context may be an intermediate context, not a function context. - ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX))); + } else { + // Slot is in the current function context. Move it into the + // destination register in case we store into it (the write barrier + // cannot be allowed to destroy the context in esi). + mov(dst, cp); + } + + // We should not have found a 'with' context by walking the context chain + // (i.e., the static scope chain and runtime context chain do not agree). + // A variable occurring in such a scope should have slot type LOOKUP and + // not CONTEXT. + if (FLAG_debug_code) { + ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); + cmp(dst, ip); + Check(eq, "Yo dawg, I heard you liked function contexts " + "so I put function contexts in all your contexts"); } } @@ -2122,12 +2334,23 @@ void MacroAssembler::AbortIfNotSmi(Register object) { } +void MacroAssembler::AbortIfNotString(Register object) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Assert(ne, "Operand is not a string"); + push(object); + ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); + CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); + pop(object); + Assert(lo, "Operand is not a string"); +} + + + void MacroAssembler::AbortIfNotRootValue(Register src, Heap::RootListIndex root_value_index, const char* message) { - ASSERT(!src.is(ip)); - LoadRoot(ip, root_value_index); - cmp(src, ip); + CompareRoot(src, root_value_index); Assert(eq, message); } @@ -2243,6 +2466,60 @@ void MacroAssembler::CopyFields(Register dst, } +void MacroAssembler::CopyBytes(Register src, + Register dst, + Register length, + Register scratch) { + Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; + + // Align src before copying in word size chunks. + bind(&align_loop); + cmp(length, Operand(0)); + b(eq, &done); + bind(&align_loop_1); + tst(src, Operand(kPointerSize - 1)); + b(eq, &word_loop); + ldrb(scratch, MemOperand(src, 1, PostIndex)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + sub(length, length, Operand(1), SetCC); + b(ne, &byte_loop_1); + + // Copy bytes in word size chunks. 
+ bind(&word_loop); + if (FLAG_debug_code) { + tst(src, Operand(kPointerSize - 1)); + Assert(eq, "Expecting alignment for CopyBytes"); + } + cmp(length, Operand(kPointerSize)); + b(lt, &byte_loop); + ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); +#if CAN_USE_UNALIGNED_ACCESSES + str(scratch, MemOperand(dst, kPointerSize, PostIndex)); +#else + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); +#endif + sub(length, length, Operand(kPointerSize)); + b(&word_loop); + + // Copy the last bytes if any left. + bind(&byte_loop); + cmp(length, Operand(0)); + b(eq, &done); + bind(&byte_loop_1); + ldrb(scratch, MemOperand(src, 1, PostIndex)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + sub(length, length, Operand(1), SetCC); + b(ne, &byte_loop_1); + bind(&done); +} + + void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. Register source, // Input. Register scratch) { diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 36e4a1fe..aaf4458e 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -121,6 +121,15 @@ class MacroAssembler: public Assembler { Condition cond = al); void Sbfx(Register dst, Register src, int lsb, int width, Condition cond = al); + // The scratch register is not used for ARMv7. + // scratch can be the same register as src (in which case it is trashed), but + // not the same as dst. + void Bfi(Register dst, + Register src, + Register scratch, + int lsb, + int width, + Condition cond = al); void Bfc(Register dst, int lsb, int width, Condition cond = al); void Usat(Register dst, int satpos, const Operand& src, Condition cond = al); @@ -234,18 +243,30 @@ class MacroAssembler: public Assembler { } } + // Pop two registers. Pops rightmost register first (from lower address). + void Pop(Register src1, Register src2, Condition cond = al) { + ASSERT(!src1.is(src2)); + if (src1.code() > src2.code()) { + ldm(ia_w, sp, src1.bit() | src2.bit(), cond); + } else { + ldr(src2, MemOperand(sp, 4, PostIndex), cond); + ldr(src1, MemOperand(sp, 4, PostIndex), cond); + } + } + // Push and pop the registers that can hold pointers, as defined by the // RegList constant kSafepointSavedRegisters. void PushSafepointRegisters(); void PopSafepointRegisters(); void PushSafepointRegistersAndDoubles(); void PopSafepointRegistersAndDoubles(); - void StoreToSafepointRegisterSlot(Register reg); - void StoreToSafepointRegistersAndDoublesSlot(Register reg); - void LoadFromSafepointRegisterSlot(Register reg); - static int SafepointRegisterStackIndex(int reg_code); - static MemOperand SafepointRegisterSlot(Register reg); - static MemOperand SafepointRegistersAndDoublesSlot(Register reg); + // Store value in register src in the safepoint stack slot for + // register dst. + void StoreToSafepointRegisterSlot(Register src, Register dst); + void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); + // Load the value of the src register from its safepoint stack slot + // into register dst. + void LoadFromSafepointRegisterSlot(Register dst, Register src); // Load two consecutive registers with two consecutive memory locations. 
void Ldrd(Register dst1, @@ -297,7 +318,9 @@ class MacroAssembler: public Assembler { void EnterExitFrame(bool save_doubles, int stack_space = 0); // Leave the current exit frame. Expects the return value in r0. - void LeaveExitFrame(bool save_doubles); + // Expect the number of values, pushed prior to the exit frame, to + // remove in a register (or no_reg, if there is nothing to remove). + void LeaveExitFrame(bool save_doubles, Register argument_count); // Get the actual activation frame alignment for target environment. static int ActivationFrameAlignment(); @@ -371,6 +394,13 @@ class MacroAssembler: public Assembler { // Must preserve the result register. void PopTryHandler(); + // Passes thrown value (in r0) to the handler of top of the try handler chain. + void Throw(Register value); + + // Propagates an uncatchable exception to the top of the current JS stack's + // handler chain. + void ThrowUncatchable(UncatchableExceptionType type, Register value); + // --------------------------------------------------------------------------- // Inline caching support @@ -487,6 +517,14 @@ class MacroAssembler: public Assembler { // Copies a fixed number of fields of heap objects from src to dst. void CopyFields(Register dst, Register src, RegList temps, int field_count); + // Copies a number of bytes from src to dst. All registers are clobbered. On + // exit src and dst will point to the place just after where the last byte was + // read or written and length will be zero. + void CopyBytes(Register src, + Register dst, + Register length, + Register scratch); + // --------------------------------------------------------------------------- // Support functions. @@ -539,6 +577,11 @@ class MacroAssembler: public Assembler { bool is_heap_object); + // Compare the object in a register to a value from the root list. + // Uses the ip register as scratch. + void CompareRoot(Register obj, Heap::RootListIndex index); + + // Load and check the instance type of an object for being a string. // Loads the type into the second argument register. // Returns a condition that will be enabled if the object was a string. @@ -603,6 +646,19 @@ class MacroAssembler: public Assembler { DwVfpRegister double_scratch, Label *not_int32); +// Truncates a double using a specific rounding mode. +// Clears the z flag (ne condition) if an overflow occurs. +// If exact_conversion is true, the z flag is also cleared if the conversion +// was inexact, ie. if the double value could not be converted exactly +// to a 32bit integer. + void EmitVFPTruncate(VFPRoundingMode rounding_mode, + SwVfpRegister result, + DwVfpRegister double_input, + Register scratch1, + Register scratch2, + CheckForInexactConversion check + = kDontCheckForInexactConversion); + // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz // instruction. On pre-ARM5 hardware this routine gives the wrong answer // for 0 (31 instead of 32). Source and scratch can be the same in which case @@ -674,11 +730,13 @@ class MacroAssembler: public Assembler { void CallCFunction(ExternalReference function, int num_arguments); void CallCFunction(Register function, int num_arguments); + void GetCFunctionDoubleResult(const DoubleRegister dst); + // Calls an API function. Allocates HandleScope, extracts returned value // from handle and propagates exceptions. Restores context. // stack_space - space to be unwound on exit (includes the call js // arguments space and the additional space allocated for the fast call). 
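  // A typical call site wraps the ApiFunction in a typed ExternalReference and
  // passes the number of slots to unwind, roughly as GenerateFastApiDirectCall
  // in stub-cache-arm.cc does later in this patch (sketch only, using the
  // names from that call site):
  //
  //   ApiFunction fun(function_address);
  //   ExternalReference ref(&fun, ExternalReference::DIRECT_API_CALL);
  //   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  //   MaybeObject* result =
  //       masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
  //   if (result->IsFailure()) return result;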
- MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function, + MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function, int stack_space); // Jump to a runtime routine. @@ -765,11 +823,11 @@ class MacroAssembler: public Assembler { mov(reg, scratch); } - void SmiUntag(Register reg) { - mov(reg, Operand(reg, ASR, kSmiTagSize)); + void SmiUntag(Register reg, SBit s = LeaveCC) { + mov(reg, Operand(reg, ASR, kSmiTagSize), s); } - void SmiUntag(Register dst, Register src) { - mov(dst, Operand(src, ASR, kSmiTagSize)); + void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { + mov(dst, Operand(src, ASR, kSmiTagSize), s); } // Jump the register contains a smi. @@ -791,6 +849,9 @@ class MacroAssembler: public Assembler { void AbortIfSmi(Register object); void AbortIfNotSmi(Register object); + // Abort execution if argument is a string. Used in debug code. + void AbortIfNotString(Register object); + // Abort execution if argument is not the root value with the given index. void AbortIfNotRootValue(Register src, Heap::RootListIndex root_value_index, @@ -871,10 +932,19 @@ class MacroAssembler: public Assembler { Register scratch1, Register scratch2); + // Compute memory operands for safepoint stack slots. + static int SafepointRegisterStackIndex(int reg_code); + MemOperand SafepointRegisterSlot(Register reg); + MemOperand SafepointRegistersAndDoublesSlot(Register reg); + bool generating_stub_; bool allow_stub_calls_; // This handle will be patched with the code object on installation. Handle code_object_; + + // Needs access to SafepointRegisterStackIndex for optimized frame + // traversal. + friend class OptimizedFrame; }; diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc index 94da0424..1f6ed671 100644 --- a/src/arm/regexp-macro-assembler-arm.cc +++ b/src/arm/regexp-macro-assembler-arm.cc @@ -57,48 +57,57 @@ namespace internal { * - r13/sp : points to tip of C stack. * * The remaining registers are free for computations. - * * Each call to a public method should retain this convention. + * * The stack will have the following structure: - * - direct_call (if 1, direct call from JavaScript code, if 0 call - * through the runtime system) - * - stack_area_base (High end of the memory area to use as - * backtracking stack) - * - int* capture_array (int[num_saved_registers_], for output). - * --- sp when called --- - * - link address - * - backup of registers r4..r11 - * - end of input (Address of end of string) - * - start of input (Address of first character in string) - * - start index (character index of start) - * --- frame pointer ---- - * - void* input_string (location of a handle containing the string) - * - Offset of location before start of input (effectively character - * position -1). Used to initialize capture registers to a non-position. - * - At start (if 1, we are starting at the start of the - * string, otherwise 0) - * - register 0 (Only positions must be stored in the first - * - register 1 num_saved_registers_ registers) - * - ... - * - register num_registers-1 - * --- sp --- + * - fp[48] direct_call (if 1, direct call from JavaScript code, + * if 0, call through the runtime system). + * - fp[44] stack_area_base (High end of the memory area to use as + * backtracking stack). + * - fp[40] int* capture_array (int[num_saved_registers_], for output). + * - fp[36] secondary link/return address used by native call. + * --- sp when called --- + * - fp[32] return address (lr). + * - fp[28] old frame pointer (r11). 
+ * - fp[0..24] backup of registers r4..r10. + * --- frame pointer ---- + * - fp[-4] end of input (Address of end of string). + * - fp[-8] start of input (Address of first character in string). + * - fp[-12] start index (character index of start). + * - fp[-16] void* input_string (location of a handle containing the string). + * - fp[-20] Offset of location before start of input (effectively character + * position -1). Used to initialize capture registers to a + * non-position. + * - fp[-24] At start (if 1, we are starting at the start of the + * string, otherwise 0) + * - fp[-28] register 0 (Only positions must be stored in the first + * - register 1 num_saved_registers_ registers) + * - ... + * - register num_registers-1 + * --- sp --- * * The first num_saved_registers_ registers are initialized to point to * "character -1" in the string (i.e., char_size() bytes before the first * character of the string). The remaining registers start out as garbage. * * The data up to the return address must be placed there by the calling - * code, by calling the code entry as cast to a function with the signature: + * code and the remaining arguments are passed in registers, e.g. by calling the + * code entry as cast to a function with the signature: * int (*match)(String* input_string, * int start_index, * Address start, * Address end, + * Address secondary_return_address, // Only used by native call. * int* capture_output_array, - * bool at_start, * byte* stack_area_base, - * bool direct_call) + * bool direct_call = false) * The call is performed by NativeRegExpMacroAssembler::Execute() - * (in regexp-macro-assembler.cc). + * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro + * in arm/simulator-arm.h. + * When calling as a non-direct call (i.e., from C++ code), the return address + * area is overwritten with the LR register by the RegExp code. When doing a + * direct call from generated code, the return address is placed there by + * the calling code, as in a normal exit frame. */ #define __ ACCESS_MASM(masm_) @@ -598,16 +607,17 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Entry code: __ bind(&entry_label_); - // Push Link register. // Push arguments // Save callee-save registers. // Start new stack frame. + // Store link register in existing stack-cell. // Order here should correspond to order of offset constants in header file. RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit(); RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit(); __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit()); - // Set frame pointer just above the arguments. + // Set frame pointer in space for it if this is not a direct call + // from generated code. __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); __ push(r0); // Make room for "position - 1" constant (value is irrelevant). __ push(r0); // Make room for "at start" constant (value is irrelevant). @@ -764,10 +774,9 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { if (stack_overflow_label_.is_linked()) { SafeCallTarget(&stack_overflow_label_); // Reached if the backtrack-stack limit has been hit. 
- Label grow_failed; - // Call GrowStack(backtrack_stackpointer()) + // Call GrowStack(backtrack_stackpointer(), &stack_base) static const int num_arguments = 2; __ PrepareCallCFunction(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h index b487ba59..d9d0b356 100644 --- a/src/arm/regexp-macro-assembler-arm.h +++ b/src/arm/regexp-macro-assembler-arm.h @@ -122,8 +122,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kStoredRegisters = kFramePointer; // Return address (stored from link register, read into pc on return). static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize; + static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; // Stack parameters placed by caller. - static const int kRegisterOutput = kReturnAddress + kPointerSize; + static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize; static const int kStackHighEnd = kRegisterOutput + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc index 8104747f..f475a18b 100644 --- a/src/arm/simulator-arm.cc +++ b/src/arm/simulator-arm.cc @@ -1005,7 +1005,9 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) { intptr_t* ptr = reinterpret_cast(addr); return *ptr; } - PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); UNIMPLEMENTED(); return 0; #endif @@ -1023,7 +1025,9 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) { *ptr = value; return; } - PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); UNIMPLEMENTED(); #endif } @@ -1038,7 +1042,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) { uint16_t* ptr = reinterpret_cast(addr); return *ptr; } - PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); UNIMPLEMENTED(); return 0; #endif @@ -1072,7 +1078,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) { *ptr = value; return; } - PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); UNIMPLEMENTED(); #endif } @@ -1089,7 +1097,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) { *ptr = value; return; } - PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); UNIMPLEMENTED(); #endif } @@ -1531,7 +1541,11 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0, // This signature supports direct call in to API function native callback // (refer to InvocationCallback in v8.h). -typedef v8::Handle (*SimulatorRuntimeApiCall)(int32_t arg0); +typedef v8::Handle (*SimulatorRuntimeDirectApiCall)(int32_t arg0); + +// This signature supports direct call to accessor getter callback. +typedef v8::Handle (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, + int32_t arg1); // Software interrupt instructions are used by the simulator to call into the // C-based V8 runtime. 
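// In isolation, the type-tagged dispatch added below can be pictured roughly
// like this (a simplified standalone analogue, not the simulator's actual
// types; the names Redirected and DispatchExternalCall are illustrative only,
// and the real callbacks return a v8::Handle<v8::Value> whose dereferenced
// value the simulator stores back into r0):
//
//   enum CallType {
//     BUILTIN_CALL, FP_RETURN_CALL, DIRECT_API_CALL, DIRECT_GETTER_CALL
//   };
//   struct Redirected { void* external; CallType type; };
//
//   int32_t DispatchExternalCall(const Redirected& r,
//                                int32_t arg0, int32_t arg1) {
//     switch (r.type) {
//       case DIRECT_API_CALL: {
//         // One word-sized argument (passed in r0 on ARM).
//         typedef int32_t (*ApiCall)(int32_t);
//         return reinterpret_cast<ApiCall>(r.external)(arg0);
//       }
//       case DIRECT_GETTER_CALL: {
//         // Two word-sized arguments (passed in r0 and r1 on ARM).
//         typedef int32_t (*GetterCall)(int32_t, int32_t);
//         return reinterpret_cast<GetterCall>(r.external)(arg0, arg1);
//       }
//       default:
//         // BUILTIN_CALL / FP_RETURN_CALL are handled analogously with
//         // their own signatures.
//         return 0;
//     }
//   }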
@@ -1572,14 +1586,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { CHECK(stack_aligned); double result = target(arg0, arg1, arg2, arg3); SetFpResult(result); - } else if (redirection->type() == ExternalReference::DIRECT_CALL) { - SimulatorRuntimeApiCall target = - reinterpret_cast(external); + } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { + SimulatorRuntimeDirectApiCall target = + reinterpret_cast(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { - PrintF( - "Call to host function at %p args %08x", - FUNCTION_ADDR(target), - arg0); + PrintF("Call to host function at %p args %08x", + FUNCTION_ADDR(target), arg0); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } @@ -1591,6 +1603,23 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF("Returned %p\n", reinterpret_cast(*result)); } set_register(r0, (int32_t) *result); + } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { + SimulatorRuntimeDirectGetterCall target = + reinterpret_cast(external); + if (::v8::internal::FLAG_trace_sim || !stack_aligned) { + PrintF("Call to host function at %p args %08x %08x", + FUNCTION_ADDR(target), arg0, arg1); + if (!stack_aligned) { + PrintF(" with unaligned stack %08x\n", get_register(sp)); + } + PrintF("\n"); + } + CHECK(stack_aligned); + v8::Handle result = target(arg0, arg1); + if (::v8::internal::FLAG_trace_sim) { + PrintF("Returned %p\n", reinterpret_cast(*result)); + } + set_register(r0, (int32_t) *result); } else { // builtin call. ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL); @@ -2535,6 +2564,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); double dd_value = dn_value / dm_value; + div_zero_vfp_flag_ = (dm_value == 0); set_d_register_from_double(vd, dd_value); } else { UNIMPLEMENTED(); // Not used by V8. @@ -2769,14 +2799,17 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer); + double abs_diff = + unsigned_integer ? fabs(val - static_cast(temp)) + : fabs(val - temp); + + inexact_vfp_flag_ = (abs_diff != 0); + if (inv_op_vfp_flag_) { temp = VFPConversionSaturate(val, unsigned_integer); } else { switch (mode) { case RN: { - double abs_diff = - unsigned_integer ? fabs(val - static_cast(temp)) - : fabs(val - temp); int val_sign = (val > 0) ? 1 : -1; if (abs_diff > 0.5) { temp += val_sign; diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h index 5256ae35..bdf1f8a1 100644 --- a/src/arm/simulator-arm.h +++ b/src/arm/simulator-arm.h @@ -48,10 +48,16 @@ namespace internal { #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ (entry(p0, p1, p2, p3, p4)) -// Call the generated regexp code directly. The entry function pointer should -// expect seven int/pointer sized arguments and return an int. +typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, + void*, int*, Address, int); + + +// Call the generated regexp code directly. The code at the entry address +// should act as a function matching the type arm_regexp_matcher. +// The fifth argument is a dummy that reserves the space used for +// the return address added by the ExitFrame in native calls. 
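// A native call through the macro below therefore supplies seven operands and
// lets the macro itself insert the dummy fifth argument of arm_regexp_matcher
// that reserves the return-address slot (illustrative sketch; the variable
// names are not from this patch):
//
//   int result = CALL_GENERATED_REGEXP_CODE(code_entry,
//                                           input_string,   // String*
//                                           start_index,    // int
//                                           input_start,    // const byte*
//                                           input_end,      // const byte*
//                                           output_array,   // int*
//                                           stack_base,     // Address
//                                           direct_call);   // int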
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ - (entry(p0, p1, p2, p3, p4, p5, p6)) + (FUNCTION_CAST(entry)(p0, p1, p2, p3, NULL, p4, p5, p6)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ (reinterpret_cast(try_catch_address)) @@ -362,8 +368,7 @@ class Simulator { FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ - Simulator::current()->Call( \ - FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6) + Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ try_catch_address == \ diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 9ef61158..60a11f3c 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -655,12 +655,10 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, // already generated). Do not allow the assembler to perform a // garbage collection but instead return the allocation failure // object. - MaybeObject* result = masm->TryCallApiFunctionAndReturn( - &fun, argc + kFastApiCallArguments + 1); - if (result->IsFailure()) { - return result; - } - return Heap::undefined_value(); + const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; + ExternalReference ref = + ExternalReference(&fun, ExternalReference::DIRECT_API_CALL); + return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } class CallInterceptorCompiler BASE_EMBEDDED { @@ -1245,18 +1243,38 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - // Push the arguments on the JS stack of the caller. - __ push(receiver); // Receiver. - __ mov(scratch3, Operand(Handle(callback))); // callback data - __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); - __ Push(reg, ip, scratch3, name_reg); + // Build AccessorInfo::args_ list on the stack and push property name below + // the exit frame to make GC aware of them and store pointers to them. + __ push(receiver); + __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_ + Handle callback_handle(callback); + if (Heap::InNewSpace(callback_handle->data())) { + __ Move(scratch3, callback_handle); + __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); + } else { + __ Move(scratch3, Handle(callback_handle->data())); + } + __ Push(reg, scratch3, name_reg); + __ mov(r0, sp); // r0 = Handle + + Address getter_address = v8::ToCData
(callback->getter()); + ApiFunction fun(getter_address); - // Do tail-call to the runtime system. - ExternalReference load_callback_property = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallExternalReference(load_callback_property, 5, 1); + const int kApiStackSpace = 1; + __ EnterExitFrame(false, kApiStackSpace); + // Create AccessorInfo instance on the stack above the exit frame with + // scratch2 (internal::Object **args_) as the data. + __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); + __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& - return Heap::undefined_value(); // Success. + // Emitting a stub call may try to allocate (if the code is not + // already generated). Do not allow the assembler to perform a + // garbage collection but instead return the allocation failure + // object. + const int kStackUnwindSpace = 4; + ExternalReference ref = + ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL); + return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -2332,8 +2350,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { // Check that the object is a two-byte string or a symbol. @@ -2348,8 +2367,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2369,8 +2389,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2650,10 +2671,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, __ Push(r1, r2, r0); // Receiver, name, value. + __ mov(r0, Operand(Smi::FromInt(strict_mode_))); + __ push(r0); // strict mode + // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallExternalReference(store_ic_property, 3, 1); + __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. __ bind(&miss); @@ -3259,6 +3283,47 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized( } +MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray( + JSObject* receiver) { + // ----------- S t a t e ------------- + // -- r0 : value + // -- r1 : key + // -- r2 : receiver + // -- r3 : scratch + // -- r4 : scratch + // -- r5 : scratch + // -- r6 : scratch + // -- lr : return address + // ----------------------------------- + Label miss; + + // Check that the map matches. 
+ __ CheckMap(r2, r6, Handle(receiver->map()), &miss, false); + + GenerateFastPixelArrayStore(masm(), + r2, + r1, + r0, + r3, + r4, + r5, + r6, + true, + true, + &miss, + &miss, + NULL, + &miss); + + __ bind(&miss); + Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, NULL); +} + + MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // ----------- S t a t e ------------- // -- r0 : argc @@ -3994,7 +4059,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( // Push receiver, key and value for runtime call. __ Push(r2, r1, r0); - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt( + Code::ExtractExtraICStateFromFlags(flags) & kStrictMode))); + __ Push(r1, r0); + + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); return GetCode(flags); } diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc index b4b518cf..544e405d 100644 --- a/src/arm/virtual-frame-arm.cc +++ b/src/arm/virtual-frame-arm.cc @@ -332,9 +332,9 @@ void VirtualFrame::CallLoadIC(Handle name, RelocInfo::Mode mode) { void VirtualFrame::CallStoreIC(Handle name, bool is_contextual, StrictModeFlag strict_mode) { - Handle ic(Builtins::builtin(strict_mode == kStrictMode - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); PopToR0(); RelocInfo::Mode mode; if (is_contextual) { @@ -359,8 +359,10 @@ void VirtualFrame::CallKeyedLoadIC() { } -void VirtualFrame::CallKeyedStoreIC() { - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); +void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) { + Handle ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); PopToR1R0(); SpillAll(); EmitPop(r2); diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h index b6e794a5..76470bdc 100644 --- a/src/arm/virtual-frame-arm.h +++ b/src/arm/virtual-frame-arm.h @@ -303,7 +303,7 @@ class VirtualFrame : public ZoneObject { // Call keyed store IC. Value, key and receiver are on the stack. All three // are consumed. Result is returned in r0. - void CallKeyedStoreIC(); + void CallKeyedStoreIC(StrictModeFlag strict_mode); // Call into an IC stub given the number of arguments it removes // from the stack. Register arguments to the IC stub are implicit, diff --git a/src/array.js b/src/array.js index 1298434d..6ed14760 100644 --- a/src/array.js +++ b/src/array.js @@ -33,7 +33,7 @@ // Global list of arrays visited during toString, toLocaleString and // join invocations. -var visited_arrays = new $Array(); +var visited_arrays = new InternalArray(); // Gets a sorted array of array keys. Useful for operations on sparse @@ -73,7 +73,7 @@ function SparseJoin(array, len, convert) { var last_key = -1; var keys_length = keys.length; - var elements = new $Array(keys_length); + var elements = new InternalArray(keys_length); var elements_length = 0; for (var i = 0; i < keys_length; i++) { @@ -122,7 +122,7 @@ function Join(array, length, separator, convert) { } // Construct an array for the elements. - var elements = new $Array(length); + var elements = new InternalArray(length); // We pull the empty separator check outside the loop for speed! 
if (separator.length == 0) { @@ -140,7 +140,7 @@ function Join(array, length, separator, convert) { return %StringBuilderConcat(elements, elements_length, ''); } // Non-empty separator case. - // If the first element is a number then use the heuristic that the + // If the first element is a number then use the heuristic that the // remaining elements are also likely to be numbers. if (!IS_NUMBER(array[0])) { for (var i = 0; i < length; i++) { @@ -148,7 +148,7 @@ function Join(array, length, separator, convert) { if (!IS_STRING(e)) e = convert(e); elements[i] = e; } - } else { + } else { for (var i = 0; i < length; i++) { var e = array[i]; if (IS_NUMBER(e)) elements[i] = %_NumberToString(e); @@ -157,19 +157,11 @@ function Join(array, length, separator, convert) { elements[i] = e; } } - } - var result = %_FastAsciiArrayJoin(elements, separator); - if (!IS_UNDEFINED(result)) return result; - - var length2 = (length << 1) - 1; - var j = length2; - var i = length; - elements[--j] = elements[--i]; - while (i > 0) { - elements[--j] = separator; - elements[--j] = elements[--i]; } - return %StringBuilderConcat(elements, length2, ''); + var result = %_FastAsciiArrayJoin(elements, separator); + if (!IS_UNDEFINED(result)) return result; + + return %StringBuilderJoin(elements, length, separator); } finally { // Make sure to remove the last element of the visited array no // matter what happens. @@ -179,7 +171,7 @@ function Join(array, length, separator, convert) { function ConvertToString(x) { - // Assumes x is a non-string. + // Assumes x is a non-string. if (IS_NUMBER(x)) return %_NumberToString(x); if (IS_BOOLEAN(x)) return x ? 'true' : 'false'; return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x)); @@ -249,7 +241,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) { // special array operations to handle sparse arrays in a sensible fashion. function SmartMove(array, start_i, del_count, len, num_additional_args) { // Move data to new array. - var new_array = new $Array(len - del_count + num_additional_args); + var new_array = new InternalArray(len - del_count + num_additional_args); var intervals = %GetArrayKeys(array, len); var length = intervals.length; for (var k = 0; k < length; k++) { @@ -426,9 +418,8 @@ function ArrayPush() { function ArrayConcat(arg1) { // length == 1 - // TODO: can we just use arguments? var arg_count = %_ArgumentsLength(); - var arrays = new $Array(1 + arg_count); + var arrays = new InternalArray(1 + arg_count); arrays[0] = this; for (var i = 0; i < arg_count; i++) { arrays[i + 1] = %_Arguments(i); @@ -934,7 +925,9 @@ function ArrayFilter(f, receiver) { for (var i = 0; i < length; i++) { var current = this[i]; if (!IS_UNDEFINED(current) || i in this) { - if (f.call(receiver, current, i, this)) result[result_length++] = current; + if (f.call(receiver, current, i, this)) { + result[result_length++] = current; + } } } return result; @@ -999,13 +992,15 @@ function ArrayMap(f, receiver) { // Pull out the length so that modifications to the length in the // loop will not affect the looping. 
var length = TO_UINT32(this.length); - var result = new $Array(length); + var result = new $Array(); + var accumulator = new InternalArray(length); for (var i = 0; i < length; i++) { var current = this[i]; if (!IS_UNDEFINED(current) || i in this) { - result[i] = f.call(receiver, current, i, this); + accumulator[i] = f.call(receiver, current, i, this); } } + %MoveArrayContents(accumulator, result); return result; } @@ -1026,13 +1021,13 @@ function ArrayIndexOf(element, index) { } var min = index; var max = length; - if (UseSparseVariant(this, length, true)) { + if (UseSparseVariant(this, length, IS_ARRAY(this))) { var intervals = %GetArrayKeys(this, length); if (intervals.length == 2 && intervals[0] < 0) { // A single interval. var intervalMin = -(intervals[0] + 1); var intervalMax = intervalMin + intervals[1]; - min = MAX(min, intervalMin); + if (min < intervalMin) min = intervalMin; max = intervalMax; // Capped by length already. // Fall through to loop below. } else { @@ -1082,13 +1077,13 @@ function ArrayLastIndexOf(element, index) { } var min = 0; var max = index; - if (UseSparseVariant(this, length, true)) { + if (UseSparseVariant(this, length, IS_ARRAY(this))) { var intervals = %GetArrayKeys(this, index + 1); if (intervals.length == 2 && intervals[0] < 0) { // A single interval. var intervalMin = -(intervals[0] + 1); var intervalMax = intervalMin + intervals[1]; - min = MAX(min, intervalMin); + if (min < intervalMin) min = intervalMin; max = intervalMax; // Capped by index already. // Fall through to loop below. } else { @@ -1234,6 +1229,20 @@ function SetupArray() { )); %FinishArrayPrototypeSetup($Array.prototype); + + // The internal Array prototype doesn't need to be fancy, since it's never + // exposed to user code, so no hidden prototypes or DONT_ENUM attributes + // are necessary. + // The null __proto__ ensures that we never inherit any user created + // getters or setters from, e.g., Object.prototype. + InternalArray.prototype.__proto__ = null; + // Adding only the functions that are actually used, and a toString. + InternalArray.prototype.join = getFunction("join", ArrayJoin); + InternalArray.prototype.pop = getFunction("pop", ArrayPop); + InternalArray.prototype.push = getFunction("push", ArrayPush); + InternalArray.prototype.toString = function() { + return "Internal Array, length " + this.length; + }; } diff --git a/src/assembler.cc b/src/assembler.cc index ef2094f6..b0b44fd9 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -68,7 +68,7 @@ const double DoubleConstant::min_int = kMinInt; const double DoubleConstant::one_half = 0.5; const double DoubleConstant::minus_zero = -0.0; const double DoubleConstant::negative_infinity = -V8_INFINITY; - +const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; // ----------------------------------------------------------------------------- // Implementation of Label @@ -228,6 +228,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteTaggedPC(pc_delta, kEmbeddedObjectTag); } else if (rmode == RelocInfo::CODE_TARGET) { WriteTaggedPC(pc_delta, kCodeTargetTag); + ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize); } else if (RelocInfo::IsPosition(rmode)) { // Use signed delta-encoding for data. 
intptr_t data_delta = rinfo->data() - last_data_; @@ -251,6 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteExtraTaggedPC(pc_delta, kPCJumpTag); WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag); last_data_ = rinfo->data(); + ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize); } else { // For all other modes we simply use the mode as the extra tag. // None of these modes need a data component. @@ -814,6 +816,39 @@ static double mod_two_doubles(double x, double y) { } +static double math_sin_double(double x) { + return sin(x); +} + + +static double math_cos_double(double x) { + return cos(x); +} + + +static double math_log_double(double x) { + return log(x); +} + + +ExternalReference ExternalReference::math_sin_double_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(math_sin_double), + FP_RETURN_CALL)); +} + + +ExternalReference ExternalReference::math_cos_double_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(math_cos_double), + FP_RETURN_CALL)); +} + + +ExternalReference ExternalReference::math_log_double_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(math_log_double), + FP_RETURN_CALL)); +} + + // Helper function to compute x^y, where y is known to be an // integer. Uses binary decomposition to limit the number of // multiplications; see the discussion in "Hacker's Delight" by Henry @@ -850,12 +885,14 @@ double power_double_double(double x, double y) { ExternalReference ExternalReference::power_double_double_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double))); + return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double), + FP_RETURN_CALL)); } ExternalReference ExternalReference::power_double_int_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int))); + return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int), + FP_RETURN_CALL)); } diff --git a/src/assembler.h b/src/assembler.h index b089b090..8ebbfadf 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -178,6 +178,20 @@ class RelocInfo BASE_EMBEDDED { // invalid/uninitialized position value. static const int kNoPosition = -1; + // This string is used to add padding comments to the reloc info in cases + // where we are not sure to have enough space for patching in during + // lazy deoptimization. This is the case if we have indirect calls for which + // we do not normally record relocation info. + static const char* kFillerCommentString; + + // The minimum size of a comment is equal to three bytes for the extra tagged + // pc + the tag for the data, and kPointerSize for the actual pointer to the + // comment. + static const int kMinRelocCommentSize = 3 + kPointerSize; + + // The maximum size for a call instruction including pc-jump. + static const int kMaxCallSize = 6; + enum Mode { // Please note the order is important (see IsCodeTarget, IsGCRelocMode). CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor. @@ -467,21 +481,22 @@ class Debug_Address; class ExternalReference BASE_EMBEDDED { public: // Used in the simulator to support different native api calls. - // - // BUILTIN_CALL - builtin call. - // MaybeObject* f(v8::internal::Arguments). - // - // FP_RETURN_CALL - builtin call that returns floating point. - // double f(double, double). - // - // DIRECT_CALL - direct call to API function native callback - // from generated code. - // Handle f(v8::Arguments&) - // enum Type { + // Builtin call. + // MaybeObject* f(v8::internal::Arguments). 
BUILTIN_CALL, // default + + // Builtin call that returns floating point. + // double f(double, double). FP_RETURN_CALL, - DIRECT_CALL + + // Direct call to API function callback. + // Handle f(v8::Arguments&) + DIRECT_API_CALL, + + // Direct call to accessor getter callback. + // Handle f(Local property, AccessorInfo& info) + DIRECT_GETTER_CALL }; typedef void* ExternalReferenceRedirector(void* original, Type type); @@ -576,6 +591,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference address_of_minus_zero(); static ExternalReference address_of_negative_infinity(); + static ExternalReference math_sin_double_function(); + static ExternalReference math_cos_double_function(); + static ExternalReference math_log_double_function(); + Address address() const {return reinterpret_cast
(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc index 415d2dd8..8cd29b21 100644 --- a/src/bootstrapper.cc +++ b/src/bootstrapper.cc @@ -1240,6 +1240,43 @@ bool Genesis::InstallNatives() { global_context()->set_opaque_reference_function(*opaque_reference_fun); } + { // --- I n t e r n a l A r r a y --- + // An array constructor on the builtins object that works like + // the public Array constructor, except that its prototype + // doesn't inherit from Object.prototype. + // To be used only for internal work by builtins. Instances + // must not be leaked to user code. + // Only works correctly when called as a constructor. The normal + // Array code uses Array.prototype as prototype when called as + // a function. + Handle array_function = + InstallFunction(builtins, + "InternalArray", + JS_ARRAY_TYPE, + JSArray::kSize, + Top::initial_object_prototype(), + Builtins::ArrayCode, + true); + Handle prototype = + Factory::NewJSObject(Top::object_function(), TENURED); + SetPrototype(array_function, prototype); + + array_function->shared()->set_construct_stub( + Builtins::builtin(Builtins::ArrayConstructCode)); + array_function->shared()->DontAdaptArguments(); + + // Make "length" magic on instances. + Handle array_descriptors = + Factory::CopyAppendProxyDescriptor( + Factory::empty_descriptor_array(), + Factory::length_symbol(), + Factory::NewProxy(&Accessors::ArrayLength), + static_cast(DONT_ENUM | DONT_DELETE)); + + array_function->initial_map()->set_instance_descriptors( + *array_descriptors); + } + if (FLAG_disable_native_files) { PrintF("Warning: Running without installed natives!\n"); return true; @@ -1358,6 +1395,7 @@ bool Genesis::InstallNatives() { global_context()->set_regexp_result_map(*initial_map); } + #ifdef DEBUG builtins->Verify(); #endif diff --git a/src/builtins.cc b/src/builtins.cc index 8fdc1b13..01e8deb4 100644 --- a/src/builtins.cc +++ b/src/builtins.cc @@ -1328,12 +1328,12 @@ static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) { static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) { - StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict); + StoreIC::GenerateMegamorphic(masm, kNonStrictMode); } static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) { - StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict); + StoreIC::GenerateMegamorphic(masm, kStrictMode); } @@ -1348,17 +1348,22 @@ static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) { static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) { - StoreIC::GenerateGlobalProxy(masm); + StoreIC::GenerateGlobalProxy(masm, kNonStrictMode); } static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) { - StoreIC::GenerateGlobalProxy(masm); + StoreIC::GenerateGlobalProxy(masm, kStrictMode); } static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) { - KeyedStoreIC::GenerateGeneric(masm); + KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode); +} + + +static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) { + KeyedStoreIC::GenerateGeneric(masm, kStrictMode); } @@ -1372,6 +1377,11 @@ static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) { } +static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) { + KeyedStoreIC::GenerateInitialize(masm); +} + + #ifdef ENABLE_DEBUGGER_SUPPORT static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) { Debug::GenerateLoadICDebugBreak(masm); diff --git a/src/builtins.h b/src/builtins.h index ada23a75..5ea46651 
100644 --- a/src/builtins.h +++ b/src/builtins.h @@ -62,111 +62,116 @@ enum BuiltinExtraArguments { // Define list of builtins implemented in assembly. -#define BUILTIN_LIST_A(V) \ - V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructCall, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(LazyCompile, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(LazyRecompile, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(NotifyOSR, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - \ - V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - \ - V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \ - StoreIC::kStoreICStrict) \ - \ - V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - \ - /* Uses KeyedLoadIC_Initialize; must be after in list. 
*/ \ - V(FunctionCall, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(FunctionApply, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(ArrayCode, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(StringConstructCode, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) +#define BUILTIN_LIST_A(V) \ + V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructCall, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LazyCompile, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LazyRecompile, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyOSR, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + \ + V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + \ + V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \ + kStrictMode) \ + V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \ + kStrictMode) \ + V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ + kStrictMode) \ + V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ + kStrictMode) \ + V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \ + kStrictMode) \ + \ + V(KeyedStoreIC_Initialize, 
KEYED_STORE_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + \ + V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \ + kStrictMode) \ + V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \ + kStrictMode) \ + \ + /* Uses KeyedLoadIC_Initialize; must be after in list. */ \ + V(FunctionCall, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(FunctionApply, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(ArrayCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(StringConstructCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) #ifdef ENABLE_DEBUGGER_SUPPORT @@ -214,7 +219,7 @@ enum BuiltinExtraArguments { V(SHL, 1) \ V(SAR, 1) \ V(SHR, 1) \ - V(DELETE, 1) \ + V(DELETE, 2) \ V(IN, 1) \ V(INSTANCE_OF, 1) \ V(GET_KEYS, 0) \ diff --git a/src/code-stubs.h b/src/code-stubs.h index 0d0e37ff..96ac7335 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -86,9 +86,6 @@ namespace internal { CODE_STUB_LIST_ALL_PLATFORMS(V) \ CODE_STUB_LIST_ARM(V) -// Types of uncatchable exceptions. -enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; - // Mode to overwrite BinaryExpression values. enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE }; diff --git a/src/compiler.cc b/src/compiler.cc index ae7b2b9f..667432f2 100755 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -221,11 +221,12 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { // or perform on-stack replacement for function with too many // stack-allocated local variables. // - // The encoding is as a signed value, with parameters using the negative - // indices and locals the non-negative ones. + // The encoding is as a signed value, with parameters and receiver using + // the negative indices and locals the non-negative ones. const int limit = LUnallocated::kMaxFixedIndices / 2; Scope* scope = info->scope(); - if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) { + if ((scope->num_parameters() + 1) > limit || + scope->num_stack_slots() > limit) { AbortAndDisable(info); // True indicates the compilation pipeline is still going, not // necessarily that we optimized the code. @@ -261,10 +262,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { Handle shared = info->shared_info(); shared->EnableDeoptimizationSupport(*unoptimized.code()); // The existing unoptimized code was replaced with the new one. - Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, - Handle(shared->DebugName()), - shared->start_position(), - &unoptimized); + Compiler::RecordFunctionCompilation( + Logger::LAZY_COMPILE_TAG, &unoptimized, shared); } } @@ -273,7 +272,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { // optimizable marker in the code object and optimize anyway. This // is safe as long as the unoptimized code has deoptimization // support. 
diff --git a/src/compiler.cc b/src/compiler.cc
index ae7b2b9f..667432f2 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -221,11 +221,12 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   // or perform on-stack replacement for function with too many
   // stack-allocated local variables.
   //
-  // The encoding is as a signed value, with parameters using the negative
-  // indices and locals the non-negative ones.
+  // The encoding is as a signed value, with parameters and receiver using
+  // the negative indices and locals the non-negative ones.
   const int limit = LUnallocated::kMaxFixedIndices / 2;
   Scope* scope = info->scope();
-  if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) {
+  if ((scope->num_parameters() + 1) > limit ||
+      scope->num_stack_slots() > limit) {
     AbortAndDisable(info);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
@@ -261,10 +262,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
       Handle<SharedFunctionInfo> shared = info->shared_info();
       shared->EnableDeoptimizationSupport(*unoptimized.code());
       // The existing unoptimized code was replaced with the new one.
-      Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
-                                          Handle<String>(shared->DebugName()),
-                                          shared->start_position(),
-                                          &unoptimized);
+      Compiler::RecordFunctionCompilation(
+          Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
     }
   }

@@ -273,7 +272,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   // optimizable marker in the code object and optimize anyway. This
   // is safe as long as the unoptimized code has deoptimization
   // support.
-  ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
+  ASSERT(FLAG_always_opt || code->optimizable());
   ASSERT(info->shared_info()->has_deoptimization_support());

   if (FLAG_trace_hydrogen) {
@@ -282,21 +281,20 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
     HTracer::Instance()->TraceCompilation(info->function());
   }

-  TypeFeedbackOracle oracle(
-      Handle<Code>(info->shared_info()->code()),
-      Handle<Context>(info->closure()->context()->global_context()));
-  HGraphBuilder builder(&oracle);
+  Handle<Context> global_context(info->closure()->context()->global_context());
+  TypeFeedbackOracle oracle(code, global_context);
+  HGraphBuilder builder(info, &oracle);
   HPhase phase(HPhase::kTotal);
-  HGraph* graph = builder.CreateGraph(info);
+  HGraph* graph = builder.CreateGraph();
   if (Top::has_pending_exception()) {
     info->SetCode(Handle<Code>::null());
     return false;
   }

   if (graph != NULL && FLAG_build_lithium) {
-    Handle<Code> code = graph->Compile();
-    if (!code.is_null()) {
-      info->SetCode(code);
+    Handle<Code> optimized_code = graph->Compile(info);
+    if (!optimized_code.is_null()) {
+      info->SetCode(optimized_code);
       FinishOptimization(info->closure(), start);
       return true;
     }
@@ -415,13 +413,25 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
     return Handle<SharedFunctionInfo>::null();
   }

+  // Allocate function.
   ASSERT(!info->code().is_null());
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(
+          lit->name(),
+          lit->materialized_literal_count(),
+          info->code(),
+          SerializedScopeInfo::Create(info->scope()));
+
+  ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+  Compiler::SetFunctionInfo(result, lit, true, script);
+
   if (script->name()->IsString()) {
     PROFILE(CodeCreateEvent(
         info->is_eval()
             ? Logger::EVAL_TAG
             : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
         *info->code(),
+        *result,
         String::cast(script->name())));
     GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
                    script,
@@ -432,21 +442,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
             ? Logger::EVAL_TAG
             : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
         *info->code(),
-        ""));
+        *result,
+        Heap::empty_string()));
     GDBJIT(AddCode(Handle<String>(), script, info->code()));
   }

-  // Allocate function.
-  Handle<SharedFunctionInfo> result =
-      Factory::NewSharedFunctionInfo(
-          lit->name(),
-          lit->materialized_literal_count(),
-          info->code(),
-          SerializedScopeInfo::Create(info->scope()));
-
-  ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-  Compiler::SetFunctionInfo(result, lit, true, script);
-
   // Hint to the runtime system used when allocating space for initial
   // property space by setting the expected number of properties for
   // the instances of the function.
@@ -613,10 +613,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
     ASSERT(!info->code().is_null());
     Handle<Code> code = info->code();
     Handle<JSFunction> function = info->closure();
-    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
-                              Handle<String>(shared->DebugName()),
-                              shared->start_position(),
-                              info);
+    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);

     if (info->IsOptimizing()) {
       function->ReplaceCode(*code);
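Aside (illustrative sketch, not part of the upstream patch): the first compiler.cc hunk above
tightens the Crankshaft bail-out check from scope->num_parameters() > limit to
(scope->num_parameters() + 1) > limit. Per the updated comment, parameters and the receiver share
the negative half of a signed fixed-index encoding while stack locals use the non-negative half,
so the receiver must be counted against the same per-sign limit as the declared parameters. A
hypothetical numbering, purely to illustrate the arithmetic (this is not V8's actual LUnallocated
encoding):

    // Hypothetical signed slot numbering: receiver and parameters take the
    // negative indices, stack-allocated locals the non-negative ones.
    static int EncodeParameter(int i) { return -1 - i; }  // receiver is i == 0
    static int EncodeStackLocal(int j) { return j; }

    // With kMaxFixedIndices encodable slots split between the two signs,
    // each side may use at most kMaxFixedIndices / 2 of them, hence the checks
    //   (num_parameters + 1) > limit   // the "+ 1" accounts for the receiver
    //   num_stack_slots > limit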
@@ -724,10 +721,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
     ASSERT(!info.code().is_null());

     // Function compilation complete.
-    RecordFunctionCompilation(Logger::FUNCTION_TAG,
-                              literal->debug_name(),
-                              literal->start_position(),
-                              &info);
     scope_info = SerializedScopeInfo::Create(info.scope());
   }

@@ -738,6 +731,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                       info.code(),
                                       scope_info);
   SetFunctionInfo(result, literal, false, script);
+  RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
   result->set_allows_lazy_compilation(allow_lazy);

   // Set the expected number of properties for instances and return
@@ -776,28 +770,31 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,


 void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
-                                         Handle<String> name,
-                                         int start_position,
-                                         CompilationInfo* info) {
+                                         CompilationInfo* info,
+                                         Handle<SharedFunctionInfo> shared) {
+  // SharedFunctionInfo is passed separately, because if CompilationInfo
+  // was created using Script object, it will not have it.
+
   // Log the code generation. If source information is available include
   // script name and line number. Check explicitly whether logging is
   // enabled as finding the line number is not free.
-  if (Logger::is_logging() ||
-      CpuProfiler::is_profiling()) {
+  if (Logger::is_logging() || CpuProfiler::is_profiling()) {
    Handle