author     Ben Murdoch <benm@google.com>   2011-05-25 10:26:03 +0100
committer  Ben Murdoch <benm@google.com>   2011-05-25 16:24:42 +0100
commit     e0cee9b3ed82e2391fd85d118aeaa4ea361c687d (patch)
tree       31c7963cf0dfc88be29e765884e1f235076c03a4
parent     1e0659c275bb392c045087af4f6b0d7565cb3d77 (diff)
download   android_external_v8-e0cee9b3ed82e2391fd85d118aeaa4ea361c687d.tar.gz
           android_external_v8-e0cee9b3ed82e2391fd85d118aeaa4ea361c687d.tar.bz2
           android_external_v8-e0cee9b3ed82e2391fd85d118aeaa4ea361c687d.zip
Update V8 to r7079 as required by WebKit r80534.
Change-Id: I487c152e485d5a40b68997d7c0d2f1fba5da0834
237 files changed, 21495 insertions, 8135 deletions
@@ -20,6 +20,11 @@ d8_g
 shell
 shell_g
 /obj/
+/test/es5conform/data/
+/test/mozilla/data/
+/test/sputnik/sputniktests/
+/tools/oom_dump/oom_dump
+/tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug
 /tools/visual_studio/Release
 /xcodebuild/
diff --git a/Android.v8common.mk b/Android.v8common.mk
index a976a486..0a57ce6e 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -114,6 +114,7 @@ ifeq ($(TARGET_ARCH),arm)
   src/arm/jump-target-arm.cc \
   src/arm/lithium-arm.cc \
   src/arm/lithium-codegen-arm.cc \
+  src/arm/lithium-gap-resolver-arm.cc \
   src/arm/macro-assembler-arm.cc \
   src/arm/regexp-macro-assembler-arm.cc \
   src/arm/register-allocator-arm.cc \
@@ -1,3 +1,65 @@
+2011-03-07: Version 3.2.0
+
+        Fixed a number of crash bugs.
+
+        Turned on Crankshaft by default on x64 and ARM.
+
+        Improved Crankshaft for x64 and ARM.
+
+        Implemented more of EcmaScript 5 strict mode.
+
+
+2011-03-02: Version 3.1.8
+
+        Fixed a number of crash bugs.
+
+        Improved Crankshaft for x64 and ARM.
+
+        Implemented more of EcmaScript 5 strict mode.
+
+        Fixed issue with unaligned reads and writes on ARM.
+
+        Improved heap profiler support.
+
+
+2011-02-28: Version 3.1.7
+
+        Fixed a number of crash bugs.
+
+        Improved Crankshaft for x64 and ARM.
+
+        Fixed implementation of indexOf/lastIndexOf for sparse
+        arrays (http://crbug.com/73940).
+
+        Fixed bug in map space compaction (http://crbug.com/59688).
+
+        Added support for direct getter accessors calls on ARM.
+
+
+2011-02-24: Version 3.1.6
+
+        Fixed a number of crash bugs.
+
+        Added support for Cygwin (issue 64).
+
+        Improved Crankshaft for x64 and ARM.
+
+        Added Crankshaft support for stores to pixel arrays.
+
+        Fixed issue in CPU profiler with Crankshaft.
+
+
+2011-02-16: Version 3.1.5
+
+        Change RegExp parsing to disallow /(*)/.
+
+        Added GDB JIT support for ARM.
+
+        Fixed several crash bugs.
+
+        Performance improvements on the IA32 platform.
+
+
 2011-02-14: Version 3.1.4
 
         Fixed incorrect compare of prototypes of the global object (issue
@@ -663,8 +663,8 @@ def GuessToolchain(os):
 
 
 def GuessVisibility(os, toolchain):
-  if os == 'win32' and toolchain == 'gcc':
-    # MinGW can't do it.
+  if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
+    # MinGW / Cygwin can't do it.
     return 'default'
   elif os == 'solaris':
     return 'default'
@@ -685,7 +685,7 @@ SIMPLE_OPTIONS = {
    'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
  },
  'os': {
-    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
+    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
    'default': OS_GUESS,
    'help': 'the os to build for (%s)' % OS_GUESS
  },
@@ -890,7 +890,7 @@ def VerifyOptions(env):
    return False
  if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
    Abort("Profiling on windows only supported for static library.")
-  if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')):
+  if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
    Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
  if env['os'] == 'win32' and env['soname'] == 'on':
    Abort("Shared Object soname not applicable for Windows.")
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 07693bd6..120fd679 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,6 +1,5 @@
 We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/11.0.672.0/DEPS -http://v8.googlecode.com/svn/trunk@6768 plus a partial cherry-pick to fix Android build ... -- r7077 - CreateThread() in src/platform-linux.cc +http://src.chromium.org/svn/releases/11.0.696.0/DEPS +http://v8.googlecode.com/svn/trunk@7079 diff --git a/copy-new-sources b/copy-new-sources deleted file mode 100755 index 84fc6843..00000000 --- a/copy-new-sources +++ /dev/null @@ -1 +0,0 @@ -cp -r AUTHORS ChangeLog LICENSE SConstruct benchmarks include samples src test tools ../android/master/external/v8/ diff --git a/samples/shell.cc b/samples/shell.cc index 6b67df6c..64f78f02 100644 --- a/samples/shell.cc +++ b/samples/shell.cc @@ -27,6 +27,7 @@ #include <v8.h> #include <v8-testing.h> +#include <assert.h> #include <fcntl.h> #include <string.h> #include <stdio.h> @@ -290,11 +291,13 @@ bool ExecuteString(v8::Handle<v8::String> source, } else { v8::Handle<v8::Value> result = script->Run(); if (result.IsEmpty()) { + assert(try_catch.HasCaught()); // Print errors that happened during execution. if (report_exceptions) ReportException(&try_catch); return false; } else { + assert(!try_catch.HasCaught()); if (print_result && !result->IsUndefined()) { // If all went well and the result wasn't undefined then print // the returned value. diff --git a/src/SConscript b/src/SConscript index c3561be3..34ca91ca 100755 --- a/src/SConscript +++ b/src/SConscript @@ -153,6 +153,7 @@ SOURCES = { arm/jump-target-arm.cc arm/lithium-arm.cc arm/lithium-codegen-arm.cc + arm/lithium-gap-resolver-arm.cc arm/macro-assembler-arm.cc arm/regexp-macro-assembler-arm.cc arm/register-allocator-arm.cc @@ -233,6 +234,7 @@ SOURCES = { 'os:android': ['platform-linux.cc', 'platform-posix.cc'], 'os:macos': ['platform-macos.cc', 'platform-posix.cc'], 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'], + 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'], 'os:nullos': ['platform-nullos.cc'], 'os:win32': ['platform-win32.cc'], 'mode:release': [], @@ -264,6 +266,9 @@ D8_FILES = { 'os:solaris': [ 'd8-posix.cc' ], + 'os:cygwin': [ + 'd8-posix.cc' + ], 'os:win32': [ 'd8-windows.cc' ], diff --git a/src/accessors.cc b/src/accessors.cc index 2b205d5d..18264254 100644 --- a/src/accessors.cc +++ b/src/accessors.cc @@ -446,6 +446,14 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { bool found_it = false; JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it); if (!found_it) return Heap::undefined_value(); + while (!function->should_have_prototype()) { + found_it = false; + function = FindInPrototypeChain<JSFunction>(object->GetPrototype(), + &found_it); + // There has to be one because we hit the getter. + ASSERT(found_it); + } + if (!function->has_prototype()) { Object* prototype; { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function); @@ -466,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, bool found_it = false; JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it); if (!found_it) return Heap::undefined_value(); + if (!function->should_have_prototype()) { + // Since we hit this accessor, object will have no prototype property. + return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(), + value, + NONE); + } + if (function->has_initial_map()) { // If the function has allocated the initial map // replace it with a copy containing the new prototype. 
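The new asserts in samples/shell.cc pin down an API invariant: Script::Run() returns an empty handle exactly when the enclosing TryCatch has caught something. A minimal sketch of that contract against the 2011-era V8 embedding API (error reporting elided; the function name is ours):

```cpp
#include <v8.h>
#include <cassert>

// Sketch of the invariant asserted in ExecuteString: an empty result
// handle and TryCatch::HasCaught() must agree. (2011-era V8 API.)
bool RunAndCheck(v8::Handle<v8::String> source) {
  v8::HandleScope handle_scope;
  v8::TryCatch try_catch;
  v8::Handle<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty()) {
    assert(try_catch.HasCaught());  // a compile error left an exception
    return false;
  }
  v8::Handle<v8::Value> result = script->Run();
  if (result.IsEmpty()) {
    assert(try_catch.HasCaught());  // a runtime error left an exception
    return false;
  }
  assert(!try_catch.HasCaught());   // success leaves nothing pending
  return true;
}
```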
@@ -2286,7 +2286,8 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value, self, key_obj, value_obj, - static_cast<PropertyAttributes>(attribs)); + static_cast<PropertyAttributes>(attribs), + i::kNonStrictMode); has_pending_exception = obj.is_null(); EXCEPTION_BAILOUT_CHECK(false); return true; @@ -2303,7 +2304,8 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) { i::Handle<i::Object> obj = i::SetElement( self, index, - value_obj); + value_obj, + i::kNonStrictMode); has_pending_exception = obj.is_null(); EXCEPTION_BAILOUT_CHECK(false); return true; @@ -2711,7 +2713,8 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key, hidden_props, key_obj, value_obj, - static_cast<PropertyAttributes>(None)); + static_cast<PropertyAttributes>(None), + i::kNonStrictMode); has_pending_exception = obj.is_null(); EXCEPTION_BAILOUT_CHECK(false); return true; diff --git a/src/arguments.h b/src/arguments.h index d51c9e4c..5cf8deaa 100644 --- a/src/arguments.h +++ b/src/arguments.h @@ -78,7 +78,7 @@ class Arguments BASE_EMBEDDED { class CustomArguments : public Relocatable { public: inline CustomArguments(Object* data, - JSObject* self, + Object* self, JSObject* holder) { values_[2] = self; values_[1] = holder; diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index fb9bb488..c91d4ba2 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vldr(const DwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; dst.split_code(&sd, &d); + ASSERT(offset >= 0); + + if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. 
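The relaxed vldr/vstr paths above hinge on one encodability rule: the VFP load/store immediate is an 8-bit word count, so only word-aligned offsets below 1024 encode in a single instruction, and everything else takes the ip-register fallback. A standalone restatement of that predicate (the function name is ours):

```cpp
#include <cassert>
#include <cstdint>

// The VFP immediate offset is an unsigned 8-bit word count; the sign is
// carried separately by the U bit, so only word-aligned offsets in
// [0, 1020] fit the direct encoding.
bool FitsVfpImmediateOffset(int32_t offset) {
  if (offset < 0) offset = -offset;  // sign goes into the U bit
  return (offset % 4) == 0 && (offset / 4) < 256;
}

int main() {
  assert(FitsVfpImmediateOffset(1020));   // 255 * 4: largest direct form
  assert(!FitsVfpImmediateOffset(1024));  // needs the ip-register fallback
  assert(!FitsVfpImmediateOffset(2));     // not word-aligned
}
```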
+ ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vldr(const SwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vstr(const DwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vstr(src, operand.rn(), operand.offset(), cond); } @@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; src.split_code(&sd, &d); - emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | - 0xA*B8 | ((offset / 4) & 255)); + ASSERT(offset >= 0); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | + 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vstr(const SwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(src, operand.rn(), operand.offset(), cond); } diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 3941c84b..f5eb5075 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -284,6 +284,7 @@ const SwVfpRegister s29 = { 29 }; const SwVfpRegister s30 = { 30 }; const SwVfpRegister s31 = { 31 }; +const DwVfpRegister no_dreg = { -1 }; const DwVfpRegister d0 = { 0 }; const DwVfpRegister d1 = { 1 }; const DwVfpRegister d2 = { 2 }; @@ -387,7 +388,7 @@ class Operand BASE_EMBEDDED { // Return true if this is a register operand. INLINE(bool is_reg() const); - // Return true of this operand fits in one instruction so that no + // Return true if this operand fits in one instruction so that no // 2-instruction solution with a load into the ip register is necessary. 
bool is_single_instruction() const; bool must_use_constant_pool() const; @@ -439,7 +440,7 @@ class MemOperand BASE_EMBEDDED { offset_ = offset; } - uint32_t offset() { + uint32_t offset() const { ASSERT(rm_.is(no_reg)); return offset_; } @@ -447,6 +448,10 @@ class MemOperand BASE_EMBEDDED { Register rn() const { return rn_; } Register rm() const { return rm_; } + bool OffsetIsUint12Encodable() const { + return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_); + } + private: Register rn_; // base Register rm_; // register offset @@ -902,22 +907,34 @@ class Assembler : public Malloced { void vldr(const DwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const DwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vldr(const SwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const SwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vstr(const DwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const DwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vstr(const SwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const SwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vmov(const DwVfpRegister dst, diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index f14d77af..961d3ce5 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -428,7 +428,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { GenerateLoadArrayFunction(masm, r1); if (FLAG_debug_code) { - // Initial map for the builtin Array function shoud be a map. + // Initial map for the builtin Array functions should be maps. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ tst(r2, Operand(kSmiTagMask)); __ Assert(ne, "Unexpected initial map for Array function"); @@ -458,11 +458,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { Label generic_constructor; if (FLAG_debug_code) { - // The array construct code is only set for the builtin Array function which - // always have a map. - GenerateLoadArrayFunction(masm, r2); - __ cmp(r1, r2); - __ Assert(eq, "Unexpected Array function"); + // The array construct code is only set for the builtin and internal + // Array functions which always have a map. // Initial map for the builtin Array function should be a map. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ tst(r2, Operand(kSmiTagMask)); @@ -1231,6 +1228,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Change context eagerly in case we need the global receiver. __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Do not transform the receiver for strict mode functions. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &shift_arguments); + + // Compute the receiver in non-strict mode. 
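Generate_FunctionCall above now skips the receiver transformation for strict mode functions by testing one bit of the SharedFunctionInfo compiler-hints word, shifted by the smi tag. A sketch of that predicate; the bit index used here is a stand-in, not the real kStrictModeFunction value:

```cpp
#include <cassert>
#include <cstdint>

// Stand-in constants: the real bit index lives in SharedFunctionInfo.
const int kStrictModeFunction = 2;  // hypothetical hint-bit index
const int kSmiTagSize = 1;          // the hints word is stored as a smi

bool IsStrictModeFunction(uint32_t compiler_hints) {
  return (compiler_hints & (1u << (kStrictModeFunction + kSmiTagSize))) != 0;
}

int main() {
  // The builtin branches past the receiver transformation when set.
  assert(IsStrictModeFunction(1u << (kStrictModeFunction + kSmiTagSize)));
  assert(!IsStrictModeFunction(0));
}
```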
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2)); __ ldr(r2, MemOperand(r2, -kPointerSize)); // r0: actual number of arguments @@ -1394,10 +1399,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Change context eagerly to get the right global object if necessary. __ ldr(r0, MemOperand(fp, kFunctionOffset)); __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r0. + __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); // Compute the receiver. Label call_to_object, use_global_receiver, push_receiver; __ ldr(r0, MemOperand(fp, kRecvOffset)); + + // Do not transform the receiver for strict mode functions. + __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Compute the receiver in non-strict mode. __ tst(r0, Operand(kSmiTagMask)); __ b(eq, &call_to_object); __ LoadRoot(r1, Heap::kNullValueRootIndex); diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 1e7d5589..1c6d709f 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -398,8 +398,11 @@ class FloatingPointHelper : public AllStatic { Label* not_number); // Loads the number from object into dst as a 32-bit integer if possible. If - // the object is not a 32-bit integer control continues at the label - // not_int32. If VFP is supported double_scratch is used but not scratch2. + // the object cannot be converted to a 32-bit integer control continues at + // the label not_int32. If VFP is supported double_scratch is used + // but not scratch2. + // Floating point value in the 32-bit integer range will be rounded + // to an integer. static void LoadNumberAsInteger(MacroAssembler* masm, Register object, Register dst, @@ -409,6 +412,76 @@ class FloatingPointHelper : public AllStatic { DwVfpRegister double_scratch, Label* not_int32); + // Load the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be loaded. + static void LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + Destination destination, + DwVfpRegister double_dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + SwVfpRegister single_scratch, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be converted. + // scratch3 is not used when VFP3 is supported. + static void LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_int32); + + // Generate non VFP3 code to check if a double can be exactly represented by a + // 32-bit integer. This does not check for 0 or -0, which need + // to be checked for separately. + // Control jumps to not_int32 if the value is not a 32-bit integer, and falls + // through otherwise. + // src1 and src2 will be cloberred. + // + // Expected input: + // - src1: higher (exponent) part of the double value. 
+ // - src2: lower (mantissa) part of the double value. + // Output status: + // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) + // - src2: contains 1. + // - other registers are clobbered. + static void DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32); + + // Generates code to call a C function to do a double operation using core + // registers. (Used when VFP3 is not supported.) + // This code never falls through, but returns with a heap number containing + // the result in r0. + // Register heapnumber_result must be a heap number in which the + // result of the operation will be stored. + // Requires the following layout on entry: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + static void CallCCodeForDoubleOperation(MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch); + private: static void LoadNumber(MacroAssembler* masm, FloatingPointHelper::Destination destination, @@ -560,6 +633,318 @@ void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm, } +void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + Destination destination, + DwVfpRegister double_dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + SwVfpRegister single_scratch, + Label* not_int32) { + ASSERT(!scratch1.is(object) && !scratch2.is(object)); + ASSERT(!scratch1.is(scratch2)); + ASSERT(!heap_number_map.is(object) && + !heap_number_map.is(scratch1) && + !heap_number_map.is(scratch2)); + + Label done, obj_is_not_smi; + + __ JumpIfNotSmi(object, &obj_is_not_smi); + __ SmiUntag(scratch1, object); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(single_scratch, scratch1); + __ vcvt_f64_s32(double_dst, single_scratch); + if (destination == kCoreRegisters) { + __ vmov(dst1, dst2, double_dst); + } + } else { + Label fewer_than_20_useful_bits; + // Expected output: + // | dst1 | dst2 | + // | s | exp | mantissa | + + // Check for zero. + __ cmp(scratch1, Operand(0)); + __ mov(dst1, scratch1); + __ mov(dst2, scratch1); + __ b(eq, &done); + + // Preload the sign of the value. + __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC); + // Get the absolute value of the object (as an unsigned integer). + __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); + + // Get mantisssa[51:20]. + + // Get the position of the first set bit. + __ CountLeadingZeros(dst2, scratch1, scratch2); + __ rsb(dst2, dst2, Operand(31)); + + // Set the exponent. + __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst1, scratch2, scratch2, + HeapNumber::kExponentShift, HeapNumber::kExponentBits); + + // Clear the first non null bit. + __ mov(scratch2, Operand(1)); + __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2)); + + __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); + // Get the number of bits to set in the lower part of the mantissa. + __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ b(mi, &fewer_than_20_useful_bits); + // Set the higher 20 bits of the mantissa. 
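The non-VFP branch above builds the IEEE-754 double for a smi by hand: extract the sign, locate the top set bit to form the exponent, clear the implicit leading bit, and place the rest as the mantissa. The same construction in portable C++ (using the GCC/Clang __builtin_clz), checked against the compiler's own conversion:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <initializer_list>

// Build the IEEE-754 bit pattern of a double from an int32 by hand:
// sign, exponent from the position of the top set bit, mantissa with
// the implicit leading 1 cleared.
uint64_t Int32ToDoubleBits(int32_t value) {
  if (value == 0) return 0;
  uint64_t sign = value < 0 ? (1ull << 63) : 0;
  uint32_t mag = value < 0 ? 0u - static_cast<uint32_t>(value)
                           : static_cast<uint32_t>(value);
  int top = 31 - __builtin_clz(mag);  // GCC/Clang: index of first set bit
  uint64_t mantissa =
      (static_cast<uint64_t>(mag) << (52 - top)) & ((1ull << 52) - 1);
  uint64_t exponent = static_cast<uint64_t>(top + 1023) << 52;
  return sign | exponent | mantissa;
}

int main() {
  for (int32_t v : {1, -1, 42, -123456, 0x7fffffff}) {
    double d = v;
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    assert(Int32ToDoubleBits(v) == bits);
  }
}
```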
+ __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2)); + __ rsb(scratch2, scratch2, Operand(32)); + __ mov(dst2, Operand(scratch1, LSL, scratch2)); + __ b(&done); + + __ bind(&fewer_than_20_useful_bits); + __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ mov(scratch2, Operand(scratch1, LSL, scratch2)); + __ orr(dst1, dst1, scratch2); + // Set dst2 to 0. + __ mov(dst2, Operand(0)); + } + + __ b(&done); + + __ bind(&obj_is_not_smi); + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + // Load the number. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Load the double value. + __ sub(scratch1, object, Operand(kHeapObjectTag)); + __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); + + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_dst, + scratch1, + scratch2, + kCheckForInexactConversion); + + // Jump to not_int32 if the operation did not succeed. + __ b(ne, not_int32); + + if (destination == kCoreRegisters) { + __ vmov(dst1, dst2, double_dst); + } + + } else { + ASSERT(!scratch1.is(object) && !scratch2.is(object)); + // Load the double value in the destination registers.. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + + // Check for 0 and -0. + __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); + __ orr(scratch1, scratch1, Operand(dst2)); + __ cmp(scratch1, Operand(0)); + __ b(eq, &done); + + // Check that the value can be exactly represented by a 32-bit integer. + // Jump to not_int32 if that's not the case. + DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); + + // dst1 and dst2 were trashed. Reload the double value. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + } + + __ bind(&done); +} + + +void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_int32) { + ASSERT(!dst.is(object)); + ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); + ASSERT(!scratch1.is(scratch2) && + !scratch1.is(scratch3) && + !scratch2.is(scratch3)); + + Label done; + + // Untag the object into the destination register. + __ SmiUntag(dst, object); + // Just return if the object is a smi. + __ JumpIfSmi(object, &done); + + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + // Object is a heap number. + // Convert the floating point value to a 32-bit integer. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + SwVfpRegister single_scratch = double_scratch.low(); + // Load the double value. + __ sub(scratch1, object, Operand(kHeapObjectTag)); + __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); + + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_scratch, + scratch1, + scratch2, + kCheckForInexactConversion); + + // Jump to not_int32 if the operation did not succeed. + __ b(ne, not_int32); + // Get the result in the destination register. + __ vmov(dst, single_scratch); + + } else { + // Load the double value in the destination registers. 
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + + // Check for 0 and -0. + __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); + __ orr(dst, scratch2, Operand(dst)); + __ cmp(dst, Operand(0)); + __ b(eq, &done); + + DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); + + // Registers state after DoubleIs32BitInteger. + // dst: mantissa[51:20]. + // scratch2: 1 + + // Shift back the higher bits of the mantissa. + __ mov(dst, Operand(dst, LSR, scratch3)); + // Set the implicit first bit. + __ rsb(scratch3, scratch3, Operand(32)); + __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); + // Set the sign. + __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + __ rsb(dst, dst, Operand(0), LeaveCC, mi); + } + + __ bind(&done); +} + + +void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32) { + // Get exponent alone in scratch. + __ Ubfx(scratch, + src1, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + + // Substract the bias from the exponent. + __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); + + // src1: higher (exponent) part of the double value. + // src2: lower (mantissa) part of the double value. + // scratch: unbiased exponent. + + // Fast cases. Check for obvious non 32-bit integer values. + // Negative exponent cannot yield 32-bit integers. + __ b(mi, not_int32); + // Exponent greater than 31 cannot yield 32-bit integers. + // Also, a positive value with an exponent equal to 31 is outside of the + // signed 32-bit integer range. + // Another way to put it is that if (exponent - signbit) > 30 then the + // number cannot be represented as an int32. + Register tmp = dst; + __ sub(tmp, scratch, Operand(src1, LSR, 31)); + __ cmp(tmp, Operand(30)); + __ b(gt, not_int32); + // - Bits [21:0] in the mantissa are not null. + __ tst(src2, Operand(0x3fffff)); + __ b(ne, not_int32); + + // Otherwise the exponent needs to be big enough to shift left all the + // non zero bits left. So we need the (30 - exponent) last bits of the + // 31 higher bits of the mantissa to be null. + // Because bits [21:0] are null, we can check instead that the + // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null. + + // Get the 32 higher bits of the mantissa in dst. + __ Ubfx(dst, + src2, + HeapNumber::kMantissaBitsInTopWord, + 32 - HeapNumber::kMantissaBitsInTopWord); + __ orr(dst, + dst, + Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + + // Create the mask and test the lower bits (of the higher bits). + __ rsb(scratch, scratch, Operand(32)); + __ mov(src2, Operand(1)); + __ mov(src1, Operand(src2, LSL, scratch)); + __ sub(src1, src1, Operand(1)); + __ tst(dst, src1); + __ b(ne, not_int32); +} + + +void FloatingPointHelper::CallCCodeForDoubleOperation( + MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch) { + // Using core registers: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + + // Assert that heap_number_result is callee-saved. + // We currently always use r5 to pass it. 
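DoubleIs32BitInteger above rejects a double unless its unbiased exponent is in range and no mantissa bits fall below the binary point. A portable restatement of the whole predicate, with the zero handling the assembler callers do separately folded in:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// A double is an exact int32 iff its unbiased exponent lies in [0, 31],
// no mantissa bits fall below the binary point, and the magnitude is in
// int32 range (2^31 is allowed only for the negative sign).
bool IsExactInt32(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  if ((bits << 1) == 0) return true;  // +0 and -0
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  if (exponent < 0 || exponent > 31) return false;
  uint64_t mantissa = (bits & ((1ull << 52) - 1)) | (1ull << 52);
  if (mantissa & ((1ull << (52 - exponent)) - 1)) return false;  // fraction
  uint64_t magnitude = mantissa >> (52 - exponent);
  uint64_t limit = (bits >> 63) ? (1ull << 31) : (1ull << 31) - 1;
  return magnitude <= limit;
}

int main() {
  assert(IsExactInt32(0.0) && IsExactInt32(-0.0));
  assert(IsExactInt32(-2147483648.0) && IsExactInt32(42.0));
  assert(!IsExactInt32(2147483648.0));  // one past INT32_MAX
  assert(!IsExactInt32(0.5) && !IsExactInt32(1e10));
}
```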
+ ASSERT(heap_number_result.is(r5)); + + // Push the current return address before the C call. Return will be + // through pop(pc) below. + __ push(lr); + __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. + // Call C routine that may not cause GC or other trouble. + __ CallCFunction(ExternalReference::double_fp_operation(op), 4); + // Store answer in the overwritable heap number. +#if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to substract the tag from heap_number_result. + __ sub(scratch, heap_number_result, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset)); +#else + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(heap_number_result, + HeapNumber::kValueOffset)); +#endif + // Place heap_number_result in r0 and return to the pushed return address. + __ mov(r0, Operand(heap_number_result)); + __ pop(pc); +} + // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { @@ -1296,6 +1681,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // This stub does not handle the inlined cases (Smis, Booleans, undefined). // The stub returns zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub uses VFP3 instructions. + ASSERT(CpuFeatures::IsEnabled(VFP3)); + Label false_result; Label not_heap_number; Register scratch = r9.is(tos_) ? r7 : r9; @@ -2661,8 +3049,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. Register result = r5; - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); // Load the operands. if (smi_operands) { @@ -2704,33 +3092,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ add(r0, r0, Operand(kHeapObjectTag)); __ Ret(); } else { - // Using core registers: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - - // Push the current return address before the C call. Return will be - // through pop(pc) below. - __ push(lr); - __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. - // Call C routine that may not cause GC or other trouble. r5 is callee - // save. - __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); - // Store answer in the overwritable heap number. -#if !defined(USE_ARM_EABI) - // Double returned in fp coprocessor register 0 and 1, encoded as - // register cr8. Offsets must be divisible by 4 for coprocessor so we - // need to substract the tag from r5. - __ sub(scratch1, result, Operand(kHeapObjectTag)); - __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); -#else - // Double returned in registers 0 and 1. - __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); -#endif - // Plase result in r0 and return to the pushed return address. - __ mov(r0, Operand(result)); - __ pop(pc); + // Call the C function to handle the double operation. 
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm, + op_, + result, + scratch1); } break; } @@ -2776,7 +3142,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, break; case Token::SAR: // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); __ GetLeastBitsFromInt32(r2, r2, 5); __ mov(r2, Operand(r3, ASR, r2)); break; @@ -2811,8 +3176,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. __ bind(&result_not_a_smi); - __ AllocateHeapNumber( - r5, scratch1, scratch2, heap_number_map, gc_required); + Register result = r5; + if (smi_operands) { + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + } else { + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); + } // r2: Answer as signed int32. // r5: Heap number to write answer into. @@ -2915,7 +3286,288 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { ASSERT(operands_type_ == TRBinaryOpIC::INT32); - GenerateTypeTransition(masm); + Register left = r1; + Register right = r0; + Register scratch1 = r7; + Register scratch2 = r9; + DwVfpRegister double_scratch = d0; + SwVfpRegister single_scratch = s3; + + Register heap_number_result = no_reg; + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + Label call_runtime; + // Labels for type transition, used for wrong input or output types. + // Both label are currently actually bound to the same position. We use two + // different label to differentiate the cause leading to type transition. + Label transition; + + // Smi-smi fast case. + Label skip; + __ orr(scratch1, left, right); + __ JumpIfNotSmi(scratch1, &skip); + GenerateSmiSmiOperation(masm); + // Fall through if the result is not a smi. + __ bind(&skip); + + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: { + // Load both operands and check that they are 32-bit integer. + // Jump to type transition if they are not. The registers r0 and r1 (right + // and left) are preserved for the runtime call. + FloatingPointHelper::Destination destination = + CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? + FloatingPointHelper::kVFPRegisters : + FloatingPointHelper::kCoreRegisters; + + FloatingPointHelper::LoadNumberAsInt32Double(masm, + right, + destination, + d7, + r2, + r3, + heap_number_map, + scratch1, + scratch2, + s0, + &transition); + FloatingPointHelper::LoadNumberAsInt32Double(masm, + left, + destination, + d6, + r4, + r5, + heap_number_map, + scratch1, + scratch2, + s0, + &transition); + + if (destination == FloatingPointHelper::kVFPRegisters) { + CpuFeatures::Scope scope(VFP3); + Label return_heap_number; + switch (op_) { + case Token::ADD: + __ vadd(d5, d6, d7); + break; + case Token::SUB: + __ vsub(d5, d6, d7); + break; + case Token::MUL: + __ vmul(d5, d6, d7); + break; + case Token::DIV: + __ vdiv(d5, d6, d7); + break; + default: + UNREACHABLE(); + } + + if (op_ != Token::DIV) { + // These operations produce an integer result. + // Try to return a smi if we can. + // Otherwise return a heap number if allowed, or jump to type + // transition. 
+ + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + d5, + scratch1, + scratch2); + + if (result_type_ <= TRBinaryOpIC::INT32) { + // If the ne condition is set, result does + // not fit in a 32-bit integer. + __ b(ne, &transition); + } + + // Check if the result fits in a smi. + __ vmov(scratch1, single_scratch); + __ add(scratch2, scratch1, Operand(0x40000000), SetCC); + // If not try to return a heap number. + __ b(mi, &return_heap_number); + // Tag the result and return. + __ SmiTag(r0, scratch1); + __ Ret(); + } + + if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER + : TRBinaryOpIC::INT32) { + __ bind(&return_heap_number); + // We are using vfp registers so r5 is available. + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); + } + + // A DIV operation expecting an integer result falls through + // to type transition. + + } else { + // We preserved r0 and r1 to be able to call runtime. + // Save the left value on the stack. + __ Push(r5, r4); + + // Allocate a heap number to store the result. + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + + // Load the left value from the value saved on the stack. + __ Pop(r1, r0); + + // Call the C function to handle the double operation. + FloatingPointHelper::CallCCodeForDoubleOperation( + masm, op_, heap_number_result, scratch1); + } + + break; + } + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + Label return_heap_number; + Register scratch3 = r5; + // Convert operands to 32-bit integers. Right in r2 and left in r3. The + // registers r0 and r1 (right and left) are preserved for the runtime + // call. + FloatingPointHelper::LoadNumberAsInt32(masm, + left, + r3, + heap_number_map, + scratch1, + scratch2, + scratch3, + d0, + &transition); + FloatingPointHelper::LoadNumberAsInt32(masm, + right, + r2, + heap_number_map, + scratch1, + scratch2, + scratch3, + d0, + &transition); + + // The ECMA-262 standard specifies that, for shift operations, only the + // 5 least significant bits of the shift value should be used. + switch (op_) { + case Token::BIT_OR: + __ orr(r2, r3, Operand(r2)); + break; + case Token::BIT_XOR: + __ eor(r2, r3, Operand(r2)); + break; + case Token::BIT_AND: + __ and_(r2, r3, Operand(r2)); + break; + case Token::SAR: + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, ASR, r2)); + break; + case Token::SHR: + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSR, r2), SetCC); + // SHR is special because it is required to produce a positive answer. + // We only get a negative result if the shift value (r2) is 0. + // This result cannot be respresented as a signed 32-bit integer, try + // to return a heap number if we can. + // The non vfp3 code does not support this special case, so jump to + // runtime if we don't support it. + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, + (result_type_ <= TRBinaryOpIC::INT32) ? &transition + : &return_heap_number); + } else { + __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? 
&transition + : &call_runtime); + } + break; + case Token::SHL: + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSL, r2)); + break; + default: + UNREACHABLE(); + } + + // Check if the result fits in a smi. + __ add(scratch1, r2, Operand(0x40000000), SetCC); + // If not try to return a heap number. (We know the result is an int32.) + __ b(mi, &return_heap_number); + // Tag the result and return. + __ SmiTag(r0, r2); + __ Ret(); + + __ bind(&return_heap_number); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + + if (op_ != Token::SHR) { + // Convert the result to a floating point value. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_s32(double_scratch, double_scratch.low()); + } else { + // The result must be interpreted as an unsigned 32-bit integer. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_u32(double_scratch, double_scratch.low()); + } + + // Store the result. + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(double_scratch, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); + } else { + // Tail call that writes the int32 in r2 to the heap number in r0, using + // r3 as scratch. r0 is preserved and returned. + WriteInt32ToHeapNumberStub stub(r2, r0, r3); + __ TailCallStub(&stub); + } + + break; + } + + default: + UNREACHABLE(); + } + + if (transition.is_linked()) { + __ bind(&transition); + GenerateTypeTransition(masm); + } + + __ bind(&call_runtime); + GenerateCallRuntime(masm); } @@ -2934,45 +3586,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime; + Label call_runtime, call_string_add_or_runtime; GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); - // If all else fails, use the runtime system to get the correct - // result. - __ bind(&call_runtime); + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); - // Try to add strings before calling runtime. + __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { GenerateAddStrings(masm); } - GenericBinaryOpStub stub(op_, mode_, r1, r0); - __ TailCallStub(&stub); + __ bind(&call_runtime); + GenerateCallRuntime(masm); } void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { ASSERT(op_ == Token::ADD); + Label left_not_string, call_runtime; Register left = r1; Register right = r0; - Label call_runtime; - // Check if first argument is a string. - __ JumpIfSmi(left, &call_runtime); + // Check if left argument is a string. + __ JumpIfSmi(left, &left_not_string); __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); + __ b(ge, &left_not_string); + + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_left_stub); - // First argument is a a string, test second. + // Left operand is not a string, test right. + __ bind(&left_not_string); __ JumpIfSmi(right, &call_runtime); __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &call_runtime); - // First and second argument are strings. 
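Several result paths above use the same smi-fit idiom: add #0x40000000 to the candidate int32 and branch on the sign flag, since the sum is negative exactly when the value falls outside the 31-bit smi range. Restated portably:

```cpp
#include <cassert>
#include <cstdint>

// Portable form of "adds r, value, #0x40000000; bpl smi_case": the sum
// has its sign bit set exactly when value lies outside the smi range
// [-2^30, 2^30 - 1].
bool FitsInSmi(int32_t value) {
  return ((static_cast<uint32_t>(value) + 0x40000000u) >> 31) == 0;
}

int main() {
  assert(FitsInSmi(0) && FitsInSmi(0x3fffffff) && FitsInSmi(-0x40000000));
  assert(!FitsInSmi(0x40000000) && !FitsInSmi(-0x40000001));
}
```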
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); + __ TailCallStub(&string_add_right_stub); // At least one argument is not a string. __ bind(&call_runtime); @@ -3061,32 +3715,47 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Argument is a number and is on stack and in r0. - Label runtime_call; + // Untagged case: double input in d2, double result goes + // into d2. + // Tagged case: tagged input on top of stack and in r0, + // tagged result (heap number) goes into r0. + Label input_not_smi; Label loaded; + Label calculate; + Label invalid_cache; + const Register scratch0 = r9; + const Register scratch1 = r7; + const Register cache_entry = r0; + const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(VFP3)) { - // Load argument and check if it is a smi. - __ JumpIfNotSmi(r0, &input_not_smi); - CpuFeatures::Scope scope(VFP3); - // Input is a smi. Convert to double and load the low and high words - // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); - __ b(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ CheckMap(r0, - r1, - Heap::kHeapNumberMapRootIndex, - &runtime_call, - true); - // Input is a HeapNumber. Load it to a double register and store the - // low and high words into r2, r3. - __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); - + if (tagged) { + // Argument is a number and is on stack and in r0. + // Load argument and check if it is a smi. + __ JumpIfNotSmi(r0, &input_not_smi); + + // Input is a smi. Convert to double and load the low and high words + // of the double into r2, r3. + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r0, + r1, + Heap::kHeapNumberMapRootIndex, + &calculate, + true); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r2, r3. + __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ vmov(r2, r3, d0); + } else { + // Input is untagged double in d2. Output goes to d2. + __ vmov(r2, r3, d2); + } __ bind(&loaded); // r2 = low 32 bits of double value // r3 = high 32 bits of double value @@ -3101,14 +3770,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // r2 = low 32 bits of double value. // r3 = high 32 bits of double value. // r1 = TranscendentalCache::hash(double value). - __ mov(r0, + __ mov(cache_entry, Operand(ExternalReference::transcendental_cache_array_address())); // r0 points to cache array. - __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); + __ ldr(cache_entry, MemOperand(cache_entry, + type_ * sizeof(TranscendentalCache::caches_[0]))); // r0 points to the cache for the type type_. // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ cmp(r0, Operand(0, RelocInfo::NONE)); - __ b(eq, &runtime_call); + __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); + __ b(eq, &invalid_cache); #ifdef DEBUG // Check that the layout of cache elements match expectations. @@ -3127,21 +3797,109 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. 
__ add(r1, r1, Operand(r1, LSL, 1)); - __ add(r0, r0, Operand(r1, LSL, 2)); + __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); // Check if cache matches: Double value is stored in uint32_t[2] array. - __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); + __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); __ cmp(r2, r4); - __ b(ne, &runtime_call); + __ b(ne, &calculate); __ cmp(r3, r5); - __ b(ne, &runtime_call); - // Cache hit. Load result, pop argument and return. - __ mov(r0, Operand(r6)); - __ pop(); + __ b(ne, &calculate); + // Cache hit. Load result, cleanup and return. + if (tagged) { + // Pop input value from stack and load result into r0. + __ pop(); + __ mov(r0, Operand(r6)); + } else { + // Load result into d2. + __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); + } + __ Ret(); + } // if (CpuFeatures::IsSupported(VFP3)) + + __ bind(&calculate); + if (tagged) { + __ bind(&invalid_cache); + __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); + } else { + if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE(); + CpuFeatures::Scope scope(VFP3); + + Label no_update; + Label skip_cache; + const Register heap_number_map = r5; + + // Call C function to calculate the result and update the cache. + // Register r0 holds precalculated cache entry address; preserve + // it on the stack and pop it into register cache_entry after the + // call. + __ push(cache_entry); + GenerateCallCFunction(masm, scratch0); + __ GetCFunctionDoubleResult(d2); + + // Try to update the cache. If we cannot allocate a + // heap number, we return the result without updating. + __ pop(cache_entry); + __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); + __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); + __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); + __ Ret(); + + __ bind(&invalid_cache); + // The cache is invalid. Call runtime which will recreate the + // cache. + __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); + __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + __ LeaveInternalFrame(); + __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ Ret(); + + __ bind(&skip_cache); + // Call C function to calculate the result and answer directly + // without updating the cache. + GenerateCallCFunction(masm, scratch0); + __ GetCFunctionDoubleResult(d2); + __ bind(&no_update); + + // We return the value in d2 without adding it to the cache, but + // we cause a scavenging GC so that future allocations will succeed. + __ EnterInternalFrame(); + + // Allocate an aligned object larger than a HeapNumber. 
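The cache lookup above scales the hash by the 12-byte entry size (two uint32 key words plus a result pointer on a 32-bit target) with two shifted adds instead of a multiply. The same strength reduction in C++:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the assembler sequence:
//   add r1, r1, r1, LSL #1   ; r1 = hash * 3
//   add r0, r0, r1, LSL #2   ; r0 = base + hash * 12
uintptr_t CacheEntryAddress(uintptr_t base, uint32_t hash) {
  uint32_t times3 = hash + (hash << 1);
  return base + (static_cast<uintptr_t>(times3) << 2);
}

int main() {
  assert(CacheEntryAddress(0, 1) == 12);
  assert(CacheEntryAddress(1000, 7) == 1084);
}
```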
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + __ LeaveInternalFrame(); __ Ret(); } +} - __ bind(&runtime_call); - __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); + +void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, + Register scratch) { + __ push(lr); + __ PrepareCallCFunction(2, scratch); + __ vmov(r0, r1, d2); + switch (type_) { + case TranscendentalCache::SIN: + __ CallCFunction(ExternalReference::math_sin_double_function(), 2); + break; + case TranscendentalCache::COS: + __ CallCFunction(ExternalReference::math_cos_double_function(), 2); + break; + case TranscendentalCache::LOG: + __ CallCFunction(ExternalReference::math_log_double_function(), 2); + break; + default: + UNIMPLEMENTED(); + break; + } + __ pop(lr); } @@ -3299,105 +4057,13 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // r0 holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Restore the next handler and frame pointer, discard handler state. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0, RelocInfo::NONE)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); + __ Throw(r0); } void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ ldr(r2, MemOperand(sp, kStateOffset)); - __ cmp(r2, Operand(StackHandler::ENTRY)); - __ b(eq, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ ldr(sp, MemOperand(sp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. 
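The removed bodies of GenerateThrowTOS and GenerateThrowUncatchable move into the shared MacroAssembler helpers Throw and ThrowUncatchable. The mechanism they centralize is a singly linked chain of stack handlers that uncatchable exceptions unwind until an ENTRY handler is reached; a toy model of just that walk:

```cpp
#include <cassert>
#include <cstddef>

// Toy model of the stack-handler chain: handlers are linked through the
// stack, and uncatchable exceptions unwind every handler up to the
// first ENTRY handler.
enum HandlerType { TRY_CATCH, ENTRY };

struct StackHandler {
  HandlerType state;
  StackHandler* next;
};

StackHandler* UnwindToEntry(StackHandler* top) {
  while (top != NULL && top->state != ENTRY) top = top->next;
  return top;
}

int main() {
  StackHandler entry = { ENTRY, NULL };
  StackHandler inner = { TRY_CATCH, &entry };
  assert(UnwindToEntry(&inner) == &entry);
  assert(UnwindToEntry(&entry) == &entry);
}
```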
- ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(r0, Operand(false, RelocInfo::NONE)); - __ mov(r2, Operand(external_caught)); - __ str(r0, MemOperand(r2)); - - // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); - __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r0, MemOperand(r2)); - } - - // Stack layout at this point. See also StackHandlerConstants. - // sp -> state (ENTRY) - // fp - // lr - - // Discard handler state (r2 is not used) and restore frame pointer. - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0, RelocInfo::NONE)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); + __ ThrowUncatchable(type, r0); } @@ -3484,7 +4150,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // r0:r1: result // sp: stack pointer // fp: frame pointer - __ LeaveExitFrame(save_doubles_); + // Callee-saved register r4 still holds argc. + __ LeaveExitFrame(save_doubles_, r4); + __ mov(pc, lr); // check if we should retry or throw exception Label retry; @@ -3796,7 +4464,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // The offset was stored in r4 safepoint slot. // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) - __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); + __ LoadFromSafepointRegisterSlot(scratch, r4); __ sub(inline_site, lr, scratch); // Get the map location in scratch and patch it. __ GetRelocatedValueLocation(inline_site, scratch); @@ -4263,24 +4931,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); static const int kRegExpExecuteArguments = 7; - __ push(lr); - __ PrepareCallCFunction(kRegExpExecuteArguments, r0); + static const int kParameterRegisters = 4; + __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); + + // Stack pointer now points to cell where return address is to be written. + // Arguments are before that on the stack or in registers. - // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. + // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 2 * kPointerSize)); + __ str(r0, MemOperand(sp, 3 * kPointerSize)); - // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. + // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. __ mov(r0, Operand(address_of_regexp_stack_memory_address)); __ ldr(r0, MemOperand(r0, 0)); __ mov(r2, Operand(address_of_regexp_stack_memory_size)); __ ldr(r2, MemOperand(r2, 0)); __ add(r0, r0, Operand(r2)); - __ str(r0, MemOperand(sp, 1 * kPointerSize)); + __ str(r0, MemOperand(sp, 2 * kPointerSize)); - // Argument 5 (sp[0]): static offsets vector buffer. + // Argument 5 (sp[4]): static offsets vector buffer. 
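The RegExp stub now sets up a proper exit frame for the C call, and the renumbered comments above place the stack-passed arguments accordingly: the ARM calling convention passes the first four arguments in r0-r3, so a seven-argument call spills three to the stack above the return-address cell. A small check of that arithmetic (kPointerSize is 4 on this target):

```cpp
#include <cassert>

// With the return-address cell at sp[0], stack argument i (counting
// from 5) sits at sp[(i - 4) * kPointerSize], matching the
// sp[4]/sp[8]/sp[12] slots used by the stub.
int main() {
  const int kRegExpExecuteArguments = 7;
  const int kParameterRegisters = 4;  // r0-r3
  const int kPointerSize = 4;
  assert(kRegExpExecuteArguments - kParameterRegisters == 3);
  assert((5 - 4) * kPointerSize == 4);   // argument 5 at sp[4]
  assert((6 - 4) * kPointerSize == 8);   // argument 6 at sp[8]
  assert((7 - 4) * kPointerSize == 12);  // argument 7 at sp[12]
}
```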
__ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); - __ str(r0, MemOperand(sp, 0 * kPointerSize)); + __ str(r0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). @@ -4302,8 +4973,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Locate the code entry and call it. __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(r7, kRegExpExecuteArguments); - __ pop(lr); + DirectCEntryStub stub; + stub.GenerateCall(masm, r7); + + __ LeaveExitFrame(false, no_reg); // r0: result // subject: subject string (callee saved) @@ -4312,6 +4985,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); __ b(eq, &success); Label failure; @@ -4324,12 +4998,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r0, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); + __ mov(r1, Operand(ExternalReference::the_hole_value_location())); __ ldr(r1, MemOperand(r1, 0)); + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r0, MemOperand(r2, 0)); __ cmp(r0, r1); __ b(eq, &runtime); + + __ str(r1, MemOperand(r2, 0)); // Clear pending exception. + + // Check if the exception is a termination. If so, throw as uncatchable. + __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex); + __ cmp(r0, ip); + Label termination_exception; + __ b(eq, &termination_exception); + + __ Throw(r0); // Expects thrown value in r0. + + __ bind(&termination_exception); + __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. + __ bind(&failure); // For failure and exception return null. __ mov(r0, Operand(Factory::null_value())); @@ -5508,18 +6196,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; + // Stack on entry: - // sp[0]: second argument. - // sp[4]: first argument. + // sp[0]: second argument (right). + // sp[4]: first argument (left). // Load the two arguments. __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - STATIC_ASSERT(kSmiTag == 0); + if (flags_ == NO_STRING_ADD_FLAGS) { __ JumpIfEitherSmi(r0, r1, &string_add_runtime); // Load instance types. __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -5531,13 +6220,27 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kIsNotStringMask)); __ tst(r5, Operand(kIsNotStringMask), eq); __ b(ne, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. 
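+ // If the conversion fails, GenerateConvertArgument jumps to
+ // call_builtin and the matching STRING_ADD_LEFT / STRING_ADD_RIGHT
+ // builtin performs the addition instead.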
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } } // Both arguments are strings. // r0: first string // r1: second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) { Label strings_not_empty; // Check if either of the strings are empty. In that case return the other. @@ -5565,8 +6268,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // Look at the length of the result of adding the two strings. Label string_add_flat_result, longer_than_two; // Adding two lengths can't overflow. @@ -5578,7 +6281,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(ne, &longer_than_two); // Check that both strings are non-external ascii strings. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5626,7 +6329,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If result is not supposed to be flat, allocate a cons string object. // If both strings are ascii the result is an ascii cons string. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5674,11 +6377,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // r6: sum of lengths. __ bind(&string_add_flat_result); - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5776,6 +6479,60 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. 
__ bind(&string_add_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_JS); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow) { + // First check if the argument is already a string. + Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); + __ b(lt, &done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. + NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + scratch4, + false, + ¬_cached); + __ mov(arg, scratch1); + __ str(arg, MemOperand(sp, stack_offset)); + __ jmp(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CompareObjectType( + arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. + __ b(ne, slow); + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ and_(scratch2, + scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ cmp(scratch2, + Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ b(ne, slow); + __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); + __ str(arg, MemOperand(sp, stack_offset)); + + __ bind(&done); } @@ -5950,17 +6707,26 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) { void DirectCEntryStub::GenerateCall(MacroAssembler* masm, - ApiFunction *function) { + ExternalReference function) { __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), RelocInfo::CODE_TARGET)); + __ mov(r2, Operand(function)); // Push return address (accessible to GC through exit frame pc). - __ mov(r2, - Operand(ExternalReference(function, ExternalReference::DIRECT_CALL))); __ str(pc, MemOperand(sp, 0)); __ Jump(r2); // Call the api function. } +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + Register target) { + __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), + RelocInfo::CODE_TARGET)); + // Push return address (accessible to GC through exit frame pc). + __ str(pc, MemOperand(sp, 0)); + __ Jump(target); // Call the C++ function. +} + + void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -6028,6 +6794,91 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, } +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range) { + // Register use: + // receiver - holds the receiver and is unchanged unless the + // store succeeds. + // key - holds the key (must be a smi) and is unchanged. + // value - holds the value (must be a smi) and is unchanged. + // elements - holds the element object of the receiver on entry if + // load_elements_from_receiver is false, otherwise used + // internally to store the pixel arrays elements and + // external array pointer. + // elements_map - holds the map of the element object if + // load_elements_map_from_elements is false, otherwise + // loaded with the element map. 
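+ // scratch1 - clobbered; holds the untagged key.
+ // scratch2 - clobbered; holds the untagged value (and the array
+ // length during the bounds check).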
+ // + Register external_pointer = elements; + Register untagged_key = scratch1; + Register untagged_value = scratch2; + + if (load_elements_from_receiver) { + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + } + + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + if (load_elements_map_from_elements) { + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + } + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ b(ne, not_pixel_array); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ Assert(eq, "Elements isn't a pixel array"); + } + } + + // Some callers already have verified that the key is a smi. key_not_smi is + // set to NULL as a sentinel for that case. Otherwise, add an explicit check + // to ensure the key is a smi must be added. + if (key_not_smi != NULL) { + __ JumpIfNotSmi(key, key_not_smi); + } else { + if (FLAG_debug_code) { + __ AbortIfNotSmi(key); + } + } + + __ SmiUntag(untagged_key, key); + + // Perform bounds check. + __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset)); + __ cmp(untagged_key, scratch2); + __ b(hs, out_of_range); // unsigned check handles negative keys. + + __ JumpIfNotSmi(value, value_not_smi); + __ SmiUntag(untagged_value, value); + + // Clamp the value to [0..255]. + __ Usat(untagged_value, 8, Operand(untagged_value)); + // Get the pointer to the external array. This clobbers elements. + __ ldr(external_pointer, + FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); + __ strb(untagged_value, MemOperand(external_pointer, untagged_key)); + __ Ret(); +} + + #undef __ } } // namespace v8::internal diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index bf7d6354..e3ef3391 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -38,13 +38,22 @@ namespace internal { // TranscendentalCache runtime function. class TranscendentalCacheStub: public CodeStub { public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} + enum ArgumentType { + TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, + UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits + }; + + TranscendentalCacheStub(TranscendentalCache::Type type, + ArgumentType argument_type) + : type_(type), argument_type_(argument_type) { } void Generate(MacroAssembler* masm); private: TranscendentalCache::Type type_; + ArgumentType argument_type_; + void GenerateCallCFunction(MacroAssembler* masm, Register scratch); + Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } + int MinorKey() { return type_ | argument_type_; } Runtime::FunctionId RuntimeFunction(); }; @@ -335,24 +344,36 @@ class TypeRecordingBinaryOpStub: public CodeStub { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. 
+ NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB }; class StringAddStub: public CodeStub { public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} private: Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } + int MinorKey() { return flags_; } void Generate(MacroAssembler* masm); - // Should the stub check whether arguments are strings? - bool string_check_; + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow); + + const StringAddFlags flags_; }; @@ -580,7 +601,8 @@ class DirectCEntryStub: public CodeStub { public: DirectCEntryStub() {} void Generate(MacroAssembler* masm); - void GenerateCall(MacroAssembler* masm, ApiFunction *function); + void GenerateCall(MacroAssembler* masm, ExternalReference function); + void GenerateCall(MacroAssembler* masm, Register target); private: Major MajorKey() { return DirectCEntry; } @@ -589,14 +611,14 @@ class DirectCEntryStub: public CodeStub { }; -// Generate code the to load an element from a pixel array. The receiver is -// assumed to not be a smi and to have elements, the caller must guarantee this -// precondition. If the receiver does not have elements that are pixel arrays, -// the generated code jumps to not_pixel_array. If key is not a smi, then the -// generated code branches to key_not_smi. Callers can specify NULL for -// key_not_smi to signal that a smi check has already been performed on key so -// that the smi check is not generated . If key is not a valid index within the -// bounds of the pixel array, the generated code jumps to out_of_range. +// Generate code to load an element from a pixel array. The receiver is assumed +// to not be a smi and to have elements, the caller must guarantee this +// precondition. If key is not a smi, then the generated code branches to +// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi +// check has already been performed on key so that the smi check is not +// generated. If key is not a valid index within the bounds of the pixel array, +// the generated code jumps to out_of_range. receiver, key and elements are +// unchanged throughout the generated code sequence. void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -609,6 +631,35 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, Label* key_not_smi, Label* out_of_range); +// Generate code to store an element into a pixel array, clamping values between +// [0..255]. The receiver is assumed to not be a smi and to have elements, the +// caller must guarantee this precondition. If key is not a smi, then the +// generated code branches to key_not_smi. Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If value is not a smi, the generated +// code will branch to value_not_smi. If the receiver doesn't have pixel array +// elements, the generated code will branch to not_pixel_array, unless +// not_pixel_array is NULL, in which case the caller must ensure that the +// receiver has pixel array elements. If key is not a valid index within the +// bounds of the pixel array, the generated code jumps to out_of_range. 
If +// load_elements_from_receiver is true, then the elements of receiver is loaded +// into elements, otherwise elements is assumed to already be the receiver's +// elements. If load_elements_map_from_elements is true, elements_map is loaded +// from elements, otherwise it is assumed to already contain the element map. +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range); } } // namespace v8::internal diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index c8271107..d32b0091 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -1938,8 +1938,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { frame_->EmitPush(cp); frame_->EmitPush(Operand(pairs)); frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0))); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); - frame_->CallRuntime(Runtime::kDeclareGlobals, 3); + frame_->CallRuntime(Runtime::kDeclareGlobals, 4); // The result is discarded. } @@ -3287,7 +3288,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // context slot followed by initialization. frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); } else { - frame_->CallRuntime(Runtime::kStoreContextSlot, 3); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->CallRuntime(Runtime::kStoreContextSlot, 4); } // Storing a variable must keep the (new) value on the expression // stack. This is necessary for compiling assignment expressions. @@ -3637,7 +3639,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { Load(key); Load(value); if (property->emit_store()) { - frame_->CallRuntime(Runtime::kSetProperty, 3); + frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes + frame_->CallRuntime(Runtime::kSetProperty, 4); } else { frame_->Drop(3); } @@ -5170,11 +5173,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { // Set the bit in the map to indicate that it has been checked safe for // default valueOf and set true result. 
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); + __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); __ orr(scratch1_, scratch1_, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); - __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); + __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset)); __ mov(map_result_, Operand(1)); __ jmp(exit_label()); __ bind(&false_result); @@ -5656,7 +5659,8 @@ void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); if (CpuFeatures::IsSupported(VFP3)) { - TranscendentalCacheStub stub(TranscendentalCache::SIN); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::TAGGED); frame_->SpillAllButCopyTOSToR0(); frame_->CallStub(&stub, 1); } else { @@ -5670,7 +5674,8 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); if (CpuFeatures::IsSupported(VFP3)) { - TranscendentalCacheStub stub(TranscendentalCache::COS); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::TAGGED); frame_->SpillAllButCopyTOSToR0(); frame_->CallStub(&stub, 1); } else { @@ -5684,7 +5689,8 @@ void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); if (CpuFeatures::IsSupported(VFP3)) { - TranscendentalCacheStub stub(TranscendentalCache::LOG); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::TAGGED); frame_->SpillAllButCopyTOSToR0(); frame_->CallStub(&stub, 1); } else { @@ -5844,15 +5850,20 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { if (property != NULL) { Load(property->obj()); Load(property->key()); - frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); frame_->EmitPush(r0); } else if (variable != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); frame_->EmitPush(Operand(variable->name())); - frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2); + frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); frame_->EmitPush(r0); } else if (slot != NULL && slot->type() == Slot::LOOKUP) { @@ -6669,8 +6680,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { public: DeferredReferenceSetKeyedValue(Register value, Register key, - Register receiver) - : value_(value), key_(key), receiver_(receiver) { + Register receiver, + StrictModeFlag strict_mode) + : value_(value), + key_(key), + receiver_(receiver), + strict_mode_(strict_mode) { set_comment("[ DeferredReferenceSetKeyedValue"); } @@ -6680,6 +6695,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { Register value_; Register key_; Register receiver_; + StrictModeFlag strict_mode_; }; @@ -6701,7 +6717,9 @@ void DeferredReferenceSetKeyedValue::Generate() { { Assembler::BlockConstPoolScope block_const_pool(masm_); // Call keyed store IC. It has the arguments value, key and receiver in r0, // r1 and r2. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? 
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the // keyed store has been inlined. @@ -6719,8 +6737,12 @@ class DeferredReferenceSetNamedValue: public DeferredCode { public: DeferredReferenceSetNamedValue(Register value, Register receiver, - Handle<String> name) - : value_(value), receiver_(receiver), name_(name) { + Handle<String> name, + StrictModeFlag strict_mode) + : value_(value), + receiver_(receiver), + name_(name), + strict_mode_(strict_mode) { set_comment("[ DeferredReferenceSetNamedValue"); } @@ -6730,6 +6752,7 @@ class DeferredReferenceSetNamedValue: public DeferredCode { Register value_; Register receiver_; Handle<String> name_; + StrictModeFlag strict_mode_; }; @@ -6749,7 +6772,9 @@ void DeferredReferenceSetNamedValue::Generate() { { Assembler::BlockConstPoolScope block_const_pool(masm_); // Call keyed store IC. It has the arguments value, key and receiver in r0, // r1 and r2. - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the // named store has been inlined. @@ -6938,7 +6963,8 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) { Register receiver = r1; DeferredReferenceSetNamedValue* deferred = - new DeferredReferenceSetNamedValue(value, receiver, name); + new DeferredReferenceSetNamedValue( + value, receiver, name, strict_mode_flag()); // Check that the receiver is a heap object. __ tst(receiver, Operand(kSmiTagMask)); @@ -7124,7 +7150,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type, // The deferred code expects value, key and receiver in registers. DeferredReferenceSetKeyedValue* deferred = - new DeferredReferenceSetKeyedValue(value, key, receiver); + new DeferredReferenceSetKeyedValue( + value, key, receiver, strict_mode_flag()); // Check that the value is a smi. As this inlined code does not set the // write barrier it is only possible to store smi values. @@ -7209,7 +7236,7 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type, deferred->BindExit(); } else { - frame()->CallKeyedStoreIC(); + frame()->CallKeyedStoreIC(strict_mode_flag()); } } diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h index 7ac38ed3..e6033a89 100644 --- a/src/arm/constants-arm.h +++ b/src/arm/constants-arm.h @@ -385,7 +385,10 @@ enum VFPConversionMode { kDefaultRoundToZero = 1 }; +// This mask does not include the "inexact" or "input denormal" cumulative +// exceptions flags, because we usually don't want to check for it. static const uint32_t kVFPExceptionMask = 0xf; +static const uint32_t kVFPInexactExceptionBit = 1 << 4; static const uint32_t kVFPFlushToZeroMask = 1 << 24; static const uint32_t kVFPInvalidExceptionBit = 1; @@ -411,6 +414,11 @@ enum VFPRoundingMode { static const uint32_t kVFPRoundingModeMask = 3 << 22; +enum CheckForInexactConversion { + kCheckForInexactConversion, + kDontCheckForInexactConversion +}; + // ----------------------------------------------------------------------------- // Hints. 
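The new CheckForInexactConversion enum lets a caller opt into failing a
double-to-integer conversion whose result would be rounded. A minimal sketch
of how the FPSCR exception mask is meant to be composed (illustrative only;
the parameter name `check`, the register use, and the labels are
placeholders, not the exact macro-assembler API):

    // Cumulative exception bits that should fail the conversion. The
    // inexact bit is opted into explicitly; as noted above, it is never
    // part of the default mask.
    uint32_t exception_mask = kVFPExceptionMask;
    if (check == kCheckForInexactConversion) {
      exception_mask |= kVFPInexactExceptionBit;
    }
    // After the vcvt:  vmrs(scratch)                      ; read FPSCR
    //                  tst(scratch, Operand(exception_mask))
    //                  b(ne, &conversion_failed)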
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc index 507954d9..51c84b33 100644 --- a/src/arm/cpu-arm.cc +++ b/src/arm/cpu-arm.cc @@ -50,6 +50,11 @@ void CPU::Setup() { void CPU::FlushICache(void* start, size_t size) { + // Nothing to do flushing no instructions. + if (size == 0) { + return; + } + #if defined (USE_SIMULATOR) // Not generating ARM instructions for C-code. This means that we are // building an ARM emulator based target. We should notify the simulator diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc index caec55af..9a5aa902 100644 --- a/src/arm/deoptimizer-arm.cc +++ b/src/arm/deoptimizer-arm.cc @@ -124,14 +124,62 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - UNIMPLEMENTED(); + const int kInstrSize = Assembler::kInstrSize; + // The call of the stack guard check has the following form: + // e1 5d 00 0c cmp sp, <limit> + // 2a 00 00 01 bcs ok + // e5 9f c? ?? ldr ip, [pc, <stack guard address>] + // e1 2f ff 3c blx ip + ASSERT(Memory::int32_at(pc_after - kInstrSize) == + (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code())); + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_after - 2 * kInstrSize))); + + // We patch the code to the following form: + // e1 5d 00 0c cmp sp, <limit> + // e1 a0 00 00 mov r0, r0 (NOP) + // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] + // e1 2f ff 3c blx ip + // and overwrite the constant containing the + // address of the stack check stub. + + // Replace conditional jump with NOP. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->nop(); + + // Replace the stack check address in the constant pool + // with the entry address of the replacement code. + uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - + 2 * kInstrSize) & 0xfff; + Address stack_check_address_pointer = pc_after + stack_check_address_offset; + ASSERT(Memory::uint32_at(stack_check_address_pointer) == + reinterpret_cast<uint32_t>(check_code->entry())); + Memory::uint32_at(stack_check_address_pointer) = + reinterpret_cast<uint32_t>(replacement_code->entry()); } void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - UNIMPLEMENTED(); + const int kInstrSize = Assembler::kInstrSize; + ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c); + ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5); + ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f); + + // Replace NOP with conditional jump. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->b(+4, cs); + + // Replace the stack check address in the constant pool + // with the entry address of the replacement code. + uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - + 2 * kInstrSize) & 0xfff; + Address stack_check_address_pointer = pc_after + stack_check_address_offset; + ASSERT(Memory::uint32_at(stack_check_address_pointer) == + reinterpret_cast<uint32_t>(replacement_code->entry())); + Memory::uint32_at(stack_check_address_pointer) = + reinterpret_cast<uint32_t>(check_code->entry()); } @@ -381,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. + // For the bottommost output frame the context can be gotten from the input + // frame. 
For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. - ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast<intptr_t>(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) { output_frame->SetRegister(cp.code(), value); diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 2685fcb7..5f5de3a9 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { Move(dot_arguments_slot, r3, r1, r2); } - { Comment cmnt(masm_, "[ Declarations"); - // For named function expressions, declare the function name as a - // constant. - if (scope()->is_function_scope() && scope()->function() != NULL) { - EmitDeclaration(scope()->function(), Variable::CONST, NULL); - } - // Visit all the explicit declarations unless there is an illegal - // redeclaration. - if (scope()->HasIllegalRedeclaration()) { - scope()->VisitIllegalRedeclaration(this); - } else { - VisitDeclarations(scope()->declarations()); - } - } - if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); } - // Check the stack for overflow or break request. - { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailout(info->function(), NO_REGISTERS); - Label ok; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); - } + // Visit the declarations and body unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); - { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); - VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + } else { + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. + if (scope()->is_function_scope() && scope()->function() != NULL) { + EmitDeclaration(scope()->function(), Variable::CONST, NULL); + } + VisitDeclarations(scope()->declarations()); + } + + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailout(info->function(), NO_REGISTERS); + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + StackCheckStub stub; + __ CallStub(&stub); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); + VisitStatements(function()->body()); + ASSERT(loop_depth() == 0); + } } + // Always emit a 'return undefined' in case control fell off the end of + // the body. { Comment cmnt(masm_, "[ return <undefined>;"); - // Emit a 'return undefined' in case control fell off the end of the - // body. 
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex); } EmitReturnSequence(); @@ -338,13 +339,6 @@ void FullCodeGenerator::EmitReturnSequence() { } -FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( - Token::Value op, Expression* left, Expression* right) { - ASSERT(ShouldInlineSmiCase(op)); - return kNoConstants; -} - - void FullCodeGenerator::EffectContext::Plug(Slot* slot) const { } @@ -563,13 +557,38 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const { void FullCodeGenerator::DoTest(Label* if_true, Label* if_false, Label* fall_through) { - // Call the runtime to find the boolean value of the source and then - // translate it into control flow to the pair of labels. - __ push(result_register()); - __ CallRuntime(Runtime::kToBool, 1); - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r0, ip); - Split(eq, if_true, if_false, fall_through); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Emit the inlined tests assumed by the stub. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_false); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_true); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_false); + STATIC_ASSERT(kSmiTag == 0); + __ tst(result_register(), result_register()); + __ b(eq, if_false); + __ JumpIfSmi(result_register(), if_true); + + // Call the ToBoolean stub for all other cases. + ToBooleanStub stub(result_register()); + __ CallStub(&stub); + __ tst(result_register(), result_register()); + } else { + // Call the runtime to find the boolean value of the source and then + // translate it into control flow to the pair of labels. + __ push(result_register()); + __ CallRuntime(Runtime::kToBool, 1); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(r0, ip); + } + + // The stub returns nonzero for true. + Split(ne, if_true, if_false, fall_through); } @@ -684,10 +703,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // We bypass the general EmitSlotSearch because we know more about // this specific context. - // The variable in the decl always resides in the current context. + // The variable in the decl always resides in the current function + // context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { - // Check if we have the correct context pointer. + // Check that we're not inside a 'with'. __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); __ cmp(r1, cp); __ Check(eq, "Unexpected declaration in current context."); @@ -756,7 +776,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, prop->key()->AsLiteral()->handle()->IsSmi()); __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin(is_strict() + ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // Value in r0 is ignored (declarations are statements). } @@ -772,10 +794,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { // Call the runtime to declare the globals. // The context is the first argument. - __ mov(r1, Operand(pairs)); - __ mov(r0, Operand(Smi::FromInt(is_eval() ? 
1 : 0))); - __ Push(cp, r1, r0); - __ CallRuntime(Runtime::kDeclareGlobals, 3); + __ mov(r2, Operand(pairs)); + __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0))); + __ mov(r0, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(cp, r2, r1, r0); + __ CallRuntime(Runtime::kDeclareGlobals, 4); // Return value is ignored. } @@ -784,9 +807,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); SetStatementPosition(stmt); + // Keep the switch value on the stack until a case matches. VisitForStackValue(stmt->tag()); - PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); ZoneList<CaseClause*>* clauses = stmt->cases(); @@ -875,8 +898,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(r0, ip); __ b(eq, &exit); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r0, ip); + Register null_value = r5; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ cmp(r0, null_value); __ b(eq, &exit); // Convert the object to a JS object. @@ -890,12 +914,62 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(&done_convert); __ push(r0); - // BUG(867): Check cache validity in generated code. This is a fast - // case for the JSObject::IsSimpleEnum cache validity checks. If we - // cannot guarantee cache validity, call the runtime system to check - // cache validity or get the property names in a fixed array. + // Check cache validity in generated code. This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. + Label next, call_runtime; + // Preload a couple of values used in the loop. + Register empty_fixed_array_value = r6; + __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); + Register empty_descriptor_array_value = r7; + __ LoadRoot(empty_descriptor_array_value, + Heap::kEmptyDescriptorArrayRootIndex); + __ mov(r1, r0); + __ bind(&next); + + // Check that there are no elements. Register r1 contains the + // current JS object we've reached through the prototype chain. + __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ cmp(r2, empty_fixed_array_value); + __ b(ne, &call_runtime); + + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in r2 for the subsequent + // prototype load. + __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset)); + __ cmp(r3, empty_descriptor_array_value); + __ b(eq, &call_runtime); + + // Check that there is an enum cache in the non-empty instance + // descriptors (r3). This is the case if the next enumeration + // index field does not contain a smi. + __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset)); + __ JumpIfSmi(r3, &call_runtime); + + // For all objects but the receiver, check that the cache is empty. + Label check_prototype; + __ cmp(r1, r0); + __ b(eq, &check_prototype); + __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset)); + __ cmp(r3, empty_fixed_array_value); + __ b(ne, &call_runtime); + + // Load the prototype from the map and loop if non-null. 
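+ // (r2 still holds the map of the current object, loaded when checking its
+ // instance descriptors above, and null_value was preloaded into r5.)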
+ __ bind(&check_prototype); + __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset)); + __ cmp(r1, null_value); + __ b(ne, &next); + + // The enum cache is valid. Load the map of the object being + // iterated over and use the cache for the iteration. + Label use_cache; + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ b(&use_cache); // Get the set of properties to enumerate. + __ bind(&call_runtime); __ push(r0); // Duplicate the enumerable object on the stack. __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); @@ -910,6 +984,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ b(ne, &fixed_array); // We got a map in register r0. Get the enumeration cache from it. + __ bind(&use_cache); __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset)); __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); @@ -998,8 +1073,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure) { // Use the fast case closure allocation code that allocates in new - // space for nested functions that don't need literals cloning. - if (scope()->is_function_scope() && + // space for nested functions that don't need literals cloning. If + // we're running with the --always-opt or the --prepare-always-opt + // flag, we need to use the runtime function so that the new function + // we are creating here gets a chance to have its code optimized and + // doesn't just get a copy of the existing unoptimized code. + if (!FLAG_always_opt && + !FLAG_prepare_always_opt && + scope()->is_function_scope() && info->num_literals() == 0 && !pretenure) { FastNewClosureStub stub; @@ -1027,7 +1108,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( Slot* slot, Label* slow) { ASSERT(slot->type() == Slot::CONTEXT); - Register current = cp; + Register context = cp; Register next = r3; Register temp = r4; @@ -1035,22 +1116,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( if (s->num_heap_slots() > 0) { if (s->calls_eval()) { // Check that extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); } - __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX)); __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); // Walk the rest of the chain without clobbering cp. - current = next; + context = next; } } // Check that last extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); - __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX)); - return ContextOperand(temp, slot->index()); + + // This function is used only for loads, not stores, so it's safe to + // return an cp-based operand (the write barrier cannot be allowed to + // destroy the cp register). 
+ return ContextOperand(context, slot->index()); } @@ -1250,18 +1334,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { Comment cmnt(masm_, "[ RegExpLiteral"); Label materialized; // Registers will be used as follows: + // r5 = materialized value (RegExp literal) // r4 = JS function, literals array // r3 = literal index // r2 = RegExp pattern // r1 = RegExp flags - // r0 = temp + materialized value (RegExp literal) + // r0 = RegExp literal clone __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); int literal_offset = FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; - __ ldr(r0, FieldMemOperand(r4, literal_offset)); + __ ldr(r5, FieldMemOperand(r4, literal_offset)); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r0, ip); + __ cmp(r5, ip); __ b(ne, &materialized); // Create regexp literal using runtime function. @@ -1271,20 +1356,27 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ mov(r1, Operand(expr->flags())); __ Push(r4, r3, r2, r1); __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); + __ mov(r5, r0); __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; - __ push(r0); + Label allocated, runtime_allocate; + __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ push(r5); __ mov(r0, Operand(Smi::FromInt(size))); __ push(r0); __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + __ pop(r5); + __ bind(&allocated); // After this, registers are used as follows: // r0: Newly allocated regexp. - // r1: Materialized regexp. + // r5: Materialized regexp. // r2: temp. - __ pop(r1); - __ CopyFields(r0, r1, r2.bit(), size / kPointerSize); + __ CopyFields(r0, r5, r2.bit(), size / kPointerSize); context()->Plug(r0); } @@ -1350,7 +1442,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ CallRuntime(Runtime::kSetProperty, 3); + __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ push(r0); + __ CallRuntime(Runtime::kSetProperty, 4); } else { __ Drop(3); } @@ -1528,14 +1622,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } Token::Value op = expr->binary_op(); - ConstantOperand constant = ShouldInlineSmiCase(op) - ? GetConstantOperand(op, expr->target(), expr->value()) - : kNoConstants; - ASSERT(constant == kRightConstant || constant == kNoConstants); - if (constant == kNoConstants) { - __ push(r0); // Left operand goes on the stack. - VisitForAccumulatorValue(expr->value()); - } + __ push(r0); // Left operand goes on the stack. + VisitForAccumulatorValue(expr->value()); OverwriteMode mode = expr->value()->ResultOverwriteAllowed() ? OVERWRITE_RIGHT @@ -1547,8 +1635,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { op, mode, expr->target(), - expr->value(), - constant); + expr->value()); } else { EmitBinaryOp(op, mode); } @@ -1601,11 +1688,99 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, Token::Value op, OverwriteMode mode, - Expression* left, - Expression* right, - ConstantOperand constant) { - ASSERT(constant == kNoConstants); // Only handled case. 
- EmitBinaryOp(op, mode); + Expression* left_expr, + Expression* right_expr) { + Label done, smi_case, stub_call; + + Register scratch1 = r2; + Register scratch2 = r3; + + // Get the arguments. + Register left = r1; + Register right = r0; + __ pop(left); + + // Perform combined smi check on both operands. + __ orr(scratch1, left, Operand(right)); + STATIC_ASSERT(kSmiTag == 0); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(scratch1, &smi_case); + + __ bind(&stub_call); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), &patch_site); + __ jmp(&done); + + __ bind(&smi_case); + // Smi case. This code works the same way as the smi-smi case in the type + // recording binary operation stub, see + // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments. + switch (op) { + case Token::SAR: + __ b(&stub_call); + __ GetLeastBitsFromSmi(scratch1, right, 5); + __ mov(right, Operand(left, ASR, scratch1)); + __ bic(right, right, Operand(kSmiTagMask)); + break; + case Token::SHL: { + __ b(&stub_call); + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSL, scratch2)); + __ add(scratch2, scratch1, Operand(0x40000000), SetCC); + __ b(mi, &stub_call); + __ SmiTag(right, scratch1); + break; + } + case Token::SHR: { + __ b(&stub_call); + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSR, scratch2)); + __ tst(scratch1, Operand(0xc0000000)); + __ b(ne, &stub_call); + __ SmiTag(right, scratch1); + break; + } + case Token::ADD: + __ add(scratch1, left, Operand(right), SetCC); + __ b(vs, &stub_call); + __ mov(right, scratch1); + break; + case Token::SUB: + __ sub(scratch1, left, Operand(right), SetCC); + __ b(vs, &stub_call); + __ mov(right, scratch1); + break; + case Token::MUL: { + __ SmiUntag(ip, right); + __ smull(scratch1, scratch2, left, ip); + __ mov(ip, Operand(scratch1, ASR, 31)); + __ cmp(ip, Operand(scratch2)); + __ b(ne, &stub_call); + __ tst(scratch1, Operand(scratch1)); + __ mov(right, Operand(scratch1), LeaveCC, ne); + __ b(ne, &done); + __ add(scratch2, right, Operand(left), SetCC); + __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ b(mi, &stub_call); + break; + } + case Token::BIT_OR: + __ orr(right, left, Operand(right)); + break; + case Token::BIT_AND: + __ and_(right, left, Operand(right)); + break; + case Token::BIT_XOR: + __ eor(right, left, Operand(right)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(r0); } @@ -1650,18 +1825,32 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { __ mov(r1, r0); __ pop(r0); // Restore value. __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } case KEYED_PROPERTY: { __ push(r0); // Preserve value. 
- VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ mov(r1, r0); - __ pop(r2); + if (prop->is_synthetic()) { + ASSERT(prop->obj()->AsVariableProxy() != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + { AccumulatorValueContext for_object(this); + EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + } + __ mov(r2, r0); + __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ mov(r1, r0); + __ pop(r2); + } __ pop(r0); // Restore value. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } @@ -1685,39 +1874,65 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // r2, and the global object in r1. __ mov(r2, Operand(var->name())); __ ldr(r1, GlobalObjectOperand()); - Handle<Code> ic(Builtins::builtin(is_strict() - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); - } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { - // Perform the assignment for non-const variables and for initialization - // of const variables. Const assignments are simply skipped. - Label done; + } else if (op == Token::INIT_CONST) { + // Like var declarations, const declarations are hoisted to function + // scope. However, unlike var initializers, const initializers are able + // to drill a hole to that function context, even from inside a 'with' + // context. We thus bypass the normal static scope lookup. + Slot* slot = var->AsSlot(); + Label skip; + switch (slot->type()) { + case Slot::PARAMETER: + // No const parameters. + UNREACHABLE(); + break; + case Slot::LOCAL: + // Detect const reinitialization by checking for the hole value. + __ ldr(r1, MemOperand(fp, SlotOffset(slot))); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &skip); + __ str(result_register(), MemOperand(fp, SlotOffset(slot))); + break; + case Slot::CONTEXT: { + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ ldr(r2, ContextOperand(r1, slot->index())); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r2, ip); + __ b(ne, &skip); + __ str(r0, ContextOperand(r1, slot->index())); + int offset = Context::SlotOffset(slot->index()); + __ mov(r3, r0); // Preserve the stored value in r0. + __ RecordWrite(r1, Operand(offset), r3, r2); + break; + } + case Slot::LOOKUP: + __ push(r0); + __ mov(r0, Operand(slot->var()->name())); + __ Push(cp, r0); // Context and name. + __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); + break; + } + __ bind(&skip); + + } else if (var->mode() != Variable::CONST) { + // Perform the assignment for non-const variables. Const assignments + // are simply skipped. Slot* slot = var->AsSlot(); switch (slot->type()) { case Slot::PARAMETER: case Slot::LOCAL: - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r1, MemOperand(fp, SlotOffset(slot))); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r1, ip); - __ b(ne, &done); - } // Perform the assignment. 
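// Stack slots are scanned directly by the GC, so unlike the context-slot
// store below this needs no write barrier.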
__ str(result_register(), MemOperand(fp, SlotOffset(slot))); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, r1); - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r2, target); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r2, ip); - __ b(ne, &done); - } // Perform the assignment and issue the write barrier. __ str(result_register(), target); // RecordWrite may destroy all its register arguments. @@ -1728,20 +1943,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, } case Slot::LOOKUP: - // Call the runtime for the assignment. The runtime will ignore - // const reinitialization. + // Call the runtime for the assignment. __ push(r0); // Value. - __ mov(r0, Operand(slot->var()->name())); - __ Push(cp, r0); // Context and name. - if (op == Token::INIT_CONST) { - // The runtime will ignore const redeclaration. - __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); - } else { - __ CallRuntime(Runtime::kStoreContextSlot, 3); - } + __ mov(r1, Operand(slot->var()->name())); + __ mov(r0, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(cp, r1, r0); // Context, name, strict mode. + __ CallRuntime(Runtime::kStoreContextSlot, 4); break; } - __ bind(&done); } } @@ -1774,7 +1983,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { __ pop(r1); } - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -1818,7 +2029,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { __ pop(r2); } - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -1933,6 +2146,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { } +void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, + int arg_count) { + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + } else { + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + } + __ push(r1); + + // Push the receiver of the enclosing function and do runtime call. + __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); + __ push(r1); + // Push the strict mode flag. + __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + __ push(r1); + + __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP + ? Runtime::kResolvePossiblyDirectEvalNoLookup + : Runtime::kResolvePossiblyDirectEval, 4); +} + + void FullCodeGenerator::VisitCall(Call* expr) { #ifdef DEBUG // We want to verify that RecordJSReturnSite gets called on all paths @@ -1962,26 +2198,31 @@ void FullCodeGenerator::VisitCall(Call* expr) { VisitForStackValue(args->at(i)); } - // Push copy of the function - found below the arguments. - __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ push(r1); - - // Push copy of the first argument or undefined if it doesn't exist. 
- if (arg_count > 0) { - __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); - __ push(r1); - } else { - __ push(r2); + // If we know that eval can only be shadowed by eval-introduced + // variables we attempt to load the global eval function directly + // in generated code. If we succeed, there is no need to perform a + // context lookup in the runtime system. + Label done; + if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { + Label slow; + EmitLoadGlobalSlotCheckExtensions(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow); + // Push the function and resolve eval. + __ push(r0); + EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count); + __ jmp(&done); + __ bind(&slow); } - // Push the receiver of the enclosing function and do runtime call. - __ ldr(r1, - MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); - __ push(r1); - // Push the strict mode flag. - __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + // Push copy of the function (found below the arguments) and + // resolve eval. + __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ push(r1); - __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); + EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count); + if (done.is_linked()) { + __ bind(&done); + } // The runtime call returns a pair of values in r0 (function) and // r1 (receiver). Touch up the stack with the right values. @@ -2796,37 +3037,43 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) { - // Load the argument on the stack and call the runtime. + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sin, 1); + __ CallStub(&stub); context()->Plug(r0); } void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) { - // Load the argument on the stack and call the runtime. + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_cos, 1); + __ CallStub(&stub); context()->Plug(r0); } -void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) { - // Load the argument on the stack and call the runtime function. +void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) { + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); + __ CallStub(&stub); context()->Plug(r0); } -void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) { +void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) { // Load the argument on the stack and call the runtime function. 
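// (There is no transcendental cache entry for Math.sqrt, so it keeps going
// through the runtime rather than a TranscendentalCacheStub.)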
ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); + __ CallRuntime(Runtime::kMath_sqrt, 1); context()->Plug(r0); } @@ -2866,7 +3113,79 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); + Label done; + Label slow_case; + Register object = r0; + Register index1 = r1; + Register index2 = r2; + Register elements = r3; + Register scratch1 = r4; + Register scratch2 = r5; + + __ ldr(object, MemOperand(sp, 2 * kPointerSize)); + // Fetch the map and check if array is in fast case. + // Check that object doesn't require security checks and + // has no indexed interceptor. + __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow_case); + // Map is now in scratch1. + + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); + __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); + __ b(ne, &slow_case); + + // Check the object's elements are in fast case and writable. + __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset)); + __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(scratch1, ip); + __ b(ne, &slow_case); + + // Check that both indices are smis. + __ ldr(index1, MemOperand(sp, 1 * kPointerSize)); + __ ldr(index2, MemOperand(sp, 0)); + __ JumpIfNotBothSmi(index1, index2, &slow_case); + + // Check that both indices are valid. + __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset)); + __ cmp(scratch1, index1); + __ cmp(scratch1, index2, hi); + __ b(ls, &slow_case); + + // Bring the address of the elements into index1 and index2. + __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(index1, + scratch1, + Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(index2, + scratch1, + Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize)); + + // Swap elements. + __ ldr(scratch1, MemOperand(index1, 0)); + __ ldr(scratch2, MemOperand(index2, 0)); + __ str(scratch1, MemOperand(index2, 0)); + __ str(scratch2, MemOperand(index1, 0)); + + Label new_space; + __ InNewSpace(elements, scratch1, eq, &new_space); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + __ mov(scratch1, elements); + __ RecordWriteHelper(elements, index1, scratch2); + __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. + + __ bind(&new_space); + // We are done. Drop elements from the stack, and return undefined. 
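The EmitSwapElements fast path above only records the two writes when the elements array sits outside new space. A plain-C++ sketch of that control flow; InNewSpace and RememberSlot are stand-ins for V8's InNewSpace check and RecordWriteHelper, not real APIs:

static bool InNewSpace(const void*) { return false; }  // stand-in heap predicate
static void RememberSlot(void*) {}                     // stand-in write barrier

void SwapWithBarrier(void** slot1, void** slot2, const void* elements) {
  void* tmp = *slot1;                // plain pointer swap of the two elements
  *slot1 = *slot2;
  *slot2 = tmp;
  if (InNewSpace(elements)) return;  // new-space arrays need no barrier
  RememberSlot(slot1);               // old-space: record both mutated slots
  RememberSlot(slot2);
}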
+ __ Drop(3); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ jmp(&done); + + __ bind(&slow_case); __ CallRuntime(Runtime::kSwapElements, 3); + + __ bind(&done); context()->Plug(r0); } @@ -2985,16 +3304,248 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); VisitForAccumulatorValue(args->at(0)); + + if (FLAG_debug_code) { + __ AbortIfNotString(r0); + } + __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); __ IndexFromHash(r0, r0); + context()->Plug(r0); } void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { + Label bailout, done, one_char_separator, long_separator, + non_trivial_array, not_size_one_array, loop, + empty_separator_loop, one_char_separator_loop, + one_char_separator_loop_entry, long_separator_loop; + + ASSERT(args->length() == 2); + VisitForStackValue(args->at(1)); + VisitForAccumulatorValue(args->at(0)); + + // All aliases of the same register have disjoint lifetimes. + Register array = r0; + Register elements = no_reg; // Will be r0. + Register result = no_reg; // Will be r0. + Register separator = r1; + Register array_length = r2; + Register result_pos = no_reg; // Will be r2 + Register string_length = r3; + Register string = r4; + Register element = r5; + Register elements_end = r6; + Register scratch1 = r7; + Register scratch2 = r9; + + // Separator operand is on the stack. + __ pop(separator); + + // Check that the array is a JSArray. + __ JumpIfSmi(array, &bailout); + __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE); + __ b(ne, &bailout); + + // Check that the array has fast elements. + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ tst(scratch2, Operand(1 << Map::kHasFastElements)); + __ b(eq, &bailout); + + // If the array has length zero, return the empty string. + __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); + __ SmiUntag(array_length, SetCC); + __ b(ne, &non_trivial_array); + __ LoadRoot(r0, Heap::kEmptyStringRootIndex); + __ b(&done); + + __ bind(&non_trivial_array); + + // Get the FixedArray containing array's elements. + elements = array; + __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset)); + array = no_reg; // End of array's live range. + + // Check that all array elements are sequential ASCII strings, and + // accumulate the sum of their lengths, as a smi-encoded value. + __ mov(string_length, Operand(0)); + __ add(element, + elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); + // Loop condition: while (element < elements_end). + // Live values in registers: + // elements: Fixed array of strings. + // array_length: Length of the fixed array of strings (not smi) + // separator: Separator string + // string_length: Accumulated sum of string lengths (smi). + // element: Current array element. + // elements_end: Array end. 
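The register comments above do real bookkeeping: r0 is reused as array, then elements, then result, and each handoff parks the old alias in no_reg so a stale use is visibly invalid. A scalar sketch of that discipline; Reg and no_reg here are illustrative stand-ins, not V8's types:

struct Reg {
  int code = -1;
  bool is_valid() const { return code >= 0; }
};
const Reg no_reg;  // invalid register: any accidental later use is detectable

void RoleHandoff() {
  Reg r0{0};
  Reg array = r0;        // phase 1: r0 holds the JSArray
  Reg elements = array;  // phase 2: same register, new role (the FixedArray)
  array = no_reg;        // end of array's live range, as in the code above
  static_cast<void>(elements);
}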
+ if (FLAG_debug_code) {
+ __ cmp(array_length, Operand(0));
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ }
+ __ bind(&loop);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch1));
+ __ b(vs, &bailout);
+ __ cmp(element, elements_end);
+ __ b(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, Operand(1));
+ __ b(ne, &not_size_one_array);
+ __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ b(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string. array_length is not
+ // a smi but the other values are, so the result is a smi.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch1));
+ __ smull(scratch2, ip, array_length, scratch1);
+ // Check for smi overflow. No overflow if the higher 33 bits of the 64-bit
+ // result are zero.
+ __ cmp(ip, Operand(0));
+ __ b(ne, &bailout);
+ __ tst(scratch2, Operand(0x80000000));
+ __ b(ne, &bailout);
+ __ add(string_length, string_length, Operand(scratch2));
+ __ b(vs, &bailout);
+ __ SmiUntag(string_length);
+
+ // Get the first element in the array to free up the elements register to be
+ // used for the result.
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+ // Prepare for looping. Set up elements_end to point to the end of the array,
+ // and result_pos to the position in the result where the first character
+ // will be written.
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ add(result_pos,
+ result,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ b(eq, &one_char_separator);
+ __ b(gt, &long_separator);
+
+ // Empty separator case
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end. + + // Copy next array element to the result. + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + __ cmp(element, elements_end); + __ b(lt, &empty_separator_loop); // End while (element < elements_end). + ASSERT(result.is(r0)); + __ b(&done); + + // One-character separator case + __ bind(&one_char_separator); + // Replace separator with its ascii character value. + __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize)); + // Jump into the loop after the code that copies the separator, so the first + // element is not preceded by a separator + __ jmp(&one_char_separator_loop_entry); + + __ bind(&one_char_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + // separator: Single separator ascii char (in lower byte). + + // Copy the separator character to the result. + __ strb(separator, MemOperand(result_pos, 1, PostIndex)); + + // Copy next array element to the result. + __ bind(&one_char_separator_loop_entry); + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + __ cmp(element, elements_end); + __ b(lt, &one_char_separator_loop); // End while (element < elements_end). + ASSERT(result.is(r0)); + __ b(&done); + + // Long separator case (separator is more than one character). Entry is at the + // label long_separator below. + __ bind(&long_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + // separator: Separator string. + + // Copy the separator to the result. + __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, + separator, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + + __ bind(&long_separator); + __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); + __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + __ cmp(element, elements_end); + __ b(lt, &long_separator_loop); // End while (element < elements_end). + ASSERT(result.is(r0)); + __ b(&done); + + __ bind(&bailout); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ bind(&done); context()->Plug(r0); - return; } @@ -3043,19 +3594,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); Property* prop = expr->expression()->AsProperty(); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - if (prop == NULL && var == NULL) { - // Result of deleting non-property, non-variable reference is true. - // The subexpression may have side effects. 
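For reference, the three copy loops of EmitFastAsciiArrayJoin above (empty, one-character, and long separator) make the same split a straightforward join would make once, outside the copy loop. A runnable C++ model of that strategy:

#include <string>
#include <vector>

std::string Join(const std::vector<std::string>& parts, const std::string& sep) {
  std::string out;
  for (size_t i = 0; i < parts.size(); ++i) {
    if (i > 0) {
      if (sep.size() == 1) out.push_back(sep[0]);  // one-char separator loop
      else if (!sep.empty()) out.append(sep);      // long separator loop
      // empty separator: no separator work at all in that loop
    }
    out.append(parts[i]);                          // CopyBytes of the element
  }
  return out;
}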
- VisitForEffect(expr->expression()); - context()->Plug(true); - } else if (var != NULL && - !var->is_global() && - var->AsSlot() != NULL && - var->AsSlot()->type() != Slot::LOOKUP) { - // Result of deleting non-global, non-dynamic variables is false. - // The subexpression does not have side effects. - context()->Plug(false); - } else if (prop != NULL) { + + if (prop != NULL) { if (prop->is_synthetic()) { // Result of deleting parameters is false, even when they rewrite // to accesses on the arguments object. @@ -3063,23 +3603,41 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } else { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); + __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + __ push(r1); __ InvokeBuiltin(Builtins::DELETE, CALL_JS); context()->Plug(r0); } - } else if (var->is_global()) { - __ ldr(r1, GlobalObjectOperand()); - __ mov(r0, Operand(var->name())); - __ Push(r1, r0); - __ InvokeBuiltin(Builtins::DELETE, CALL_JS); - context()->Plug(r0); + } else if (var != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); + if (var->is_global()) { + __ ldr(r2, GlobalObjectOperand()); + __ mov(r1, Operand(var->name())); + __ mov(r0, Operand(Smi::FromInt(kNonStrictMode))); + __ Push(r2, r1, r0); + __ InvokeBuiltin(Builtins::DELETE, CALL_JS); + context()->Plug(r0); + } else if (var->AsSlot() != NULL && + var->AsSlot()->type() != Slot::LOOKUP) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + context()->Plug(false); + } else { + // Non-global variable. Call the runtime to try to delete from the + // context where the variable was introduced. + __ push(context_register()); + __ mov(r2, Operand(var->name())); + __ push(r2); + __ CallRuntime(Runtime::kDeleteContextSlot, 2); + context()->Plug(r0); + } } else { - // Non-global variable. Call the runtime to try to delete from the - // context where the variable was introduced. - __ push(context_register()); - __ mov(r2, Operand(var->name())); - __ push(r2); - __ CallRuntime(Runtime::kDeleteContextSlot, 2); - context()->Plug(r0); + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + context()->Plug(true); } break; } @@ -3093,17 +3651,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - - // Notice that the labels are swapped. - context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + + // Notice that the labels are swapped. 
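The reordered DELETE hunk above reduces to a three-way dispatch; strict-mode deletes of unqualified names never reach it because the parser rejects them, which is what the ASSERT records. A sketch of the dispatch, with booleans standing in for the var->is_global() and slot-type tests:

enum DeleteResult { kReturnsFalse, kReturnsTrue, kCallsRuntime };

DeleteResult DeleteOfVariable(bool is_global, bool is_plain_slot) {
  if (is_global) return kCallsRuntime;      // Builtins::DELETE, kNonStrictMode pushed
  if (is_plain_slot) return kReturnsFalse;  // stack/context slots: never deletable
  return kCallsRuntime;                     // lookup slots: Runtime::kDeleteContextSlot
}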
+ context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } @@ -3135,9 +3699,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::SUB, - overwrite, - NO_UNARY_FLAGS); + GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS); // GenericUnaryOpStub expects the argument to be in the // accumulator register r0. VisitForAccumulatorValue(expr->expression()); @@ -3270,13 +3832,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Inline smi case if we are in a loop. Label stub_call, done; + JumpPatchSite patch_site(masm_); + int count_value = expr->op() == Token::INC ? 1 : -1; if (ShouldInlineSmiCase(expr->op())) { __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC); __ b(vs, &stub_call); // We could eliminate this smi check if we split the code at // the first smi check before calling ToNumber. - __ JumpIfSmi(r0, &done); + patch_site.EmitJumpIfSmi(r0, &done); + __ bind(&stub_call); // Call stub. Undo operation first. __ sub(r0, r0, Operand(Smi::FromInt(count_value))); @@ -3286,8 +3851,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Record position before stub call. SetSourcePosition(expr->position()); - GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0); - __ CallStub(&stub); + TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE); + EmitCallIC(stub.GetCode(), &patch_site); __ bind(&done); // Store the value returned in r0. @@ -3315,7 +3880,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case NAMED_PROPERTY: { __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); __ pop(r1); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -3330,7 +3897,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case KEYED_PROPERTY: { __ pop(r1); // Key. __ pop(r2); // Receiver. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -3408,71 +3977,52 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); if (check->Equals(Heap::number_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_true); + __ JumpIfSmi(r0, if_true); __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); } else if (check->Equals(Heap::string_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); + __ JumpIfSmi(r0, if_false); // Check for undetectable objects => false. 
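The count-operation hunk above inlines the smi case: adding Smi::FromInt(1) to a tagged smi is an ordinary integer add of 2 (or a subtract of 2 for DEC), and the `vs` (overflow) branch falls back to the patched stub. A 32-bit model, assuming the GCC/Clang overflow builtin:

#include <cstdint>

// Returns false when the add overflows, i.e. the `vs` branch to stub_call.
bool SmiIncrement(int32_t tagged_smi, int32_t* result) {
  // Tagged smi: value << 1, low bit clear, so Smi::FromInt(1) == 2.
  return !__builtin_add_overflow(tagged_smi, 2, result);
}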
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE); + __ b(ge, if_false); __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); - __ b(eq, if_false); - __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - __ cmp(r1, Operand(FIRST_NONSTRING_TYPE)); - Split(lt, if_true, if_false, fall_through); + __ tst(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); } else if (check->Equals(Heap::boolean_symbol())) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kTrueValueRootIndex); __ b(eq, if_true); - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kFalseValueRootIndex); Split(eq, if_true, if_false, fall_through); } else if (check->Equals(Heap::undefined_symbol())) { - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(eq, if_true); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); + __ JumpIfSmi(r0, if_false); // Check for undetectable objects => true. __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); - Split(eq, if_true, if_false, fall_through); + __ tst(r1, Operand(1 << Map::kIsUndetectable)); + Split(ne, if_true, if_false, fall_through); + } else if (check->Equals(Heap::function_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); - __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE); - __ b(eq, if_true); - // Regular expressions => 'function' (they are callable). - __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE); - Split(eq, if_true, if_false, fall_through); + __ JumpIfSmi(r0, if_false); + __ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE); + Split(ge, if_true, if_false, fall_through); + } else if (check->Equals(Heap::object_symbol())) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, if_false); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r0, ip); + __ JumpIfSmi(r0, if_false); + __ CompareRoot(r0, Heap::kNullValueRootIndex); __ b(eq, if_true); - // Regular expressions => 'function', not 'object'. - __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE); - __ b(eq, if_false); - // Check for undetectable objects => false. - __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ cmp(r0, Operand(1 << Map::kIsUndetectable)); - __ b(eq, if_false); // Check for JS objects => true. - __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset)); - __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE)); - __ b(lt, if_false); - __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE)); - Split(le, if_true, if_false, fall_through); + __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); + __ b(lo, if_false); + __ CompareInstanceType(r0, r1, FIRST_FUNCTION_CLASS_TYPE); + __ b(hs, if_false); + // Check for undetectable objects => false. 
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); + __ tst(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); } else { if (if_false != fall_through) __ jmp(if_false); } @@ -3644,11 +4194,43 @@ Register FullCodeGenerator::context_register() { void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) { ASSERT(mode == RelocInfo::CODE_TARGET || mode == RelocInfo::CODE_TARGET_CONTEXT); + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1, r1, r2); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1, r1, r2); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2); + default: + break; + } + __ Call(ic, mode); } void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) { + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1, r1, r2); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1, r1, r2); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2); + default: + break; + } + __ Call(ic, RelocInfo::CODE_TARGET); if (patch_site != NULL && patch_site->is_bound()) { patch_site->EmitPatchInfo(); diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc index 8c76458e..0fc68187 100644 --- a/src/arm/ic-arm.cc +++ b/src/arm/ic-arm.cc @@ -115,6 +115,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm, Register name, Register scratch1, Register scratch2) { + // Assert that name contains a string. + if (FLAG_debug_code) __ AbortIfNotString(name); + // Compute the capacity mask. const int kCapacityOffset = StringDictionary::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize; @@ -843,7 +846,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // -- lr : return address // ----------------------------------- + // Check if the name is a string. + Label miss; + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &miss); + __ IsObjectJSStringType(r2, r0, &miss); + GenerateCallNormal(masm, argc); + __ bind(&miss); GenerateMiss(masm, argc); } @@ -1390,7 +1400,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { +void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key @@ -1401,11 +1412,16 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { // Push receiver, key and value for runtime call. __ Push(r2, r1, r0); - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode. + __ Push(r1, r0); + + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } -void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { +void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key @@ -1460,29 +1476,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // r0: value. // r1: key. // r2: receiver. 
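GenerateRuntimeSetProperty above widens Runtime::kSetProperty from three arguments to five; the two new ones ride as smis. A sketch of the argument record the runtime now sees (the struct and field names are illustrative; the register annotations mirror the pushes):

#include <cstdint>

enum PropertyAttributes { NONE = 0 };

// What the five pushes hand to Runtime::kSetProperty, top of stack last:
struct SetPropertyArgs {
  void* receiver;           // r2
  void* key;                // r1
  void* value;              // r0
  intptr_t attributes_smi;  // Smi::FromInt(NONE)
  intptr_t strict_mode_smi; // Smi::FromInt(strict_mode)
};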
- GenerateRuntimeSetProperty(masm); + GenerateRuntimeSetProperty(masm, strict_mode); // Check whether the elements is a pixel array. // r4: elements map. __ bind(&check_pixel_array); - __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); - __ cmp(r4, ip); - __ b(ne, &slow); - // Check that the value is a smi. If a conversion is needed call into the - // runtime to convert and clamp. - __ JumpIfNotSmi(value, &slow); - __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key. - __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset)); - __ cmp(r4, Operand(ip)); - __ b(hs, &slow); - __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. - __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255]. - - // Get the pointer to the external array. This clobbers elements. - __ ldr(elements, - FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); - __ strb(r5, MemOperand(elements, r4)); // Elements is now external array. - __ Ret(); + GenerateFastPixelArrayStore(masm, + r2, + r1, + r0, + elements, + r4, + r5, + r6, + false, + false, + NULL, + &slow, + &slow, + &slow); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one @@ -1534,7 +1546,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { void StoreIC::GenerateMegamorphic(MacroAssembler* masm, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : receiver @@ -1546,7 +1558,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, NOT_IN_LOOP, MONOMORPHIC, - extra_ic_state); + strict_mode); StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); // Cache miss: Jump to runtime. @@ -1640,7 +1652,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { } -void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { +void StoreIC::GenerateGlobalProxy(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : receiver @@ -1650,8 +1663,12 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { __ Push(r1, r2, r0); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt(strict_mode))); + __ Push(r1, r0); + // Do tail-call to runtime routine. 
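The removed inline pixel-array path above clamped the untagged value with `Usat r5, 8`, and the shared GenerateFastPixelArrayStore helper that replaces it presumably performs the same saturation. The clamp itself, in plain C++:

#include <cstdint>

uint8_t ClampToPixel(int32_t value) {
  if (value < 0) return 0;      // USAT clamps negatives to 0
  if (value > 255) return 255;  // ...and large values to 255
  return static_cast<uint8_t>(value);
}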
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc index 82de5d3e..e79465cb 100644 --- a/src/arm/lithium-arm.cc +++ b/src/arm/lithium-arm.cc @@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -355,7 +355,16 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ -364,8 +373,18 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) { } -LChunk::LChunk(HGraph* graph) +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +LChunk::LChunk(CompilationInfo* info, HGraph* graph) : spill_slot_count_(0), + info_(info), graph_(graph), instructions_(32), pointer_maps_(8), @@ -456,7 +475,7 @@ int LChunk::GetParameterStackSlot(int index) const { // shift all parameter indexes down by the number of parameters, and // make sure they end up negative so they are distinguishable from // spill slots. - int result = index - graph()->info()->scope()->num_parameters() - 1; + int result = index - info()->scope()->num_parameters() - 1; ASSERT(result < 0); return result; } @@ -464,7 +483,7 @@ int LChunk::GetParameterStackSlot(int index) const { // A parameter relative to ebp in the arguments stub. int LChunk::ParameterAt(int index) { ASSERT(-1 <= index); // -1 is the receiver. - return (1 + graph()->info()->scope()->num_parameters() - index) * + return (1 + info()->scope()->num_parameters() - index) * kPointerSize; } @@ -503,7 +522,7 @@ Representation LChunk::LookupLiteralRepresentation( LChunk* LChunkBuilder::Build() { ASSERT(is_unused()); - chunk_ = new LChunk(graph()); + chunk_ = new LChunk(info(), graph()); HPhase phase("Building chunk", chunk_); status_ = BUILDING; const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); @@ -520,8 +539,8 @@ LChunk* LChunkBuilder::Build() { void LChunkBuilder::Abort(const char* format, ...) 
{ if (FLAG_trace_bailout) { - SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LChunk building in @\"%s\": ", *debug_name); + SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LChunk building in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -855,6 +874,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); + ASSERT(op != Token::MOD); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); LArithmeticD* result = new LArithmeticD(op, left, right); @@ -1136,8 +1156,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( HInstanceOfKnownGlobal* instr) { LInstanceOfKnownGlobal* result = new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4)); - MarkAsSaveDoubles(result); - return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0))); + return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1193,34 +1212,30 @@ LInstruction* LChunkBuilder::DoCallConstantFunction( LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { BuiltinFunctionId op = instr->op(); - LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; - LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); - switch (op) { - case kMathAbs: - return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); - case kMathFloor: - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - case kMathSqrt: - return DefineSameAsFirst(result); - case kMathRound: - Abort("MathRound LUnaryMathOperation not implemented"); - return NULL; - case kMathPowHalf: - Abort("MathPowHalf LUnaryMathOperation not implemented"); - return NULL; - case kMathLog: - Abort("MathLog LUnaryMathOperation not implemented"); - return NULL; - case kMathCos: - Abort("MathCos LUnaryMathOperation not implemented"); - return NULL; - case kMathSin: - Abort("MathSin LUnaryMathOperation not implemented"); - return NULL; - default: - UNREACHABLE(); - return NULL; + if (op == kMathLog || op == kMathSin || op == kMathCos) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL); + return MarkAsCall(DefineFixedDouble(result, d2), instr); + } else { + LOperand* input = UseRegisterAtStart(instr->value()); + LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; + LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); + switch (op) { + case kMathAbs: + return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); + case kMathFloor: + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + case kMathSqrt: + return DefineSameAsFirst(result); + case kMathRound: + return AssignEnvironment(DefineAsRegister(result)); + case kMathPowHalf: + Abort("MathPowHalf LUnaryMathOperation not implemented"); + return NULL; + default: + UNREACHABLE(); + return NULL; + } } } @@ -1418,8 +1433,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LInstruction* LChunkBuilder::DoPower(HPower* instr) { - Abort("LPower instruction not implemented on ARM"); - return NULL; + ASSERT(instr->representation().IsDouble()); + // We call a C function for double power. It can't trigger a GC. 
+ // We need to use fixed result register for the call. + Representation exponent_type = instr->right()->representation(); + ASSERT(instr->left()->representation().IsDouble()); + LOperand* left = UseFixedDouble(instr->left(), d1); + LOperand* right = exponent_type.IsDouble() ? + UseFixedDouble(instr->right(), d2) : + UseFixed(instr->right(), r0); + LPower* result = new LPower(left, right); + return MarkAsCall(DefineFixedDouble(result, d3), + instr, + CAN_DEOPTIMIZE_EAGERLY); } @@ -1491,6 +1517,15 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { } +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LGetCachedArrayIndex(value)); +} + + LInstruction* LChunkBuilder::DoHasCachedArrayIndex( HHasCachedArrayIndex* instr) { ASSERT(instr->value()->representation().IsTagged()); @@ -1700,11 +1735,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context = UseTempRegister(instr->context()); + LOperand* context; LOperand* value; if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); value = UseTempRegister(instr->value()); } else { + context = UseRegister(instr->context()); value = UseRegister(instr->value()); } return new LStoreContextSlot(context, value); @@ -1797,6 +1834,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + Abort("DoStorePixelArrayElement not implemented"); + return NULL; +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* obj = UseFixed(instr->object(), r2); LOperand* key = UseFixed(instr->key(), r1); @@ -1902,8 +1946,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. 
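DoPower above pins its operands to fixed VFP registers (d1 for the base, d2 or r0 for the exponent, d3 for the result) because the operation is a call out to C. Semantically the callee is just a pow wrapper; since it allocates nothing, it cannot trigger a GC, as the comment notes:

#include <cmath>

double PowerStub(double base, double exponent) {
  return std::pow(base, exponent);  // pure math: no allocation, hence no GC
}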
return NULL; } diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h index 8d2573d9..9cbcc3b9 100644 --- a/src/arm/lithium-arm.h +++ b/src/arm/lithium-arm.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -94,6 +92,7 @@ class LCodeGen; V(FixedArrayLength) \ V(FunctionLiteral) \ V(Gap) \ + V(GetCachedArrayIndex) \ V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ @@ -134,6 +133,7 @@ class LCodeGen; V(OuterContext) \ V(Parameter) \ V(PixelArrayLength) \ + V(Power) \ V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ @@ -728,6 +728,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { }; +class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LHasCachedArrayIndex(LOperand* value) { @@ -1046,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> { }; +class LPower: public LTemplateInstruction<1, 2, 0> { + public: + LPower(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(Power, "power") + DECLARE_HYDROGEN_ACCESSOR(Power) +}; + + class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) @@ -1498,32 +1521,22 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 0> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamed(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val) { inputs_[0] = obj; inputs_[1] = val; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle<Object> name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle<Object> name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1531,25 +1544,35 @@ class LStoreNamedField: public LStoreNamed { }; -class LStoreNamedGeneric: public LStoreNamed { +class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamedGeneric(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } + LStoreNamedGeneric(LOperand* obj, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = val; + } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + Handle<Object> name() const { return hydrogen()->name(); } }; -class 
LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1559,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) -}; + LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = key; + inputs_[2] = val; + } + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") -class LStoreKeyedGeneric: public LStoreKeyed { - public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) { } + virtual void PrintDataTo(StringStream* stream); - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; @@ -1808,7 +1829,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> { class LChunkBuilder; class LChunk: public ZoneObject { public: - explicit LChunk(HGraph* graph); + explicit LChunk(CompilationInfo* info, HGraph* graph); void AddInstruction(LInstruction* instruction, HBasicBlock* block); LConstantOperand* DefineConstantOperand(HConstant* constant); @@ -1821,6 +1842,7 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } void AddGapMove(int index, LOperand* from, LOperand* to); @@ -1857,6 +1879,7 @@ class LChunk: public ZoneObject { private: int spill_slot_count_; + CompilationInfo* info_; HGraph* const graph_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; @@ -1866,8 +1889,9 @@ class LChunk: public ZoneObject { class LChunkBuilder BASE_EMBEDDED { public: - LChunkBuilder(HGraph* graph, LAllocator* allocator) + LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) : chunk_(NULL), + info_(info), graph_(graph), status_(UNUSED), current_instruction_(NULL), @@ -1896,6 +1920,7 @@ class LChunkBuilder BASE_EMBEDDED { }; LChunk* chunk() const { return chunk_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } bool is_unused() const { return status_ == UNUSED; } @@ -2002,6 +2027,7 @@ class LChunkBuilder BASE_EMBEDDED { HArithmeticBinaryOperation* instr); LChunk* chunk_; + CompilationInfo* info_; HGraph* const graph_; Status status_; HInstruction* current_instruction_; diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 057ac241..afe90159 100644 --- a/src/arm/lithium-codegen-arm.cc +++ 
b/src/arm/lithium-codegen-arm.cc @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "arm/lithium-codegen-arm.h" +#include "arm/lithium-gap-resolver-arm.h" #include "code-stubs.h" #include "stub-cache.h" @@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator { }; -class LGapNode: public ZoneObject { - public: - explicit LGapNode(LOperand* operand) - : operand_(operand), resolved_(false), visited_id_(-1) { } - - LOperand* operand() const { return operand_; } - bool IsResolved() const { return !IsAssigned() || resolved_; } - void MarkResolved() { - ASSERT(!IsResolved()); - resolved_ = true; - } - int visited_id() const { return visited_id_; } - void set_visited_id(int id) { - ASSERT(id > visited_id_); - visited_id_ = id; - } - - bool IsAssigned() const { return assigned_from_.is_set(); } - LGapNode* assigned_from() const { return assigned_from_.get(); } - void set_assigned_from(LGapNode* n) { assigned_from_.set(n); } - - private: - LOperand* operand_; - SetOncePointer<LGapNode> assigned_from_; - bool resolved_; - int visited_id_; -}; - - -LGapResolver::LGapResolver() - : nodes_(32), - identified_cycles_(4), - result_(16), - next_visited_id_(0) { -} - - -const ZoneList<LMoveOperands>* LGapResolver::Resolve( - const ZoneList<LMoveOperands>* moves, - LOperand* marker_operand) { - nodes_.Rewind(0); - identified_cycles_.Rewind(0); - result_.Rewind(0); - next_visited_id_ = 0; - - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) RegisterMove(move); - } - - for (int i = 0; i < identified_cycles_.length(); ++i) { - ResolveCycle(identified_cycles_[i], marker_operand); - } - - int unresolved_nodes; - do { - unresolved_nodes = 0; - for (int j = 0; j < nodes_.length(); j++) { - LGapNode* node = nodes_[j]; - if (!node->IsResolved() && node->assigned_from()->IsResolved()) { - AddResultMove(node->assigned_from(), node); - node->MarkResolved(); - } - if (!node->IsResolved()) ++unresolved_nodes; - } - } while (unresolved_nodes > 0); - return &result_; -} - - -void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) { - AddResultMove(from->operand(), to->operand()); -} - - -void LGapResolver::AddResultMove(LOperand* from, LOperand* to) { - result_.Add(LMoveOperands(from, to)); -} - - -void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) { - ZoneList<LOperand*> cycle_operands(8); - cycle_operands.Add(marker_operand); - LGapNode* cur = start; - do { - cur->MarkResolved(); - cycle_operands.Add(cur->operand()); - cur = cur->assigned_from(); - } while (cur != start); - cycle_operands.Add(marker_operand); - - for (int i = cycle_operands.length() - 1; i > 0; --i) { - LOperand* from = cycle_operands[i]; - LOperand* to = cycle_operands[i - 1]; - AddResultMove(from, to); - } -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) { - ASSERT(a != b); - LGapNode* cur = a; - while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) { - cur->set_visited_id(visited_id); - cur = cur->assigned_from(); - } - - return cur == b; -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) { - ASSERT(a != b); - return CanReach(a, b, next_visited_id_++); -} - - -void LGapResolver::RegisterMove(LMoveOperands move) { - if (move.source()->IsConstantOperand()) { - // Constant moves should be last in the machine code. Therefore add them - // first to the result set. 
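The LGapNode/LGapResolver machinery being deleted here existed to order parallel moves and break cycles with a marker operand; that job moves to the new lithium-gap-resolver-arm.cc included above (see `resolver_.Resolve(move)` further down). The smallest cycle shows why some scratch location is unavoidable:

// A parallel move {r0 <- r1, r1 <- r0} has no valid sequential order on its
// own; a temporary (the old marker_operand, or the new resolver's scratch)
// breaks the cycle.
void ParallelSwap(int& r0, int& r1) {
  int scratch = r0;
  r0 = r1;
  r1 = scratch;
}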
- AddResultMove(move.source(), move.destination()); - } else { - LGapNode* from = LookupNode(move.source()); - LGapNode* to = LookupNode(move.destination()); - if (to->IsAssigned() && to->assigned_from() == from) { - move.Eliminate(); - return; - } - ASSERT(!to->IsAssigned()); - if (CanReach(from, to)) { - // This introduces a cycle. Save. - identified_cycles_.Add(from); - } - to->set_assigned_from(from); - } -} - - -LGapNode* LGapResolver::LookupNode(LOperand* operand) { - for (int i = 0; i < nodes_.length(); ++i) { - if (nodes_[i]->operand()->Equals(operand)) return nodes_[i]; - } - - // No node found => create a new one. - LGapNode* result = new LGapNode(operand); - nodes_.Add(result); - return result; -} - - #define __ masm()-> bool LCodeGen::GenerateCode() { @@ -230,8 +80,8 @@ void LCodeGen::FinishCode(Handle<Code> code) { void LCodeGen::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); + SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LCodeGen in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is in r1. + __ push(r1); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both r0 and cp. It replaces the context + // passed to us. It's saved in the stack and kept live in cp. + __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Copy any necessary parameters into the context. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ ldr(r0, MemOperand(fp, parameter_offset)); + // Store it in the context. + __ mov(r1, Operand(Context::SlotOffset(slot->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); @@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) { MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - // TODO(regis): Revisit. ASSERT(!op->IsRegister()); ASSERT(!op->IsDoubleRegister()); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); @@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const { } +MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { + ASSERT(op->IsDoubleStackSlot()); + int index = op->index(); + if (index >= 0) { + // Local or spill slot. Skip the frame pointer, function, context, + // and the first word of the double in the fixed part of the frame. 
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); + } else { + // Incoming parameter. Skip the return address and the first word of + // the double. + return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); + } +} + + void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation) { if (environment == NULL) return; @@ -671,7 +573,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<DeoptimizationInputData> data = Factory::NewDeoptimizationInputData(length, TENURED); - data->SetTranslationByteArray(*translations_.CreateByteArray()); + Handle<ByteArray> translations = translations_.CreateByteArray(); + data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); Handle<FixedArray> literals = @@ -751,6 +654,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -787,116 +696,7 @@ void LCodeGen::DoLabel(LLabel* label) { void LCodeGen::DoParallelMove(LParallelMove* move) { - // d0 must always be a scratch register. - DoubleRegister dbl_scratch = d0; - LUnallocated marker_operand(LUnallocated::NONE); - - Register core_scratch = scratch0(); - bool destroys_core_scratch = false; - - const ZoneList<LMoveOperands>* moves = - resolver_.Resolve(move->move_operands(), &marker_operand); - for (int i = moves->length() - 1; i >= 0; --i) { - LMoveOperands move = moves->at(i); - LOperand* from = move.source(); - LOperand* to = move.destination(); - ASSERT(!from->IsDoubleRegister() || - !ToDoubleRegister(from).is(dbl_scratch)); - ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch)); - ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch)); - ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch)); - if (from == &marker_operand) { - if (to->IsRegister()) { - __ mov(ToRegister(to), core_scratch); - ASSERT(destroys_core_scratch); - } else if (to->IsStackSlot()) { - __ str(core_scratch, ToMemOperand(to)); - ASSERT(destroys_core_scratch); - } else if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), dbl_scratch); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } else if (to == &marker_operand) { - if (from->IsRegister() || from->IsConstantOperand()) { - __ mov(core_scratch, ToOperand(from)); - destroys_core_scratch = true; - } else if (from->IsStackSlot()) { - __ ldr(core_scratch, ToMemOperand(from)); - destroys_core_scratch = true; - } else if (from->IsDoubleRegister()) { - __ vmov(dbl_scratch, ToDoubleRegister(from)); - } else { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? 
- // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - } - } else if (from->IsConstantOperand()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ mov(ip, ToOperand(from)); - __ str(ip, ToMemOperand(to)); - } - } else if (from->IsRegister()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ str(ToRegister(from), ToMemOperand(to)); - } - } else if (to->IsRegister()) { - ASSERT(from->IsStackSlot()); - __ ldr(ToRegister(to), ToMemOperand(from)); - } else if (from->IsStackSlot()) { - ASSERT(to->IsStackSlot()); - __ ldr(ip, ToMemOperand(from)); - __ str(ip, ToMemOperand(to)); - } else if (from->IsDoubleRegister()) { - if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), ToDoubleRegister(from)); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset()); - } - } else if (to->IsDoubleRegister()) { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(ToDoubleRegister(to), ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset()); - } else { - ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } - - if (destroys_core_scratch) { - __ ldr(core_scratch, MemOperand(fp, -kPointerSize)); - } - - LInstruction* next = GetNextInstruction(); - if (next != NULL && next->IsLazyBailout()) { - int pc = masm()->pc_offset(); - safepoints_.SetPcAfterGap(pc); - } + resolver_.Resolve(move); } @@ -966,7 +766,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) { } case CodeStub::TranscendentalCache: { __ ldr(r0, MemOperand(sp, 0)); - TranscendentalCacheStub stub(instr->transcendental_type()); + TranscendentalCacheStub stub(instr->transcendental_type(), + TranscendentalCacheStub::TAGGED); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } @@ -987,7 +788,7 @@ void LCodeGen::DoModI(LModI* instr) { DeferredModI(LCodeGen* codegen, LModI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD); + codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD); } private: LModI* instr_; @@ -1016,7 +817,7 @@ void LCodeGen::DoModI(LModI* instr) { __ bind(&ok); } - // Try a few common cases before using the generic stub. + // Try a few common cases before using the stub. Label call_stub; const int kUnfolds = 3; // Skip if either side is negative. @@ -1044,7 +845,7 @@ void LCodeGen::DoModI(LModI* instr) { __ and_(result, scratch, Operand(left)); __ bind(&call_stub); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. 
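TrySmiTag, used just below, only succeeds for values that fit the 31-bit smi payload; anything wider deoptimizes instead of reaching the stub. A 32-bit sketch of that contract:

#include <cstdint>

// Tagging succeeds only for 31-bit values; otherwise the caller deoptimizes.
bool TrySmiTag(int32_t value, int32_t* tagged) {
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;  // deoptimize
  *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);  // low bit clear
  return true;
}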
DeferredModI* deferred = new DeferredModI(this, instr); __ TrySmiTag(left, &deoptimize, scratch); @@ -1070,7 +871,7 @@ void LCodeGen::DoDivI(LDivI* instr) { DeferredDivI(LCodeGen* codegen, LDivI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV); + codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } private: LDivI* instr_; @@ -1123,7 +924,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); __ b(eq, &done); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. DeferredDivI* deferred = new DeferredDivI(this, instr); @@ -1145,19 +946,33 @@ void LCodeGen::DoDivI(LDivI* instr) { template<int T> -void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op) { +void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op) { Register left = ToRegister(instr->InputAt(0)); Register right = ToRegister(instr->InputAt(1)); __ PushSafepointRegistersAndDoubles(); - GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); + // Move left to r1 and right to r0 for the stub call. + if (left.is(r1)) { + __ Move(r0, right); + } else if (left.is(r0) && right.is(r1)) { + __ Swap(r0, r1, r2); + } else if (left.is(r0)) { + ASSERT(!right.is(r1)); + __ mov(r1, r0); + __ mov(r0, right); + } else { + ASSERT(!left.is(r0) && !right.is(r0)); + __ mov(r0, right); + __ mov(r1, left); + } + TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT); __ CallStub(&stub); RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Overwrite the stored value of r0 with the result of the stub. - __ StoreToSafepointRegistersAndDoublesSlot(r0); + __ StoreToSafepointRegistersAndDoublesSlot(r0, r0); __ PopSafepointRegistersAndDoubles(); } @@ -1413,7 +1228,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ vmov(r2, r3, right); __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); // Move the result in the double result register. - __ vmov(ToDoubleRegister(instr->result()), r0, r1); + __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result())); // Restore r0-r3. 
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); @@ -1431,10 +1246,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->InputAt(1)).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); - // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current - // GenericBinaryOpStub: - // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); - GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0); + TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -1896,14 +1708,45 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { } +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + if (FLAG_debug_code) { + __ AbortIfNotString(input); + } + + __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(result, result); +} + + void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { - Abort("DoHasCachedArrayIndex unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); } void LCodeGen::DoHasCachedArrayIndexAndBranch( LHasCachedArrayIndexAndBranch* instr) { - Abort("DoHasCachedArrayIndexAndBranch unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register scratch = scratch0(); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + EmitBranch(true_block, false_block, eq); } @@ -2146,15 +1989,11 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ bind(&before_push_delta); __ BlockConstPoolFor(kAdditionalDelta); __ mov(temp, Operand(delta * kPointerSize)); - __ StoreToSafepointRegisterSlot(temp); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); - ASSERT_EQ(kAdditionalDelta, - masm_->InstructionsGeneratedSince(&before_push_delta)); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + __ StoreToSafepointRegisterSlot(temp, temp); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); // Put the result value into the result register slot and // restore all registers. 
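// (The two-register form below is introduced by this patch: the first
// argument is the value to store, the second names the register whose
// safepoint slot receives it. StoreToSafepointRegisterSlot(result, result)
// reproduces the old one-argument behaviour explicitly.)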
- __ StoreToSafepointRegisterSlot(result); + __ StoreToSafepointRegisterSlot(result, result); __ PopSafepointRegisters(); } @@ -2274,17 +2113,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); - __ ldr(result, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); - __ ldr(result, ContextOperand(result, instr->slot_index())); + __ ldr(result, ContextOperand(context, instr->slot_index())); } void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - __ ldr(context, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); __ str(value, ContextOperand(context, instr->slot_index())); if (instr->needs_write_barrier()) { int offset = Context::SlotOffset(instr->slot_index()); @@ -2603,7 +2438,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, LInstruction* instr) { // Change context if needed. bool change_context = - (graph()->info()->closure()->context() != function->context()) || + (info()->closure()->context() != function->context()) || scope()->contains_with() || (scope()->num_heap_slots() > 0); if (change_context) { @@ -2687,7 +2522,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { // Set the pointer to the new heap number in tmp. if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input); + __ LoadFromSafepointRegisterSlot(input, input); __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); __ bind(&allocated); @@ -2698,7 +2533,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - __ str(tmp1, masm()->SafepointRegisterSlot(input)); + __ StoreToSafepointRegisterSlot(tmp1, input); __ PopSafepointRegisters(); __ bind(&done); @@ -2752,41 +2587,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { } -// Truncates a double using a specific rounding mode. -// Clears the z flag (ne condition) if an overflow occurs. -void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode, - SwVfpRegister result, - DwVfpRegister double_input, - Register scratch1, - Register scratch2) { - Register prev_fpscr = scratch1; - Register scratch = scratch2; - - // Set custom FPCSR: - // - Set rounding mode. - // - Clear vfp cumulative exception flags. - // - Make sure Flush-to-zero mode control bit is unset. - __ vmrs(prev_fpscr); - __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask | - kVFPRoundingModeMask | - kVFPFlushToZeroMask)); - __ orr(scratch, scratch, Operand(rounding_mode)); - __ vmsr(scratch); - - // Convert the argument to an integer. - __ vcvt_s32_f64(result, - double_input, - kFPSCRRounding); - - // Retrieve FPSCR. - __ vmrs(scratch); - // Restore FPSCR. - __ vmsr(prev_fpscr); - // Check for vfp exceptions. 
- __ tst(scratch, Operand(kVFPExceptionMask)); -} - - void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); @@ -2794,11 +2594,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->TempAt(0)); - EmitVFPTruncate(kRoundToMinusInf, - single_scratch, - input, - scratch1, - scratch2); + __ EmitVFPTruncate(kRoundToMinusInf, + single_scratch, + input, + scratch1, + scratch2); DeoptimizeIf(ne, instr->environment()); // Move the result back to general purpose register r0. @@ -2815,6 +2615,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } +void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch1 = scratch0(); + Register scratch2 = result; + __ EmitVFPTruncate(kRoundToNearest, + double_scratch0().low(), + input, + scratch1, + scratch2); + DeoptimizeIf(ne, instr->environment()); + __ vmov(result, double_scratch0().low()); + + // Test for -0. + Label done; + __ cmp(result, Operand(0)); + __ b(ne, &done); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); +} + + void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input)); @@ -2822,6 +2646,88 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { } +void LCodeGen::DoPower(LPower* instr) { + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + Register scratch = scratch0(); + DoubleRegister result_reg = ToDoubleRegister(instr->result()); + Representation exponent_type = instr->hydrogen()->right()->representation(); + if (exponent_type.IsDouble()) { + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, ToDoubleRegister(right)); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } else if (exponent_type.IsInteger32()) { + ASSERT(ToRegister(right).is(r0)); + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ mov(r2, ToRegister(right)); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ CallCFunction(ExternalReference::power_double_int_function(), 4); + } else { + ASSERT(exponent_type.IsTagged()); + ASSERT(instr->hydrogen()->left()->representation().IsDouble()); + + Register right_reg = ToRegister(right); + + // Check for smi on the right hand side. + Label non_smi, call; + __ JumpIfNotSmi(right_reg, &non_smi); + + // Untag smi and convert it to a double. + __ SmiUntag(right_reg); + SwVfpRegister single_scratch = double_scratch0().low(); + __ vmov(single_scratch, right_reg); + __ vcvt_f64_s32(result_reg, single_scratch); + __ jmp(&call); + + // Heap number map check. + __ bind(&non_smi); + __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, Operand(ip)); + DeoptimizeIf(ne, instr->environment()); + int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag; + __ add(scratch, right_reg, Operand(value_offset)); + __ vldr(result_reg, scratch, 0); + + // Prepare arguments and call C function. 
+ __ bind(&call); + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, result_reg); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } + // Store the result in the result register. + __ GetCFunctionDoubleResult(result_reg); +} + + +void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { + ASSERT(ToDoubleRegister(instr->result()).is(d2)); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { + ASSERT(ToDoubleRegister(instr->result()).is(d2)); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { + ASSERT(ToDoubleRegister(instr->result()).is(d2)); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { switch (instr->op()) { case kMathAbs: @@ -2830,9 +2736,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { case kMathFloor: DoMathFloor(instr); break; + case kMathRound: + DoMathRound(instr); + break; case kMathSqrt: DoMathSqrt(instr); break; + case kMathCos: + DoMathCos(instr); + break; + case kMathSin: + DoMathSin(instr); + break; + case kMathLog: + DoMathLog(instr); + break; default: Abort("Unimplemented type of LUnaryMathOperation."); UNREACHABLE(); @@ -2944,9 +2862,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { // Name is always in r2. __ mov(r2, Operand(instr->name())); - Handle<Code> ic(Builtins::builtin(info_->is_strict() - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2988,7 +2906,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->key()).is(r1)); ASSERT(ToRegister(instr->value()).is(r0)); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -3129,8 +3049,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ AbortIfNotSmi(r0); } __ SmiUntag(r0); - MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result); - __ str(r0, result_stack_slot); + __ StoreToSafepointRegisterSlot(r0, result); __ PopSafepointRegisters(); } @@ -3211,9 +3130,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // register is stored, as this register is in the pointer map, but contains an // integer value. 
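// (Why zero: it is a valid smi, so if the GC walks the safepoint slots
// during the allocation below it sees a harmless smi rather than a raw
// 32-bit integer that could be misread as a heap pointer. The slot is
// rewritten with the real tagged value once allocation succeeds.)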
__ mov(ip, Operand(0)); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize)); - + __ StoreToSafepointRegisterSlot(ip, reg); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); @@ -3224,7 +3141,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { __ bind(&done); __ sub(ip, reg, Operand(kHeapObjectTag)); __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); - __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(reg, reg); __ PopSafepointRegisters(); } @@ -3269,8 +3186,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(r0, reg); __ PopSafepointRegisters(); } @@ -3456,30 +3372,36 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->TempAt(0)); - VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf - : kRoundToNearest; + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_input, + scratch1, + scratch2); - EmitVFPTruncate(rounding_mode, - single_scratch, - double_input, - scratch1, - scratch2); // Deoptimize if we had a vfp invalid exception. DeoptimizeIf(ne, instr->environment()); + // Retrieve the result. __ vmov(result_reg, single_scratch); - if (instr->truncating() && - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ cmp(result_reg, Operand(0)); - __ b(ne, &done); - // Check for -0. - __ vmov(scratch1, double_input.high()); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); + if (!instr->truncating()) { + // Convert result back to double and compare with input + // to check if the conversion was exact. + __ vmov(single_scratch, result_reg); + __ vcvt_f64_s32(double_scratch0(), single_scratch); + __ VFPCompareAndSetFlags(double_scratch0(), double_input); DeoptimizeIf(ne, instr->environment()); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ cmp(result_reg, Operand(0)); + __ b(ne, &done); + // Check for -0. 
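// (0.0 and -0.0 both convert to integer 0, so a zero result is ambiguous.
// The sign bit lives in the high word of the double; testing it against
// HeapNumber::kSignMask detects -0.0 and deoptimizes when the instruction
// must keep the two apart.)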
+ __ vmov(scratch1, double_input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); - __ bind(&done); + __ bind(&done); + } } } @@ -3750,37 +3672,30 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); if (type_name->Equals(Heap::number_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, true_label); + __ JumpIfSmi(input, true_label); __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(input, Operand(ip)); final_branch_condition = eq; } else if (type_name->Equals(Heap::string_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); - __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); + __ JumpIfSmi(input, false_label); + __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE); + __ b(ge, false_label); __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); - __ b(ne, false_label); - __ CompareInstanceType(input, scratch, FIRST_NONSTRING_TYPE); - final_branch_condition = lo; + final_branch_condition = eq; } else if (type_name->Equals(Heap::boolean_symbol())) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(input, ip); + __ CompareRoot(input, Heap::kTrueValueRootIndex); __ b(eq, true_label); - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(input, ip); + __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = eq; } else if (type_name->Equals(Heap::undefined_symbol())) { - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input, ip); + __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ b(eq, true_label); - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); + __ JumpIfSmi(input, false_label); // Check for undetectable objects => true. __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); @@ -3788,32 +3703,22 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = ne; } else if (type_name->Equals(Heap::function_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); - __ CompareObjectType(input, input, scratch, JS_FUNCTION_TYPE); - __ b(eq, true_label); - // Regular expressions => 'function' (they are callable). - __ CompareInstanceType(input, scratch, JS_REGEXP_TYPE); - final_branch_condition = eq; + __ JumpIfSmi(input, false_label); + __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE); + final_branch_condition = ge; } else if (type_name->Equals(Heap::object_symbol())) { - __ tst(input, Operand(kSmiTagMask)); - __ b(eq, false_label); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(input, ip); + __ JumpIfSmi(input, false_label); + __ CompareRoot(input, Heap::kNullValueRootIndex); __ b(eq, true_label); - // Regular expressions => 'function', not 'object'. - __ CompareObjectType(input, input, scratch, JS_REGEXP_TYPE); - __ b(eq, false_label); + __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE); + __ b(lo, false_label); + __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE); + __ b(hs, false_label); // Check for undetectable objects => false. __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); - __ b(ne, false_label); - // Check for JS objects => true. 
- __ CompareInstanceType(input, scratch, FIRST_JS_OBJECT_TYPE); - __ b(lo, false_label); - __ CompareInstanceType(input, scratch, LAST_JS_OBJECT_TYPE); - final_branch_condition = ls; + final_branch_condition = eq; } else { final_branch_condition = ne; @@ -3888,7 +3793,9 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register object = ToRegister(instr->object()); Register key = ToRegister(instr->key()); - __ Push(object, key); + Register strict = scratch0(); + __ mov(strict, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(object, key, strict); ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); LPointerMap* pointers = instr->pointer_map(); LEnvironment* env = instr->deoptimization_environment(); diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h index 7bc6689f..23e0c44b 100644 --- a/src/arm/lithium-codegen-arm.h +++ b/src/arm/lithium-codegen-arm.h @@ -29,7 +29,7 @@ #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ #include "arm/lithium-arm.h" - +#include "arm/lithium-gap-resolver-arm.h" #include "deoptimizer.h" #include "safepoint-table.h" #include "scopes.h" @@ -39,31 +39,8 @@ namespace internal { // Forward declarations. class LDeferredCode; -class LGapNode; class SafepointGenerator; -class LGapResolver BASE_EMBEDDED { - public: - LGapResolver(); - const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves, - LOperand* marker_operand); - - private: - LGapNode* LookupNode(LOperand* operand); - bool CanReach(LGapNode* a, LGapNode* b, int visited_id); - bool CanReach(LGapNode* a, LGapNode* b); - void RegisterMove(LMoveOperands move); - void AddResultMove(LOperand* from, LOperand* to); - void AddResultMove(LGapNode* from, LGapNode* to); - void ResolveCycle(LGapNode* start, LOperand* marker_operand); - - ZoneList<LGapNode*> nodes_; - ZoneList<LGapNode*> identified_cycles_; - ZoneList<LMoveOperands> result_; - int next_visited_id_; -}; - - class LCodeGen BASE_EMBEDDED { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) @@ -76,13 +53,39 @@ class LCodeGen BASE_EMBEDDED { deoptimizations_(4), deoptimization_literals_(8), inlined_function_count_(0), - scope_(chunk->graph()->info()->scope()), + scope_(info->scope()), status_(UNUSED), deferred_(8), - osr_pc_offset_(-1) { + osr_pc_offset_(-1), + resolver_(this) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } + + // Simple accessors. + MacroAssembler* masm() const { return masm_; } + CompilationInfo* info() const { return info_; } + + // Support for converting LOperands to assembler types. + // LOperand must be a register. + Register ToRegister(LOperand* op) const; + + // LOperand is loaded into scratch, unless already a register. + Register EmitLoadRegister(LOperand* op, Register scratch); + + // LOperand must be a double register. + DoubleRegister ToDoubleRegister(LOperand* op) const; + + // LOperand is loaded into dbl_scratch, unless already a double register. + DoubleRegister EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DoubleRegister dbl_scratch); + int ToInteger32(LConstantOperand* op) const; + Operand ToOperand(LOperand* op); + MemOperand ToMemOperand(LOperand* op) const; + // Returns a MemOperand pointing to the high word of a DoubleStackSlot. + MemOperand ToHighMemOperand(LOperand* op) const; + // Try to generate code for the entire chunk, but it may fail if the // chunk contains constructs we cannot handle. 
Returns true if the // code generation attempt succeeded. @@ -94,8 +97,8 @@ class LCodeGen BASE_EMBEDDED { // Deferred code support. template<int T> - void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op); + void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); void DoDeferredNumberTagI(LNumberTagI* instr); void DoDeferredTaggedToI(LTaggedToI* instr); @@ -129,10 +132,13 @@ class LCodeGen BASE_EMBEDDED { bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + int strict_mode_flag() const { + return info()->is_strict() ? kStrictMode : kNonStrictMode; + } + LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } - MacroAssembler* masm() const { return masm_; } Register scratch0() { return r9; } DwVfpRegister double_scratch0() { return d0; } @@ -198,34 +204,15 @@ class LCodeGen BASE_EMBEDDED { Register ToRegister(int index) const; DoubleRegister ToDoubleRegister(int index) const; - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register. - DoubleRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch); - - int ToInteger32(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); - void EmitVFPTruncate(VFPRoundingMode rounding_mode, - SwVfpRegister result, - DwVfpRegister double_input, - Register scratch1, - Register scratch2); void DoMathFloor(LUnaryMathOperation* instr); + void DoMathRound(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); + void DoMathLog(LUnaryMathOperation* instr); + void DoMathCos(LUnaryMathOperation* instr); + void DoMathSin(LUnaryMathOperation* instr); // Support for recording safepoint and position information. void RecordSafepoint(LPointerMap* pointers, @@ -233,6 +220,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc new file mode 100644 index 00000000..1a2326b7 --- /dev/null +++ b/src/arm/lithium-gap-resolver-arm.cc @@ -0,0 +1,303 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-gap-resolver-arm.h"
+#include "arm/lithium-codegen-arm.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = { 9 };
+static const DoubleRegister kSavedDoubleValueRegister = { 0 };
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+      saved_destination_(NULL) { }
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  ASSERT(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last. They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      ASSERT(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph. We first recursively perform any move blocking this one. We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle. All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination. All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
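// (The smallest cycle is a two-move swap, e.g. {r0 -> r1, r1 -> r0}:
// neither move can run first without clobbering the other's source. A
// minimal sketch of the spill-based resolution, with plain ints standing
// in for registers, illustration only and not part of the patch:
//
//   int r0 = 1, r1 = 2;
//   int saved = r0;   // BreakCycle: spill the root move's source.
//   r0 = r1;          // The previously blocked move can now run.
//   r1 = saved;       // RestoreValue: complete the root move last.)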
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill this spilled value to
+  // the stack, to free up the register.
+  ASSERT(!moves_[index].IsPending());
+  ASSERT(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move. The actual
+  // destination is saved in a stack-allocated local. Multiple moves can
+  // be pending because this function is recursive.
+  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies. Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    ASSERT(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index_]. After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  ASSERT(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  ASSERT(in_cycle_);
+  ASSERT(saved_destination_ != NULL);
+
+  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
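// (Per the definitions at the top of this file these are r9 and d0, the
// same registers LCodeGen reserves as scratch0() and double_scratch0(),
// so allocated operands are assumed never to alias them.)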
+ if (saved_destination_->IsRegister()) { + __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); + } else if (saved_destination_->IsStackSlot()) { + __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); + } else if (saved_destination_->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(saved_destination_), + kSavedDoubleValueRegister); + } else if (saved_destination_->IsDoubleStackSlot()) { + __ vstr(kSavedDoubleValueRegister, + cgen_->ToMemOperand(saved_destination_)); + } else { + UNREACHABLE(); + } + + in_cycle_ = false; + saved_destination_ = NULL; +} + + +void LGapResolver::EmitMove(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + + if (source->IsRegister()) { + Register source_register = cgen_->ToRegister(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_register); + } else { + ASSERT(destination->IsStackSlot()); + __ str(source_register, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsRegister()) { + __ ldr(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + if (!destination_operand.OffsetIsUint12Encodable()) { + // ip is overwritten while saving the value to the destination. + // Therefore we can't use ip. It is OK if the read from the source + // destroys ip, since that happens before the value is read. + __ vldr(kSavedDoubleValueRegister.low(), source_operand); + __ vstr(kSavedDoubleValueRegister.low(), destination_operand); + } else { + __ ldr(ip, source_operand); + __ str(ip, destination_operand); + } + } else { + __ ldr(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, destination_operand); + } + } + + } else if (source->IsConstantOperand()) { + Operand source_operand = cgen_->ToOperand(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. + MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ mov(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsDoubleRegister()) { + DoubleRegister source_register = cgen_->ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(destination), source_register); + } else { + ASSERT(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ vstr(source_register, destination_operand); + } + + } else if (source->IsDoubleStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsDoubleRegister()) { + __ vldr(cgen_->ToDoubleRegister(destination), source_operand); + } else { + ASSERT(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + // kSavedDoubleValueRegister was used to break the cycle, + // but kSavedValueRegister is free. 
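// (A double stack slot spans two 32-bit words. With d0 holding the spilled
// cycle value, the copy goes word by word through the free core register:
// the low word via the operands above, the high word via the
// ToHighMemOperand() variants below.)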
+ MemOperand source_high_operand = + cgen_->ToHighMemOperand(source); + MemOperand destination_high_operand = + cgen_->ToHighMemOperand(destination); + __ ldr(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, destination_operand); + __ ldr(kSavedValueRegister, source_high_operand); + __ str(kSavedValueRegister, destination_high_operand); + } else { + __ vldr(kSavedDoubleValueRegister, source_operand); + __ vstr(kSavedDoubleValueRegister, destination_operand); + } + } + } else { + UNREACHABLE(); + } + + moves_[index].Eliminate(); +} + + +#undef __ + +} } // namespace v8::internal diff --git a/src/arm/lithium-gap-resolver-arm.h b/src/arm/lithium-gap-resolver-arm.h new file mode 100644 index 00000000..334d2920 --- /dev/null +++ b/src/arm/lithium-gap-resolver-arm.h @@ -0,0 +1,84 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ +#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ + +#include "v8.h" + +#include "lithium.h" + +namespace v8 { +namespace internal { + +class LCodeGen; +class LGapResolver; + +class LGapResolver BASE_EMBEDDED { + public: + + explicit LGapResolver(LCodeGen* owner); + + // Resolve a set of parallel moves, emitting assembler instructions. + void Resolve(LParallelMove* parallel_move); + + private: + // Build the initial list of moves. + void BuildInitialMoveList(LParallelMove* parallel_move); + + // Perform the move at the moves_ index in question (possibly requiring + // other moves to satisfy dependencies). + void PerformMove(int index); + + // If a cycle is found in the series of moves, save the blocking value to + // a scratch register. The cycle must be found by hitting the root of the + // depth-first search. + void BreakCycle(int index); + + // After a cycle has been resolved, restore the value from the scratch + // register to its proper destination. + void RestoreValue(); + + // Emit a move and remove it from the move graph. + void EmitMove(int index); + + // Verify the move list before performing moves. 
+ void Verify(); + + LCodeGen* cgen_; + + // List of moves not yet resolved. + ZoneList<LMoveOperands> moves_; + + int root_index_; + bool in_cycle_; + LOperand* saved_destination_; +}; + +} } // namespace v8::internal + +#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index 5d8df1af..d431f6a9 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -271,6 +271,29 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, } +void MacroAssembler::Bfi(Register dst, + Register src, + Register scratch, + int lsb, + int width, + Condition cond) { + ASSERT(0 <= lsb && lsb < 32); + ASSERT(0 <= width && width < 32); + ASSERT(lsb + width < 32); + ASSERT(!scratch.is(dst)); + if (width == 0) return; + if (!CpuFeatures::IsSupported(ARMv7)) { + int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); + bic(dst, dst, Operand(mask)); + and_(scratch, src, Operand((1 << width) - 1)); + mov(scratch, Operand(scratch, LSL, lsb)); + orr(dst, dst, scratch); + } else { + bfi(dst, src, lsb, width, cond); + } +} + + void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) { ASSERT(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7)) { @@ -485,18 +508,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() { PopSafepointRegisters(); } -void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) { - str(reg, SafepointRegistersAndDoublesSlot(reg)); +void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, + Register dst) { + str(src, SafepointRegistersAndDoublesSlot(dst)); } -void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) { - str(reg, SafepointRegisterSlot(reg)); +void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { + str(src, SafepointRegisterSlot(dst)); } -void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) { - ldr(reg, SafepointRegisterSlot(reg)); +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + ldr(dst, SafepointRegisterSlot(src)); } @@ -714,7 +738,8 @@ int MacroAssembler::ActivationFrameAlignment() { } -void MacroAssembler::LeaveExitFrame(bool save_doubles) { +void MacroAssembler::LeaveExitFrame(bool save_doubles, + Register argument_count) { // Optionally restore all double registers. if (save_doubles) { for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { @@ -736,12 +761,20 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { str(r3, MemOperand(ip)); #endif - // Tear down the exit frame, pop the arguments, and return. Callee-saved - // register r4 still holds argc. + // Tear down the exit frame, pop the arguments, and return. 
mov(sp, Operand(fp)); ldm(ia_w, sp, fp.bit() | lr.bit()); - add(sp, sp, Operand(r4, LSL, kPointerSizeLog2)); - mov(pc, lr); + if (argument_count.is_valid()) { + add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); + } +} + +void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { +#if !defined(USE_ARM_EABI) + UNREACHABLE(); +#else + vmov(dst, r0, r1); +#endif } @@ -929,8 +962,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, void MacroAssembler::IsObjectJSStringType(Register object, - Register scratch, - Label* fail) { + Register scratch, + Label* fail) { ASSERT(kNotStringTag != 0); ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); @@ -1005,6 +1038,117 @@ void MacroAssembler::PopTryHandler() { } +void MacroAssembler::Throw(Register value) { + // r0 is expected to hold the exception. + if (!value.is(r0)) { + mov(r0, value); + } + + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop the sp to the top of the handler. + mov(r3, Operand(ExternalReference(Top::k_handler_address))); + ldr(sp, MemOperand(r3)); + + // Restore the next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r2); + str(r2, MemOperand(r3)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. + + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + pop(pc); +} + + +void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, + Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // r0 is expected to hold the exception. + if (!value.is(r0)) { + mov(r0, value); + } + + // Drop sp to the top stack handler. + mov(r3, Operand(ExternalReference(Top::k_handler_address))); + ldr(sp, MemOperand(r3)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + ldr(r2, MemOperand(sp, kStateOffset)); + cmp(r2, Operand(StackHandler::ENTRY)); + b(eq, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + ldr(sp, MemOperand(sp, kNextOffset)); + jmp(&loop); + bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r2); + str(r2, MemOperand(r3)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + mov(r0, Operand(false, RelocInfo::NONE)); + mov(r2, Operand(external_caught)); + str(r0, MemOperand(r2)); + + // Set pending exception and r0 to out of memory exception. 
+ Failure* out_of_memory = Failure::OutOfMemoryException(); + mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); + mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + str(r0, MemOperand(r2)); + } + + // Stack layout at this point. See also StackHandlerConstants. + // sp -> state (ENTRY) + // fp + // lr + + // Discard handler state (r2 is not used) and restore frame pointer. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + pop(pc); +} + + void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { @@ -1102,6 +1246,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size, ASSERT(!result.is(scratch1)); ASSERT(!result.is(scratch2)); ASSERT(!scratch1.is(scratch2)); + ASSERT(!scratch1.is(ip)); + ASSERT(!scratch2.is(ip)); // Make object size into bytes. if ((flags & SIZE_IN_WORDS) != 0) { @@ -1391,6 +1537,14 @@ void MacroAssembler::CompareInstanceType(Register map, } +void MacroAssembler::CompareRoot(Register obj, + Heap::RootListIndex index) { + ASSERT(!obj.is(ip)); + LoadRoot(ip, index); + cmp(obj, ip); +} + + void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, @@ -1497,7 +1651,7 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( - ApiFunction* function, int stack_space) { + ExternalReference function, int stack_space) { ExternalReference next_address = ExternalReference::handle_scope_next_address(); const int kNextOffset = 0; @@ -1554,9 +1708,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( cmp(r4, r5); b(ne, &promote_scheduled_exception); - // LeaveExitFrame expects unwind space to be in r4. + // LeaveExitFrame expects unwind space to be in a register. mov(r4, Operand(stack_space)); - LeaveExitFrame(false); + LeaveExitFrame(false, r4); + mov(pc, lr); bind(&promote_scheduled_exception); MaybeObject* result = TryTailCallExternalReference( @@ -1696,9 +1851,9 @@ void MacroAssembler::ConvertToInt32(Register source, ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); // Get exponent alone in scratch2. Ubfx(scratch2, - scratch, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); + scratch, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); // Load dest with zero. We use this either for the final shift or // for the answer. 
   mov(dest, Operand(0, RelocInfo::NONE));
@@ -1761,6 +1916,52 @@ void MacroAssembler::ConvertToInt32(Register source,
 }
 
 
+void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+                                     SwVfpRegister result,
+                                     DwVfpRegister double_input,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     CheckForInexactConversion check_inexact) {
+  ASSERT(CpuFeatures::IsSupported(VFP3));
+  CpuFeatures::Scope scope(VFP3);
+  Register prev_fpscr = scratch1;
+  Register scratch = scratch2;
+
+  int32_t check_inexact_conversion =
+      (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
+
+  // Set custom FPCSR:
+  // - Set rounding mode.
+  // - Clear vfp cumulative exception flags.
+  // - Make sure Flush-to-zero mode control bit is unset.
+  vmrs(prev_fpscr);
+  bic(scratch,
+      prev_fpscr,
+      Operand(kVFPExceptionMask |
+              check_inexact_conversion |
+              kVFPRoundingModeMask |
+              kVFPFlushToZeroMask));
+  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
+  if (rounding_mode != kRoundToNearest) {
+    orr(scratch, scratch, Operand(rounding_mode));
+  }
+  vmsr(scratch);
+
+  // Convert the argument to an integer.
+  vcvt_s32_f64(result,
+               double_input,
+               (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
+                                               : kFPSCRRounding);
+
+  // Retrieve FPSCR.
+  vmrs(scratch);
+  // Restore FPSCR.
+  vmsr(prev_fpscr);
+  // Check for vfp exceptions.
+  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+}
+
+
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
@@ -2041,11 +2242,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
       ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
       ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
     }
-    // The context may be an intermediate context, not a function context.
-    ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context. Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context register cp).
+    mov(dst, cp);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
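// (Context for the check below, assuming the invariant that a function
// context stores itself in its own FCONTEXT_INDEX slot: loading that slot
// and comparing it against the context itself cheaply asserts that no
// 'with' context was reached.)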
+ if (FLAG_debug_code) { + ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); + cmp(dst, ip); + Check(eq, "Yo dawg, I heard you liked function contexts " + "so I put function contexts in all your contexts"); } } @@ -2122,12 +2334,23 @@ void MacroAssembler::AbortIfNotSmi(Register object) { } +void MacroAssembler::AbortIfNotString(Register object) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Assert(ne, "Operand is not a string"); + push(object); + ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); + CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); + pop(object); + Assert(lo, "Operand is not a string"); +} + + + void MacroAssembler::AbortIfNotRootValue(Register src, Heap::RootListIndex root_value_index, const char* message) { - ASSERT(!src.is(ip)); - LoadRoot(ip, root_value_index); - cmp(src, ip); + CompareRoot(src, root_value_index); Assert(eq, message); } @@ -2243,6 +2466,60 @@ void MacroAssembler::CopyFields(Register dst, } +void MacroAssembler::CopyBytes(Register src, + Register dst, + Register length, + Register scratch) { + Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; + + // Align src before copying in word size chunks. + bind(&align_loop); + cmp(length, Operand(0)); + b(eq, &done); + bind(&align_loop_1); + tst(src, Operand(kPointerSize - 1)); + b(eq, &word_loop); + ldrb(scratch, MemOperand(src, 1, PostIndex)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + sub(length, length, Operand(1), SetCC); + b(ne, &byte_loop_1); + + // Copy bytes in word size chunks. + bind(&word_loop); + if (FLAG_debug_code) { + tst(src, Operand(kPointerSize - 1)); + Assert(eq, "Expecting alignment for CopyBytes"); + } + cmp(length, Operand(kPointerSize)); + b(lt, &byte_loop); + ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); +#if CAN_USE_UNALIGNED_ACCESSES + str(scratch, MemOperand(dst, kPointerSize, PostIndex)); +#else + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); +#endif + sub(length, length, Operand(kPointerSize)); + b(&word_loop); + + // Copy the last bytes if any left. + bind(&byte_loop); + cmp(length, Operand(0)); + b(eq, &done); + bind(&byte_loop_1); + ldrb(scratch, MemOperand(src, 1, PostIndex)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + sub(length, length, Operand(1), SetCC); + b(ne, &byte_loop_1); + bind(&done); +} + + void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. Register source, // Input. Register scratch) { diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 36e4a1fe..aaf4458e 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -121,6 +121,15 @@ class MacroAssembler: public Assembler { Condition cond = al); void Sbfx(Register dst, Register src, int lsb, int width, Condition cond = al); + // The scratch register is not used for ARMv7. + // scratch can be the same register as src (in which case it is trashed), but + // not the same as dst. 
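// (Field-insert arithmetic, for reference: the pre-ARMv7 fallback in
// macro-assembler-arm.cc builds the mask as
//   (1 << (width + lsb)) - 1 - ((1 << lsb) - 1)  ==  ((1 << width) - 1) << lsb,
// clears that field in dst, masks src to width bits, shifts it into place,
// and ORs it in; ARMv7 does the same with a single bfi instruction.)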
+  void Bfi(Register dst,
+           Register src,
+           Register scratch,
+           int lsb,
+           int width,
+           Condition cond = al);
   void Bfc(Register dst, int lsb, int width, Condition cond = al);
   void Usat(Register dst, int satpos, const Operand& src, Condition cond = al);
@@ -234,18 +243,30 @@ class MacroAssembler: public Assembler {
     }
   }
 
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    if (src1.code() > src2.code()) {
+      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+    } else {
+      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
+      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+    }
+  }
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
   void PopSafepointRegisters();
   void PushSafepointRegistersAndDoubles();
   void PopSafepointRegistersAndDoubles();
-  void StoreToSafepointRegisterSlot(Register reg);
-  void StoreToSafepointRegistersAndDoublesSlot(Register reg);
-  void LoadFromSafepointRegisterSlot(Register reg);
-  static int SafepointRegisterStackIndex(int reg_code);
-  static MemOperand SafepointRegisterSlot(Register reg);
-  static MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   // Load two consecutive registers with two consecutive memory locations.
   void Ldrd(Register dst1,
@@ -297,7 +318,9 @@ class MacroAssembler: public Assembler {
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
 
   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame(bool save_doubles);
+  // Expects the number of values to remove (those pushed prior to the exit
+  // frame) to be passed in a register, or no_reg if there is nothing to remove.
+  void LeaveExitFrame(bool save_doubles, Register argument_count);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -371,6 +394,13 @@ class MacroAssembler: public Assembler {
   // Must preserve the result register.
   void PopTryHandler();
 
+  // Passes thrown value (in r0) to the handler at the top of the try handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
   // ---------------------------------------------------------------------------
   // Inline caching support
 
@@ -487,6 +517,14 @@ class MacroAssembler: public Assembler {
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
 
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written, and length will be zero.
+  void CopyBytes(Register src,
+                 Register dst,
+                 Register length,
+                 Register scratch);
+
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -539,6 +577,11 @@ class MacroAssembler: public Assembler {
                 bool is_heap_object);

+  // Compare the object in a register to a value from the root list.
+  // Uses the ip register as scratch.
+  void CompareRoot(Register obj, Heap::RootListIndex index);
+
+
   // Load and check the instance type of an object for being a string.
   // Loads the type into the second argument register.
   // Returns a condition that will be enabled if the object was a string.
@@ -603,6 +646,19 @@ class MacroAssembler: public Assembler {
                       DwVfpRegister double_scratch,
                       Label *not_int32);

+  // Truncates a double using a specific rounding mode.
+  // Clears the z flag (ne condition) if an overflow occurs.
+  // If check requests it, the z flag is also cleared if the conversion
+  // was inexact, i.e. if the double value could not be converted exactly
+  // to a 32-bit integer.
+  void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+                       SwVfpRegister result,
+                       DwVfpRegister double_input,
+                       Register scratch1,
+                       Register scratch2,
+                       CheckForInexactConversion check
+                           = kDontCheckForInexactConversion);
+
   // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
   // instruction. On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32). Source and scratch can be the same in which case
@@ -674,11 +730,13 @@ class MacroAssembler: public Assembler {
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, int num_arguments);

+  void GetCFunctionDoubleResult(const DoubleRegister dst);
+
   // Calls an API function. Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions. Restores context.
   // stack_space - space to be unwound on exit (includes the call js
   // arguments space and the additional space allocated for the fast call).
-  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
                                            int stack_space);

   // Jump to a runtime routine.
@@ -765,11 +823,11 @@ class MacroAssembler: public Assembler {
     mov(reg, scratch);
   }

-  void SmiUntag(Register reg) {
-    mov(reg, Operand(reg, ASR, kSmiTagSize));
+  void SmiUntag(Register reg, SBit s = LeaveCC) {
+    mov(reg, Operand(reg, ASR, kSmiTagSize), s);
   }
-  void SmiUntag(Register dst, Register src) {
-    mov(dst, Operand(src, ASR, kSmiTagSize));
+  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
+    mov(dst, Operand(src, ASR, kSmiTagSize), s);
   }

   // Jump if the register contains a smi.
@@ -791,6 +849,9 @@ class MacroAssembler: public Assembler {
   void AbortIfSmi(Register object);
   void AbortIfNotSmi(Register object);

+  // Abort execution if the argument is not a string. Used in debug code.
+  void AbortIfNotString(Register object);
+
   // Abort execution if argument is not the root value with the given index.
   void AbortIfNotRootValue(Register src,
                            Heap::RootListIndex root_value_index,
@@ -871,10 +932,19 @@ class MacroAssembler: public Assembler {
                              Register scratch1,
                              Register scratch2);

+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // traversal.
+ friend class OptimizedFrame; }; diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc index 94da0424..1f6ed671 100644 --- a/src/arm/regexp-macro-assembler-arm.cc +++ b/src/arm/regexp-macro-assembler-arm.cc @@ -57,48 +57,57 @@ namespace internal { * - r13/sp : points to tip of C stack. * * The remaining registers are free for computations. - * * Each call to a public method should retain this convention. + * * The stack will have the following structure: - * - direct_call (if 1, direct call from JavaScript code, if 0 call - * through the runtime system) - * - stack_area_base (High end of the memory area to use as - * backtracking stack) - * - int* capture_array (int[num_saved_registers_], for output). - * --- sp when called --- - * - link address - * - backup of registers r4..r11 - * - end of input (Address of end of string) - * - start of input (Address of first character in string) - * - start index (character index of start) - * --- frame pointer ---- - * - void* input_string (location of a handle containing the string) - * - Offset of location before start of input (effectively character - * position -1). Used to initialize capture registers to a non-position. - * - At start (if 1, we are starting at the start of the - * string, otherwise 0) - * - register 0 (Only positions must be stored in the first - * - register 1 num_saved_registers_ registers) - * - ... - * - register num_registers-1 - * --- sp --- + * - fp[48] direct_call (if 1, direct call from JavaScript code, + * if 0, call through the runtime system). + * - fp[44] stack_area_base (High end of the memory area to use as + * backtracking stack). + * - fp[40] int* capture_array (int[num_saved_registers_], for output). + * - fp[36] secondary link/return address used by native call. + * --- sp when called --- + * - fp[32] return address (lr). + * - fp[28] old frame pointer (r11). + * - fp[0..24] backup of registers r4..r10. + * --- frame pointer ---- + * - fp[-4] end of input (Address of end of string). + * - fp[-8] start of input (Address of first character in string). + * - fp[-12] start index (character index of start). + * - fp[-16] void* input_string (location of a handle containing the string). + * - fp[-20] Offset of location before start of input (effectively character + * position -1). Used to initialize capture registers to a + * non-position. + * - fp[-24] At start (if 1, we are starting at the start of the + * string, otherwise 0) + * - fp[-28] register 0 (Only positions must be stored in the first + * - register 1 num_saved_registers_ registers) + * - ... + * - register num_registers-1 + * --- sp --- * * The first num_saved_registers_ registers are initialized to point to * "character -1" in the string (i.e., char_size() bytes before the first * character of the string). The remaining registers start out as garbage. * * The data up to the return address must be placed there by the calling - * code, by calling the code entry as cast to a function with the signature: + * code and the remaining arguments are passed in registers, e.g. by calling the + * code entry as cast to a function with the signature: * int (*match)(String* input_string, * int start_index, * Address start, * Address end, + * Address secondary_return_address, // Only used by native call. * int* capture_output_array, - * bool at_start, * byte* stack_area_base, - * bool direct_call) + * bool direct_call = false) * The call is performed by NativeRegExpMacroAssembler::Execute() - * (in regexp-macro-assembler.cc). 
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro + * in arm/simulator-arm.h. + * When calling as a non-direct call (i.e., from C++ code), the return address + * area is overwritten with the LR register by the RegExp code. When doing a + * direct call from generated code, the return address is placed there by + * the calling code, as in a normal exit frame. */ #define __ ACCESS_MASM(masm_) @@ -598,16 +607,17 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Entry code: __ bind(&entry_label_); - // Push Link register. // Push arguments // Save callee-save registers. // Start new stack frame. + // Store link register in existing stack-cell. // Order here should correspond to order of offset constants in header file. RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit(); RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit(); __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit()); - // Set frame pointer just above the arguments. + // Set frame pointer in space for it if this is not a direct call + // from generated code. __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); __ push(r0); // Make room for "position - 1" constant (value is irrelevant). __ push(r0); // Make room for "at start" constant (value is irrelevant). @@ -764,10 +774,9 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { if (stack_overflow_label_.is_linked()) { SafeCallTarget(&stack_overflow_label_); // Reached if the backtrack-stack limit has been hit. - Label grow_failed; - // Call GrowStack(backtrack_stackpointer()) + // Call GrowStack(backtrack_stackpointer(), &stack_base) static const int num_arguments = 2; __ PrepareCallCFunction(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h index b487ba59..d9d0b356 100644 --- a/src/arm/regexp-macro-assembler-arm.h +++ b/src/arm/regexp-macro-assembler-arm.h @@ -122,8 +122,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kStoredRegisters = kFramePointer; // Return address (stored from link register, read into pc on return). static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize; + static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; // Stack parameters placed by caller. 
- static const int kRegisterOutput = kReturnAddress + kPointerSize; + static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize; static const int kStackHighEnd = kRegisterOutput + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc index 8104747f..f475a18b 100644 --- a/src/arm/simulator-arm.cc +++ b/src/arm/simulator-arm.cc @@ -1005,7 +1005,9 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) { intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); return *ptr; } - PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); UNIMPLEMENTED(); return 0; #endif @@ -1023,7 +1025,9 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) { *ptr = value; return; } - PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); UNIMPLEMENTED(); #endif } @@ -1038,7 +1042,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); return *ptr; } - PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); UNIMPLEMENTED(); return 0; #endif @@ -1072,7 +1078,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) { *ptr = value; return; } - PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); UNIMPLEMENTED(); #endif } @@ -1089,7 +1097,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) { *ptr = value; return; } - PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr); + PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); UNIMPLEMENTED(); #endif } @@ -1531,7 +1541,11 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0, // This signature supports direct call in to API function native callback // (refer to InvocationCallback in v8.h). -typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0); +typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0); + +// This signature supports direct call to accessor getter callback. +typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, + int32_t arg1); // Software interrupt instructions are used by the simulator to call into the // C-based V8 runtime. 
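The PrintF changes in the simulator hunks above fix a format-string mismatch rather than just cosmetics: %p requires a void* argument, and passing an Instruction* through a printf-style variadic function is formally undefined behavior, so the patch formats the pointer as an integer using V8's V8PRIxPTR macro. The same pattern in standalone C++, with the standard PRIxPTR macro standing in for V8PRIxPTR (the struct and function names here are illustrative):

  #include <cinttypes>
  #include <cstdio>

  struct Instruction;  // opaque, as in the simulator

  void ReportUnalignedRead(int32_t addr, Instruction* instr) {
    // Cast the pointer to an integer of pointer width and format it
    // explicitly instead of handing a non-void* pointer to %p.
    std::printf("Unaligned read at 0x%08" PRIx32 ", pc=0x%08" PRIxPTR "\n",
                static_cast<uint32_t>(addr),
                reinterpret_cast<uintptr_t>(instr));
  }
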
@@ -1572,14 +1586,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { CHECK(stack_aligned); double result = target(arg0, arg1, arg2, arg3); SetFpResult(result); - } else if (redirection->type() == ExternalReference::DIRECT_CALL) { - SimulatorRuntimeApiCall target = - reinterpret_cast<SimulatorRuntimeApiCall>(external); + } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { + SimulatorRuntimeDirectApiCall target = + reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { - PrintF( - "Call to host function at %p args %08x", - FUNCTION_ADDR(target), - arg0); + PrintF("Call to host function at %p args %08x", + FUNCTION_ADDR(target), arg0); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } @@ -1591,6 +1603,23 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF("Returned %p\n", reinterpret_cast<void *>(*result)); } set_register(r0, (int32_t) *result); + } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { + SimulatorRuntimeDirectGetterCall target = + reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); + if (::v8::internal::FLAG_trace_sim || !stack_aligned) { + PrintF("Call to host function at %p args %08x %08x", + FUNCTION_ADDR(target), arg0, arg1); + if (!stack_aligned) { + PrintF(" with unaligned stack %08x\n", get_register(sp)); + } + PrintF("\n"); + } + CHECK(stack_aligned); + v8::Handle<v8::Value> result = target(arg0, arg1); + if (::v8::internal::FLAG_trace_sim) { + PrintF("Returned %p\n", reinterpret_cast<void *>(*result)); + } + set_register(r0, (int32_t) *result); } else { // builtin call. ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL); @@ -2535,6 +2564,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); double dd_value = dn_value / dm_value; + div_zero_vfp_flag_ = (dm_value == 0); set_d_register_from_double(vd, dd_value); } else { UNIMPLEMENTED(); // Not used by V8. @@ -2769,14 +2799,17 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer); + double abs_diff = + unsigned_integer ? fabs(val - static_cast<uint32_t>(temp)) + : fabs(val - temp); + + inexact_vfp_flag_ = (abs_diff != 0); + if (inv_op_vfp_flag_) { temp = VFPConversionSaturate(val, unsigned_integer); } else { switch (mode) { case RN: { - double abs_diff = - unsigned_integer ? fabs(val - static_cast<uint32_t>(temp)) - : fabs(val - temp); int val_sign = (val > 0) ? 1 : -1; if (abs_diff > 0.5) { temp += val_sign; diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h index 5256ae35..bdf1f8a1 100644 --- a/src/arm/simulator-arm.h +++ b/src/arm/simulator-arm.h @@ -48,10 +48,16 @@ namespace internal { #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ (entry(p0, p1, p2, p3, p4)) -// Call the generated regexp code directly. The entry function pointer should -// expect seven int/pointer sized arguments and return an int. +typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, + void*, int*, Address, int); + + +// Call the generated regexp code directly. The code at the entry address +// should act as a function matching the type arm_regexp_matcher. +// The fifth argument is a dummy that reserves the space used for +// the return address added by the ExitFrame in native calls. 
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ - (entry(p0, p1, p2, p3, p4, p5, p6)) + (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ (reinterpret_cast<TryCatch*>(try_catch_address)) @@ -362,8 +368,7 @@ class Simulator { FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ - Simulator::current()->Call( \ - FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6) + Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ try_catch_address == \ diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 9ef61158..60a11f3c 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -655,12 +655,10 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, // already generated). Do not allow the assembler to perform a // garbage collection but instead return the allocation failure // object. - MaybeObject* result = masm->TryCallApiFunctionAndReturn( - &fun, argc + kFastApiCallArguments + 1); - if (result->IsFailure()) { - return result; - } - return Heap::undefined_value(); + const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; + ExternalReference ref = + ExternalReference(&fun, ExternalReference::DIRECT_API_CALL); + return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } class CallInterceptorCompiler BASE_EMBEDDED { @@ -1245,18 +1243,38 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - // Push the arguments on the JS stack of the caller. - __ push(receiver); // Receiver. - __ mov(scratch3, Operand(Handle<AccessorInfo>(callback))); // callback data - __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); - __ Push(reg, ip, scratch3, name_reg); + // Build AccessorInfo::args_ list on the stack and push property name below + // the exit frame to make GC aware of them and store pointers to them. + __ push(receiver); + __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_ + Handle<AccessorInfo> callback_handle(callback); + if (Heap::InNewSpace(callback_handle->data())) { + __ Move(scratch3, callback_handle); + __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); + } else { + __ Move(scratch3, Handle<Object>(callback_handle->data())); + } + __ Push(reg, scratch3, name_reg); + __ mov(r0, sp); // r0 = Handle<String> + + Address getter_address = v8::ToCData<Address>(callback->getter()); + ApiFunction fun(getter_address); - // Do tail-call to the runtime system. - ExternalReference load_callback_property = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallExternalReference(load_callback_property, 5, 1); + const int kApiStackSpace = 1; + __ EnterExitFrame(false, kApiStackSpace); + // Create AccessorInfo instance on the stack above the exit frame with + // scratch2 (internal::Object **args_) as the data. + __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); + __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& - return Heap::undefined_value(); // Success. + // Emitting a stub call may try to allocate (if the code is not + // already generated). Do not allow the assembler to perform a + // garbage collection but instead return the allocation failure + // object. 
+ const int kStackUnwindSpace = 4; + ExternalReference ref = + ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL); + return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -2332,8 +2350,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { // Check that the object is a two-byte string or a symbol. @@ -2348,8 +2367,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2369,8 +2389,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2650,10 +2671,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, __ Push(r1, r2, r0); // Receiver, name, value. + __ mov(r0, Operand(Smi::FromInt(strict_mode_))); + __ push(r0); // strict mode + // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallExternalReference(store_ic_property, 3, 1); + __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. __ bind(&miss); @@ -3259,6 +3283,47 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized( } +MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray( + JSObject* receiver) { + // ----------- S t a t e ------------- + // -- r0 : value + // -- r1 : key + // -- r2 : receiver + // -- r3 : scratch + // -- r4 : scratch + // -- r5 : scratch + // -- r6 : scratch + // -- lr : return address + // ----------------------------------- + Label miss; + + // Check that the map matches. + __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false); + + GenerateFastPixelArrayStore(masm(), + r2, + r1, + r0, + r3, + r4, + r5, + r6, + true, + true, + &miss, + &miss, + NULL, + &miss); + + __ bind(&miss); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, NULL); +} + + MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // ----------- S t a t e ------------- // -- r0 : argc @@ -3994,7 +4059,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( // Push receiver, key and value for runtime call. 
__ Push(r2, r1, r0); - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt( + Code::ExtractExtraICStateFromFlags(flags) & kStrictMode))); + __ Push(r1, r0); + + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); return GetCode(flags); } diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc index b4b518cf..544e405d 100644 --- a/src/arm/virtual-frame-arm.cc +++ b/src/arm/virtual-frame-arm.cc @@ -332,9 +332,9 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) { void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual, StrictModeFlag strict_mode) { - Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); PopToR0(); RelocInfo::Mode mode; if (is_contextual) { @@ -359,8 +359,10 @@ void VirtualFrame::CallKeyedLoadIC() { } -void VirtualFrame::CallKeyedStoreIC() { - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); +void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) { + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); PopToR1R0(); SpillAll(); EmitPop(r2); diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h index b6e794a5..76470bdc 100644 --- a/src/arm/virtual-frame-arm.h +++ b/src/arm/virtual-frame-arm.h @@ -303,7 +303,7 @@ class VirtualFrame : public ZoneObject { // Call keyed store IC. Value, key and receiver are on the stack. All three // are consumed. Result is returned in r0. - void CallKeyedStoreIC(); + void CallKeyedStoreIC(StrictModeFlag strict_mode); // Call into an IC stub given the number of arguments it removes // from the stack. Register arguments to the IC stub are implicit, diff --git a/src/array.js b/src/array.js index 1298434d..6ed14760 100644 --- a/src/array.js +++ b/src/array.js @@ -33,7 +33,7 @@ // Global list of arrays visited during toString, toLocaleString and // join invocations. -var visited_arrays = new $Array(); +var visited_arrays = new InternalArray(); // Gets a sorted array of array keys. Useful for operations on sparse @@ -73,7 +73,7 @@ function SparseJoin(array, len, convert) { var last_key = -1; var keys_length = keys.length; - var elements = new $Array(keys_length); + var elements = new InternalArray(keys_length); var elements_length = 0; for (var i = 0; i < keys_length; i++) { @@ -122,7 +122,7 @@ function Join(array, length, separator, convert) { } // Construct an array for the elements. - var elements = new $Array(length); + var elements = new InternalArray(length); // We pull the empty separator check outside the loop for speed! if (separator.length == 0) { @@ -140,7 +140,7 @@ function Join(array, length, separator, convert) { return %StringBuilderConcat(elements, elements_length, ''); } // Non-empty separator case. - // If the first element is a number then use the heuristic that the + // If the first element is a number then use the heuristic that the // remaining elements are also likely to be numbers. 
if (!IS_NUMBER(array[0])) { for (var i = 0; i < length; i++) { @@ -148,7 +148,7 @@ function Join(array, length, separator, convert) { if (!IS_STRING(e)) e = convert(e); elements[i] = e; } - } else { + } else { for (var i = 0; i < length; i++) { var e = array[i]; if (IS_NUMBER(e)) elements[i] = %_NumberToString(e); @@ -157,19 +157,11 @@ function Join(array, length, separator, convert) { elements[i] = e; } } - } - var result = %_FastAsciiArrayJoin(elements, separator); - if (!IS_UNDEFINED(result)) return result; - - var length2 = (length << 1) - 1; - var j = length2; - var i = length; - elements[--j] = elements[--i]; - while (i > 0) { - elements[--j] = separator; - elements[--j] = elements[--i]; } - return %StringBuilderConcat(elements, length2, ''); + var result = %_FastAsciiArrayJoin(elements, separator); + if (!IS_UNDEFINED(result)) return result; + + return %StringBuilderJoin(elements, length, separator); } finally { // Make sure to remove the last element of the visited array no // matter what happens. @@ -179,7 +171,7 @@ function Join(array, length, separator, convert) { function ConvertToString(x) { - // Assumes x is a non-string. + // Assumes x is a non-string. if (IS_NUMBER(x)) return %_NumberToString(x); if (IS_BOOLEAN(x)) return x ? 'true' : 'false'; return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x)); @@ -249,7 +241,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) { // special array operations to handle sparse arrays in a sensible fashion. function SmartMove(array, start_i, del_count, len, num_additional_args) { // Move data to new array. - var new_array = new $Array(len - del_count + num_additional_args); + var new_array = new InternalArray(len - del_count + num_additional_args); var intervals = %GetArrayKeys(array, len); var length = intervals.length; for (var k = 0; k < length; k++) { @@ -426,9 +418,8 @@ function ArrayPush() { function ArrayConcat(arg1) { // length == 1 - // TODO: can we just use arguments? var arg_count = %_ArgumentsLength(); - var arrays = new $Array(1 + arg_count); + var arrays = new InternalArray(1 + arg_count); arrays[0] = this; for (var i = 0; i < arg_count; i++) { arrays[i + 1] = %_Arguments(i); @@ -934,7 +925,9 @@ function ArrayFilter(f, receiver) { for (var i = 0; i < length; i++) { var current = this[i]; if (!IS_UNDEFINED(current) || i in this) { - if (f.call(receiver, current, i, this)) result[result_length++] = current; + if (f.call(receiver, current, i, this)) { + result[result_length++] = current; + } } } return result; @@ -999,13 +992,15 @@ function ArrayMap(f, receiver) { // Pull out the length so that modifications to the length in the // loop will not affect the looping. var length = TO_UINT32(this.length); - var result = new $Array(length); + var result = new $Array(); + var accumulator = new InternalArray(length); for (var i = 0; i < length; i++) { var current = this[i]; if (!IS_UNDEFINED(current) || i in this) { - result[i] = f.call(receiver, current, i, this); + accumulator[i] = f.call(receiver, current, i, this); } } + %MoveArrayContents(accumulator, result); return result; } @@ -1026,13 +1021,13 @@ function ArrayIndexOf(element, index) { } var min = index; var max = length; - if (UseSparseVariant(this, length, true)) { + if (UseSparseVariant(this, length, IS_ARRAY(this))) { var intervals = %GetArrayKeys(this, length); if (intervals.length == 2 && intervals[0] < 0) { // A single interval. 
var intervalMin = -(intervals[0] + 1); var intervalMax = intervalMin + intervals[1]; - min = MAX(min, intervalMin); + if (min < intervalMin) min = intervalMin; max = intervalMax; // Capped by length already. // Fall through to loop below. } else { @@ -1082,13 +1077,13 @@ function ArrayLastIndexOf(element, index) { } var min = 0; var max = index; - if (UseSparseVariant(this, length, true)) { + if (UseSparseVariant(this, length, IS_ARRAY(this))) { var intervals = %GetArrayKeys(this, index + 1); if (intervals.length == 2 && intervals[0] < 0) { // A single interval. var intervalMin = -(intervals[0] + 1); var intervalMax = intervalMin + intervals[1]; - min = MAX(min, intervalMin); + if (min < intervalMin) min = intervalMin; max = intervalMax; // Capped by index already. // Fall through to loop below. } else { @@ -1234,6 +1229,20 @@ function SetupArray() { )); %FinishArrayPrototypeSetup($Array.prototype); + + // The internal Array prototype doesn't need to be fancy, since it's never + // exposed to user code, so no hidden prototypes or DONT_ENUM attributes + // are necessary. + // The null __proto__ ensures that we never inherit any user created + // getters or setters from, e.g., Object.prototype. + InternalArray.prototype.__proto__ = null; + // Adding only the functions that are actually used, and a toString. + InternalArray.prototype.join = getFunction("join", ArrayJoin); + InternalArray.prototype.pop = getFunction("pop", ArrayPop); + InternalArray.prototype.push = getFunction("push", ArrayPush); + InternalArray.prototype.toString = function() { + return "Internal Array, length " + this.length; + }; } diff --git a/src/assembler.cc b/src/assembler.cc index ef2094f6..b0b44fd9 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -68,7 +68,7 @@ const double DoubleConstant::min_int = kMinInt; const double DoubleConstant::one_half = 0.5; const double DoubleConstant::minus_zero = -0.0; const double DoubleConstant::negative_infinity = -V8_INFINITY; - +const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; // ----------------------------------------------------------------------------- // Implementation of Label @@ -228,6 +228,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteTaggedPC(pc_delta, kEmbeddedObjectTag); } else if (rmode == RelocInfo::CODE_TARGET) { WriteTaggedPC(pc_delta, kCodeTargetTag); + ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize); } else if (RelocInfo::IsPosition(rmode)) { // Use signed delta-encoding for data. intptr_t data_delta = rinfo->data() - last_data_; @@ -251,6 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteExtraTaggedPC(pc_delta, kPCJumpTag); WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag); last_data_ = rinfo->data(); + ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize); } else { // For all other modes we simply use the mode as the extra tag. // None of these modes need a data component. 
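The two ASSERTs added to RelocInfoWriter::Write above encode a size contract made explicit in the assembler.h hunk below: a comment record occupies at least kMinRelocCommentSize = 3 + kPointerSize bytes (the extra tagged pc and data tag, plus the pointer to the comment text), while a call with pc-jump occupies at most kMaxCallSize = 6 bytes. On a 32-bit target the padding comment is therefore always large enough to patch a call over it, which the DEOPTIMIZATION PADDING filler described below relies on for lazy deoptimization. A compile-time check of that arithmetic, with the constants copied from those definitions and kPointerSize taken as 4 (a 32-bit target):

  // Values from the assembler.h hunk below, specialized to a 32-bit target.
  constexpr int kPointerSize = 4;
  constexpr int kMinRelocCommentSize = 3 + kPointerSize;  // tag bytes + pointer
  constexpr int kMaxCallSize = 6;                         // call incl. pc-jump

  // The padding comment must be at least as large as a patched call site.
  static_assert(kMinRelocCommentSize >= kMaxCallSize,
                "comment padding must cover a patched call");

  int main() { return 0; }
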
@@ -814,6 +816,39 @@ static double mod_two_doubles(double x, double y) { } +static double math_sin_double(double x) { + return sin(x); +} + + +static double math_cos_double(double x) { + return cos(x); +} + + +static double math_log_double(double x) { + return log(x); +} + + +ExternalReference ExternalReference::math_sin_double_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(math_sin_double), + FP_RETURN_CALL)); +} + + +ExternalReference ExternalReference::math_cos_double_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(math_cos_double), + FP_RETURN_CALL)); +} + + +ExternalReference ExternalReference::math_log_double_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(math_log_double), + FP_RETURN_CALL)); +} + + // Helper function to compute x^y, where y is known to be an // integer. Uses binary decomposition to limit the number of // multiplications; see the discussion in "Hacker's Delight" by Henry @@ -850,12 +885,14 @@ double power_double_double(double x, double y) { ExternalReference ExternalReference::power_double_double_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double))); + return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double), + FP_RETURN_CALL)); } ExternalReference ExternalReference::power_double_int_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int))); + return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int), + FP_RETURN_CALL)); } diff --git a/src/assembler.h b/src/assembler.h index b089b090..8ebbfadf 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -178,6 +178,20 @@ class RelocInfo BASE_EMBEDDED { // invalid/uninitialized position value. static const int kNoPosition = -1; + // This string is used to add padding comments to the reloc info in cases + // where we are not sure to have enough space for patching in during + // lazy deoptimization. This is the case if we have indirect calls for which + // we do not normally record relocation info. + static const char* kFillerCommentString; + + // The minimum size of a comment is equal to three bytes for the extra tagged + // pc + the tag for the data, and kPointerSize for the actual pointer to the + // comment. + static const int kMinRelocCommentSize = 3 + kPointerSize; + + // The maximum size for a call instruction including pc-jump. + static const int kMaxCallSize = 6; + enum Mode { // Please note the order is important (see IsCodeTarget, IsGCRelocMode). CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor. @@ -467,21 +481,22 @@ class Debug_Address; class ExternalReference BASE_EMBEDDED { public: // Used in the simulator to support different native api calls. - // - // BUILTIN_CALL - builtin call. - // MaybeObject* f(v8::internal::Arguments). - // - // FP_RETURN_CALL - builtin call that returns floating point. - // double f(double, double). - // - // DIRECT_CALL - direct call to API function native callback - // from generated code. - // Handle<Value> f(v8::Arguments&) - // enum Type { + // Builtin call. + // MaybeObject* f(v8::internal::Arguments). BUILTIN_CALL, // default + + // Builtin call that returns floating point. + // double f(double, double). FP_RETURN_CALL, - DIRECT_CALL + + // Direct call to API function callback. + // Handle<Value> f(v8::Arguments&) + DIRECT_API_CALL, + + // Direct call to accessor getter callback. 
+ // Handle<value> f(Local<String> property, AccessorInfo& info) + DIRECT_GETTER_CALL }; typedef void* ExternalReferenceRedirector(void* original, Type type); @@ -576,6 +591,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference address_of_minus_zero(); static ExternalReference address_of_negative_infinity(); + static ExternalReference math_sin_double_function(); + static ExternalReference math_cos_double_function(); + static ExternalReference math_log_double_function(); + Address address() const {return reinterpret_cast<Address>(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc index 415d2dd8..8cd29b21 100644 --- a/src/bootstrapper.cc +++ b/src/bootstrapper.cc @@ -1240,6 +1240,43 @@ bool Genesis::InstallNatives() { global_context()->set_opaque_reference_function(*opaque_reference_fun); } + { // --- I n t e r n a l A r r a y --- + // An array constructor on the builtins object that works like + // the public Array constructor, except that its prototype + // doesn't inherit from Object.prototype. + // To be used only for internal work by builtins. Instances + // must not be leaked to user code. + // Only works correctly when called as a constructor. The normal + // Array code uses Array.prototype as prototype when called as + // a function. + Handle<JSFunction> array_function = + InstallFunction(builtins, + "InternalArray", + JS_ARRAY_TYPE, + JSArray::kSize, + Top::initial_object_prototype(), + Builtins::ArrayCode, + true); + Handle<JSObject> prototype = + Factory::NewJSObject(Top::object_function(), TENURED); + SetPrototype(array_function, prototype); + + array_function->shared()->set_construct_stub( + Builtins::builtin(Builtins::ArrayConstructCode)); + array_function->shared()->DontAdaptArguments(); + + // Make "length" magic on instances. 
+ Handle<DescriptorArray> array_descriptors = + Factory::CopyAppendProxyDescriptor( + Factory::empty_descriptor_array(), + Factory::length_symbol(), + Factory::NewProxy(&Accessors::ArrayLength), + static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE)); + + array_function->initial_map()->set_instance_descriptors( + *array_descriptors); + } + if (FLAG_disable_native_files) { PrintF("Warning: Running without installed natives!\n"); return true; @@ -1358,6 +1395,7 @@ bool Genesis::InstallNatives() { global_context()->set_regexp_result_map(*initial_map); } + #ifdef DEBUG builtins->Verify(); #endif diff --git a/src/builtins.cc b/src/builtins.cc index 8fdc1b13..01e8deb4 100644 --- a/src/builtins.cc +++ b/src/builtins.cc @@ -1328,12 +1328,12 @@ static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) { static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) { - StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict); + StoreIC::GenerateMegamorphic(masm, kNonStrictMode); } static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) { - StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict); + StoreIC::GenerateMegamorphic(masm, kStrictMode); } @@ -1348,17 +1348,22 @@ static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) { static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) { - StoreIC::GenerateGlobalProxy(masm); + StoreIC::GenerateGlobalProxy(masm, kNonStrictMode); } static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) { - StoreIC::GenerateGlobalProxy(masm); + StoreIC::GenerateGlobalProxy(masm, kStrictMode); } static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) { - KeyedStoreIC::GenerateGeneric(masm); + KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode); +} + + +static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) { + KeyedStoreIC::GenerateGeneric(masm, kStrictMode); } @@ -1372,6 +1377,11 @@ static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) { } +static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) { + KeyedStoreIC::GenerateInitialize(masm); +} + + #ifdef ENABLE_DEBUGGER_SUPPORT static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) { Debug::GenerateLoadICDebugBreak(masm); diff --git a/src/builtins.h b/src/builtins.h index ada23a75..5ea46651 100644 --- a/src/builtins.h +++ b/src/builtins.h @@ -62,111 +62,116 @@ enum BuiltinExtraArguments { // Define list of builtins implemented in assembly. 
-#define BUILTIN_LIST_A(V) \ - V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructCall, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(LazyCompile, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(LazyRecompile, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(NotifyOSR, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - \ - V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - \ - V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ - StoreIC::kStoreICStrict) \ - V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \ - StoreIC::kStoreICStrict) \ - \ - V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \ - Code::kNoExtraICState) \ - \ - /* Uses KeyedLoadIC_Initialize; must be after in list. 
*/ \ - V(FunctionCall, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(FunctionApply, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(ArrayCode, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(StringConstructCode, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ - \ - V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) +#define BUILTIN_LIST_A(V) \ + V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructCall, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LazyCompile, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LazyRecompile, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyOSR, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + \ + V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + \ + V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \ + kStrictMode) \ + V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \ + kStrictMode) \ + V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ + kStrictMode) \ + V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ + kStrictMode) \ + V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \ + kStrictMode) \ + \ + V(KeyedStoreIC_Initialize, 
KEYED_STORE_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + \ + V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \ + kStrictMode) \ + V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \ + kStrictMode) \ + \ + /* Uses KeyedLoadIC_Initialize; must be after in list. */ \ + V(FunctionCall, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(FunctionApply, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(ArrayCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(StringConstructCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + \ + V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) #ifdef ENABLE_DEBUGGER_SUPPORT @@ -214,7 +219,7 @@ enum BuiltinExtraArguments { V(SHL, 1) \ V(SAR, 1) \ V(SHR, 1) \ - V(DELETE, 1) \ + V(DELETE, 2) \ V(IN, 1) \ V(INSTANCE_OF, 1) \ V(GET_KEYS, 0) \ diff --git a/src/code-stubs.h b/src/code-stubs.h index 0d0e37ff..96ac7335 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -86,9 +86,6 @@ namespace internal { CODE_STUB_LIST_ALL_PLATFORMS(V) \ CODE_STUB_LIST_ARM(V) -// Types of uncatchable exceptions. -enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; - // Mode to overwrite BinaryExpression values. enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE }; diff --git a/src/compiler.cc b/src/compiler.cc index ae7b2b9f..667432f2 100755 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -221,11 +221,12 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { // or perform on-stack replacement for function with too many // stack-allocated local variables. // - // The encoding is as a signed value, with parameters using the negative - // indices and locals the non-negative ones. + // The encoding is as a signed value, with parameters and receiver using + // the negative indices and locals the non-negative ones. const int limit = LUnallocated::kMaxFixedIndices / 2; Scope* scope = info->scope(); - if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) { + if ((scope->num_parameters() + 1) > limit || + scope->num_stack_slots() > limit) { AbortAndDisable(info); // True indicates the compilation pipeline is still going, not // necessarily that we optimized the code. @@ -261,10 +262,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { Handle<SharedFunctionInfo> shared = info->shared_info(); shared->EnableDeoptimizationSupport(*unoptimized.code()); // The existing unoptimized code was replaced with the new one. - Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, - Handle<String>(shared->DebugName()), - shared->start_position(), - &unoptimized); + Compiler::RecordFunctionCompilation( + Logger::LAZY_COMPILE_TAG, &unoptimized, shared); } } @@ -273,7 +272,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { // optimizable marker in the code object and optimize anyway. This // is safe as long as the unoptimized code has deoptimization // support. 
- ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable()); + ASSERT(FLAG_always_opt || code->optimizable()); ASSERT(info->shared_info()->has_deoptimization_support()); if (FLAG_trace_hydrogen) { @@ -282,21 +281,20 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { HTracer::Instance()->TraceCompilation(info->function()); } - TypeFeedbackOracle oracle( - Handle<Code>(info->shared_info()->code()), - Handle<Context>(info->closure()->context()->global_context())); - HGraphBuilder builder(&oracle); + Handle<Context> global_context(info->closure()->context()->global_context()); + TypeFeedbackOracle oracle(code, global_context); + HGraphBuilder builder(info, &oracle); HPhase phase(HPhase::kTotal); - HGraph* graph = builder.CreateGraph(info); + HGraph* graph = builder.CreateGraph(); if (Top::has_pending_exception()) { info->SetCode(Handle<Code>::null()); return false; } if (graph != NULL && FLAG_build_lithium) { - Handle<Code> code = graph->Compile(); - if (!code.is_null()) { - info->SetCode(code); + Handle<Code> optimized_code = graph->Compile(info); + if (!optimized_code.is_null()) { + info->SetCode(optimized_code); FinishOptimization(info->closure(), start); return true; } @@ -415,13 +413,25 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { return Handle<SharedFunctionInfo>::null(); } + // Allocate function. ASSERT(!info->code().is_null()); + Handle<SharedFunctionInfo> result = + Factory::NewSharedFunctionInfo( + lit->name(), + lit->materialized_literal_count(), + info->code(), + SerializedScopeInfo::Create(info->scope())); + + ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); + Compiler::SetFunctionInfo(result, lit, true, script); + if (script->name()->IsString()) { PROFILE(CodeCreateEvent( info->is_eval() ? Logger::EVAL_TAG : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), + *result, String::cast(script->name()))); GDBJIT(AddCode(Handle<String>(String::cast(script->name())), script, @@ -432,21 +442,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { ? Logger::EVAL_TAG : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), - "")); + *result, + Heap::empty_string())); GDBJIT(AddCode(Handle<String>(), script, info->code())); } - // Allocate function. - Handle<SharedFunctionInfo> result = - Factory::NewSharedFunctionInfo( - lit->name(), - lit->materialized_literal_count(), - info->code(), - SerializedScopeInfo::Create(info->scope())); - - ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); - Compiler::SetFunctionInfo(result, lit, true, script); - // Hint to the runtime system used when allocating space for initial // property space by setting the expected number of properties for // the instances of the function. @@ -613,10 +613,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { ASSERT(!info->code().is_null()); Handle<Code> code = info->code(); Handle<JSFunction> function = info->closure(); - RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, - Handle<String>(shared->DebugName()), - shared->start_position(), - info); + RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared); if (info->IsOptimizing()) { function->ReplaceCode(*code); @@ -724,10 +721,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, ASSERT(!info.code().is_null()); // Function compilation complete. 
- RecordFunctionCompilation(Logger::FUNCTION_TAG, - literal->debug_name(), - literal->start_position(), - &info); scope_info = SerializedScopeInfo::Create(info.scope()); } @@ -738,6 +731,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, info.code(), scope_info); SetFunctionInfo(result, literal, false, script); + RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result); result->set_allows_lazy_compilation(allow_lazy); // Set the expected number of properties for instances and return @@ -776,28 +770,31 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info, void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, - Handle<String> name, - int start_position, - CompilationInfo* info) { + CompilationInfo* info, + Handle<SharedFunctionInfo> shared) { + // SharedFunctionInfo is passed separately, because if CompilationInfo + // was created using Script object, it will not have it. + // Log the code generation. If source information is available include // script name and line number. Check explicitly whether logging is // enabled as finding the line number is not free. - if (Logger::is_logging() || - CpuProfiler::is_profiling()) { + if (Logger::is_logging() || CpuProfiler::is_profiling()) { Handle<Script> script = info->script(); Handle<Code> code = info->code(); + if (*code == Builtins::builtin(Builtins::LazyCompile)) return; if (script->name()->IsString()) { - int line_num = GetScriptLineNumber(script, start_position) + 1; + int line_num = GetScriptLineNumber(script, shared->start_position()) + 1; USE(line_num); PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code, - *name, + *shared, String::cast(script->name()), line_num)); } else { PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code, - *name)); + *shared, + shared->DebugName())); } } diff --git a/src/compiler.h b/src/compiler.h index 239bea35..e0a437ac 100644 --- a/src/compiler.h +++ b/src/compiler.h @@ -265,9 +265,8 @@ class Compiler : public AllStatic { #endif static void RecordFunctionCompilation(Logger::LogEventsAndTags tag, - Handle<String> name, - int start_position, - CompilationInfo* info); + CompilationInfo* info, + Handle<SharedFunctionInfo> shared); }; diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h index 5df5893f..440dedca 100644 --- a/src/cpu-profiler-inl.h +++ b/src/cpu-profiler-inl.h @@ -41,6 +41,9 @@ namespace internal { void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) { code_map->AddCode(start, entry, size); + if (sfi_address != NULL) { + entry->set_shared_id(code_map->GetSFITag(sfi_address)); + } } @@ -54,8 +57,8 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) { } -void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) { - code_map->AddAlias(start, entry, code_start); +void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) { + code_map->MoveCode(from, to); } diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc index fcf539f3..ad04a003 100644 --- a/src/cpu-profiler.cc +++ b/src/cpu-profiler.cc @@ -53,13 +53,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) ticks_buffer_(sizeof(TickSampleEventRecord), kTickSamplesBufferChunkSize, kTickSamplesBufferChunksCount), - enqueue_order_(0), - known_functions_(new HashMap(AddressesMatch)) { -} - - -ProfilerEventsProcessor::~ProfilerEventsProcessor() { - delete known_functions_; + enqueue_order_(0) { } @@ -75,6 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags 
tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, prefix, name); rec->size = 1; + rec->sfi_address = NULL; events_buffer_.Enqueue(evt_rec); } @@ -84,7 +79,8 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, String* resource_name, int line_number, Address start, - unsigned size) { + unsigned size, + Address sfi_address) { if (FilterOutCodeCreateEvent(tag)) return; CodeEventsContainer evt_rec; CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; @@ -93,6 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number); rec->size = size; + rec->sfi_address = sfi_address; events_buffer_.Enqueue(evt_rec); } @@ -109,6 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, name); rec->size = size; + rec->sfi_address = NULL; events_buffer_.Enqueue(evt_rec); } @@ -125,6 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, args_count); rec->size = size; + rec->sfi_address = NULL; events_buffer_.Enqueue(evt_rec); } @@ -150,57 +149,14 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) { } -void ProfilerEventsProcessor::FunctionCreateEvent(Address alias, - Address start, - int security_token_id) { +void ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) { CodeEventsContainer evt_rec; - CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_; - rec->type = CodeEventRecord::CODE_ALIAS; + SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_; + rec->type = CodeEventRecord::SFI_MOVE; rec->order = ++enqueue_order_; - rec->start = alias; - rec->entry = generator_->NewCodeEntry(security_token_id); - rec->code_start = start; + rec->from = from; + rec->to = to; events_buffer_.Enqueue(evt_rec); - - known_functions_->Lookup(alias, AddressHash(alias), true); -} - - -void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) { - CodeMoveEvent(from, to); - - if (IsKnownFunction(from)) { - known_functions_->Remove(from, AddressHash(from)); - known_functions_->Lookup(to, AddressHash(to), true); - } -} - - -void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) { - CodeDeleteEvent(from); - - known_functions_->Remove(from, AddressHash(from)); -} - - -bool ProfilerEventsProcessor::IsKnownFunction(Address start) { - HashMap::Entry* entry = - known_functions_->Lookup(start, AddressHash(start), false); - return entry != NULL; -} - - -void ProfilerEventsProcessor::ProcessMovedFunctions() { - for (int i = 0; i < moved_functions_.length(); ++i) { - JSFunction* function = moved_functions_[i]; - CpuProfiler::FunctionCreateEvent(function); - } - moved_functions_.Clear(); -} - - -void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) { - moved_functions_.Add(function); } @@ -227,13 +183,12 @@ void ProfilerEventsProcessor::AddCurrentStack() { TickSample* sample = &record.sample; sample->state = Top::current_vm_state(); sample->pc = reinterpret_cast<Address>(sample); // Not NULL. 
+ sample->tos = NULL; sample->frames_count = 0; for (StackTraceFrameIterator it; !it.done() && sample->frames_count < TickSample::kMaxFramesCount; it.Advance()) { - JavaScriptFrame* frame = it.frame(); - sample->stack[sample->frames_count++] = - reinterpret_cast<Address>(frame->function()); + sample->stack[sample->frames_count++] = it.frame()->pc(); } record.order = enqueue_order_; ticks_from_vm_buffer_.Enqueue(record); @@ -393,20 +348,38 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Heap::empty_string(), v8::CpuProfileNode::kNoLineNumberInfo, code->address(), - code->ExecutableSize()); + code->ExecutableSize(), + NULL); } void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, String* name, - String* source, int line) { + Code* code, + SharedFunctionInfo* shared, + String* name) { singleton_->processor_->CodeCreateEvent( tag, name, + Heap::empty_string(), + v8::CpuProfileNode::kNoLineNumberInfo, + code->address(), + code->ExecutableSize(), + shared->address()); +} + + +void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, + String* source, int line) { + singleton_->processor_->CodeCreateEvent( + tag, + shared->DebugName(), source, line, code->address(), - code->ExecutableSize()); + code->ExecutableSize(), + shared->address()); } @@ -430,44 +403,8 @@ void CpuProfiler::CodeDeleteEvent(Address from) { } -void CpuProfiler::FunctionCreateEvent(JSFunction* function) { - int security_token_id = TokenEnumerator::kNoSecurityToken; - if (function->unchecked_context()->IsContext()) { - security_token_id = singleton_->token_enumerator_->GetTokenId( - function->context()->global_context()->security_token()); - } - singleton_->processor_->FunctionCreateEvent( - function->address(), - function->shared()->code()->address(), - security_token_id); -} - - -void CpuProfiler::ProcessMovedFunctions() { - singleton_->processor_->ProcessMovedFunctions(); -} - - -void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) { - // This function is called from GC iterators (during Scavenge, - // MC, and MS), so marking bits can be set on objects. That's - // why unchecked accessors are used here. - - // The same function can be reported several times. - if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile) - || singleton_->processor_->IsKnownFunction(function->address())) return; - - singleton_->processor_->RememberMovedFunction(function); -} - - -void CpuProfiler::FunctionMoveEvent(Address from, Address to) { - singleton_->processor_->FunctionMoveEvent(from, to); -} - - -void CpuProfiler::FunctionDeleteEvent(Address from) { - singleton_->processor_->FunctionDeleteEvent(from); +void CpuProfiler::SFIMoveEvent(Address from, Address to) { + singleton_->processor_->SFIMoveEvent(from, to); } @@ -539,7 +476,6 @@ void CpuProfiler::StartProcessorIfNotStarted() { FLAG_log_code = saved_log_code_flag; } Logger::LogCompiledFunctions(); - Logger::LogFunctionObjects(); Logger::LogAccessorCallbacks(); } // Enable stack sampling. 
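With CODE_ALIAS gone, the profiler now keys code entries to their SharedFunctionInfo: CodeCreateEventRecord carries an sfi_address, and UpdateCodeMap tags the entry with a stable id for that address, so relocated or recompiled code for one function folds into a single profile entry. Tick samples likewise switch from JSFunction addresses to raw pc values, which the generator can resolve through the same map. A toy JavaScript sketch of the tagging idea (sfiTags and getSFITag are illustrative names, not V8's actual CodeMap code):

  var sfiTags = {};   // SFI address -> stable shared id
  var nextTag = 1;
  function getSFITag(sfiAddress) {
    if (!(sfiAddress in sfiTags)) sfiTags[sfiAddress] = nextTag++;
    return sfiTags[sfiAddress];
  }
  // Optimized and unoptimized code objects for one function live at
  // different addresses but share a SharedFunctionInfo, so they get the
  // same tag:
  print(getSFITag('0xf00d') == getSFITag('0xf00d'));  // true
  print(getSFITag('0xf00d') != getSFITag('0xbeef'));  // true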
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h index 10165f67..1ebbfebf 100644 --- a/src/cpu-profiler.h +++ b/src/cpu-profiler.h @@ -50,7 +50,7 @@ class TokenEnumerator; V(CODE_CREATION, CodeCreateEventRecord) \ V(CODE_MOVE, CodeMoveEventRecord) \ V(CODE_DELETE, CodeDeleteEventRecord) \ - V(CODE_ALIAS, CodeAliasEventRecord) + V(SFI_MOVE, SFIMoveEventRecord) class CodeEventRecord { @@ -73,6 +73,7 @@ class CodeCreateEventRecord : public CodeEventRecord { Address start; CodeEntry* entry; unsigned size; + Address sfi_address; INLINE(void UpdateCodeMap(CodeMap* code_map)); }; @@ -95,11 +96,10 @@ class CodeDeleteEventRecord : public CodeEventRecord { }; -class CodeAliasEventRecord : public CodeEventRecord { +class SFIMoveEventRecord : public CodeEventRecord { public: - Address start; - CodeEntry* entry; - Address code_start; + Address from; + Address to; INLINE(void UpdateCodeMap(CodeMap* code_map)); }; @@ -134,7 +134,7 @@ class TickSampleEventRecord BASE_EMBEDDED { class ProfilerEventsProcessor : public Thread { public: explicit ProfilerEventsProcessor(ProfileGenerator* generator); - virtual ~ProfilerEventsProcessor(); + virtual ~ProfilerEventsProcessor() {} // Thread control. virtual void Run(); @@ -148,7 +148,8 @@ class ProfilerEventsProcessor : public Thread { void CodeCreateEvent(Logger::LogEventsAndTags tag, String* name, String* resource_name, int line_number, - Address start, unsigned size); + Address start, unsigned size, + Address sfi_address); void CodeCreateEvent(Logger::LogEventsAndTags tag, const char* name, Address start, unsigned size); @@ -157,17 +158,12 @@ class ProfilerEventsProcessor : public Thread { Address start, unsigned size); void CodeMoveEvent(Address from, Address to); void CodeDeleteEvent(Address from); - void FunctionCreateEvent(Address alias, Address start, int security_token_id); - void FunctionMoveEvent(Address from, Address to); - void FunctionDeleteEvent(Address from); + void SFIMoveEvent(Address from, Address to); void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag, const char* prefix, String* name, Address start, unsigned size); // Puts current stack into tick sample events buffer. void AddCurrentStack(); - bool IsKnownFunction(Address start); - void ProcessMovedFunctions(); - void RememberMovedFunction(JSFunction* function); // Tick sample events are filled directly in the buffer of the circular // queue (because the structure is of fixed width, but usually not all @@ -188,13 +184,6 @@ class ProfilerEventsProcessor : public Thread { bool ProcessTicks(unsigned dequeue_order); INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag)); - INLINE(static bool AddressesMatch(void* key1, void* key2)) { - return key1 == key2; - } - INLINE(static uint32_t AddressHash(Address addr)) { - return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr))); - } ProfileGenerator* generator_; bool running_; @@ -202,10 +191,6 @@ class ProfilerEventsProcessor : public Thread { SamplingCircularQueue ticks_buffer_; UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_; unsigned enqueue_order_; - - // Used from the VM thread. 
- HashMap* known_functions_; - List<JSFunction*> moved_functions_; }; } } // namespace v8::internal @@ -251,23 +236,22 @@ class CpuProfiler { static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, String* name); static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, String* name, + Code* code, + SharedFunctionInfo *shared, + String* name); + static void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, + SharedFunctionInfo *shared, String* source, int line); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count); static void CodeMovingGCEvent() {} static void CodeMoveEvent(Address from, Address to); static void CodeDeleteEvent(Address from); - static void FunctionCreateEvent(JSFunction* function); - // Reports function creation in case we had missed it (e.g. - // if it was created from compiled code). - static void FunctionCreateEventFromMove(JSFunction* function); - static void FunctionMoveEvent(Address from, Address to); - static void FunctionDeleteEvent(Address from); static void GetterCallbackEvent(String* name, Address entry_point); static void RegExpCodeCreateEvent(Code* code, String* source); - static void ProcessMovedFunctions(); static void SetterCallbackEvent(String* name, Address entry_point); + static void SFIMoveEvent(Address from, Address to); static INLINE(bool is_profiling()) { return NoBarrier_Load(&is_profiling_); @@ -127,11 +127,13 @@ bool Shell::ExecuteString(Handle<String> source, } else { Handle<Value> result = script->Run(); if (result.IsEmpty()) { + ASSERT(try_catch.HasCaught()); // Print errors that happened during execution. if (report_exceptions && !i::FLAG_debugger) ReportException(&try_catch); return false; } else { + ASSERT(!try_catch.HasCaught()); if (print_result && !result->IsUndefined()) { // If all went well and the result wasn't undefined then print // the returned value. @@ -403,7 +405,7 @@ void Shell::AddHistogramSample(void* histogram, int sample) { void Shell::Initialize() { Shell::counter_map_ = new CounterMap(); // Set up counters - if (i::FLAG_map_counters != NULL) + if (i::StrLength(i::FLAG_map_counters) != 0) MapCounters(i::FLAG_map_counters); if (i::FLAG_dump_counters) { V8::SetCounterFunction(LookupCounter); @@ -423,6 +425,12 @@ void Shell::Initialize() { global_template->Set(String::New("quit"), FunctionTemplate::New(Quit)); global_template->Set(String::New("version"), FunctionTemplate::New(Version)); +#ifdef LIVE_OBJECT_LIST + global_template->Set(String::New("lol_is_enabled"), Boolean::New(true)); +#else + global_template->Set(String::New("lol_is_enabled"), Boolean::New(false)); +#endif + Handle<ObjectTemplate> os_templ = ObjectTemplate::New(); AddOSMethods(os_templ); global_template->Set(String::New("os"), os_templ); @@ -117,6 +117,10 @@ Debug.State = { var trace_compile = false; // Tracing all compile events? var trace_debug_json = false; // Tracing all debug json packets? var last_cmd_line = ''; +//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined. 
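The globals added just below hold paging state for LOL dumps: repeating a command (hitting <enter>) continues from where the previous dump stopped. A simplified model of the createLOLRequest logic that follows (nextPage is an illustrative name; the real code also parses string counts and builds a full request object):

  function nextPage(start_index, lines_to_dump, is_continuation) {
    if (is_continuation) start_index = lol_next_dump_index;
    if (!lines_to_dump) lines_to_dump = kDefaultLolLinesToPrintAtATime;
    if (lines_to_dump > kMaxLolLinesToPrintAtATime)
      lines_to_dump = kMaxLolLinesToPrintAtATime;
    lol_next_dump_index = start_index + lines_to_dump;
    return { start: start_index, count: lines_to_dump };
  }
  // 'lol info 25'    -> { start: 0,  count: 25 }
  // <enter> (repeat) -> { start: 25, count: 25 }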
+var lol_next_dump_index = 0; +const kDefaultLolLinesToPrintAtATime = 10; +const kMaxLolLinesToPrintAtATime = 1000; var repeat_cmd_line = ''; var is_running = true; @@ -495,6 +499,13 @@ function DebugRequest(cmd_line) { this.request_ = void 0; break; + case 'liveobjectlist': + case 'lol': + if (lol_is_enabled) { + this.request_ = this.lolToJSONRequest_(args, is_repeating); + break; + } + default: throw new Error('Unknown command "' + cmd + '"'); } @@ -539,10 +550,54 @@ DebugRequest.prototype.createRequest = function(command) { }; + +// Note: we use detected command repetition as a signal for continuation here. +DebugRequest.prototype.createLOLRequest = function(command, + start_index, + lines_to_dump, + is_continuation) { + if (is_continuation) { + start_index = lol_next_dump_index; + } + + if (lines_to_dump) { + lines_to_dump = parseInt(lines_to_dump); + } else { + lines_to_dump = kDefaultLolLinesToPrintAtATime; + } + if (lines_to_dump > kMaxLolLinesToPrintAtATime) { + lines_to_dump = kMaxLolLinesToPrintAtATime; + } + + // Save the next start_index to dump from: + lol_next_dump_index = start_index + lines_to_dump; + + var request = this.createRequest(command); + request.arguments = {}; + request.arguments.start = start_index; + request.arguments.count = lines_to_dump; + + return request; +}; + + // Create a JSON request for the evaluation command. DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) { // Global variable used to store whether a handle was requested. lookup_handle = null; + + if (lol_is_enabled) { + // Check if the expression is an obj id in the form @<obj id>. + var obj_id_match = expression.match(/^@([0-9]+)$/); + if (obj_id_match) { + var obj_id = parseInt(obj_id_match[1]); + // Build a dump request. + var request = this.createRequest('getobj'); + request.arguments = {}; + request.arguments.obj_id = obj_id; + return request.toJSONProtocol(); + } + } + // Check if the expression is a handle id in the form #<handle>#. var handle_match = expression.match(/^#([0-9]*)#$/); if (handle_match) { @@ -1103,6 +1158,10 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) { // Build a evaluate request from the text command. request = this.createRequest('frame'); last_cmd = 'info args'; + } else if (lol_is_enabled && + args && (args == 'liveobjectlist' || args == 'lol')) { + // Build an evaluate request from the text command. + return this.liveObjectListToJSONRequest_(null); } else { throw new Error('Invalid info arguments.'); } @@ -1153,6 +1212,262 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) { }; + +// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>] +DebugRequest.prototype.lolMakeListRequest = + function(cmd, args, first_arg_index, is_repeating) { + + var request; + var start_index = 0; + var dump_limit = void 0; + var type_filter = void 0; + var space_filter = void 0; + var prop_filter = void 0; + var is_verbose = false; + var i; + + for (i = first_arg_index; i < args.length; i++) { + var arg = args[i]; + // Check for [v[erbose]]: + if (arg === 'verbose' || arg === 'v') { + // Nothing to do. This is already implied by args.length > 3.
+ is_verbose = true; + + // Check for [<N>]: + } else if (arg.match(/^[0-9]+$/)) { + dump_limit = arg; + is_verbose = true; + + // Check for i[ndex] <i>: + } else if (arg === 'index' || arg === 'i') { + i++; + if (args.length <= i) { + throw new Error('Missing index after ' + arg + '.'); + } + start_index = parseInt(args[i]); + // The user input start index starts at 1: + if (start_index <= 0) { + throw new Error('Invalid index ' + args[i] + '.'); + } + start_index -= 1; + is_verbose = true; + + // Check for t[ype] <type>: + } else if (arg === 'type' || arg === 't') { + i++; + if (args.length <= i) { + throw new Error('Missing type after ' + arg + '.'); + } + type_filter = args[i]; + + // Check for space <heap space name>: + } else if (arg === 'space' || arg === 'sp') { + i++; + if (args.length <= i) { + throw new Error('Missing space name after ' + arg + '.'); + } + space_filter = args[i]; + + // Check for property <prop name>: + } else if (arg === 'property' || arg === 'prop') { + i++; + if (args.length <= i) { + throw new Error('Missing property name after ' + arg + '.'); + } + prop_filter = args[i]; + + } else { + throw new Error('Unknown args at ' + arg + '.'); + } + } + + // Build the verbose request: + if (is_verbose) { + request = this.createLOLRequest('lol-'+cmd, + start_index, + dump_limit, + is_repeating); + request.arguments.verbose = true; + } else { + request = this.createRequest('lol-'+cmd); + request.arguments = {}; + } + + request.arguments.filter = {}; + if (type_filter) { + request.arguments.filter.type = type_filter; + } + if (space_filter) { + request.arguments.filter.space = space_filter; + } + if (prop_filter) { + request.arguments.filter.prop = prop_filter; + } + + return request; +} + + +function extractObjId(args) { + var id = args; + id = id.match(/^@([0-9]+)$/); + if (id) { + id = id[1]; + } else { + throw new Error('Invalid obj id ' + args + '.'); + } + return parseInt(id); } + + +DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) { + var request; + // Use default command if one is not specified: + if (!args) { + args = 'info'; + } + + var orig_args = args; + var first_arg_index; + + var arg, i; + var args = args.split(/\s+/g); + var cmd = args[0]; + var id; + + // Command: <id> [v[erbose]] ... + if (cmd.match(/^[0-9]+$/)) { + // Convert to the padded list command: + // Command: l[ist] <dummy> <id> [v[erbose]] ...
+ + // Insert the implicit 'list' in front and process as normal: + cmd = 'list'; + args.unshift(cmd); + } + + switch(cmd) { + // Command: c[apture] + case 'capture': + case 'c': + request = this.createRequest('lol-capture'); + break; + + // Command: clear|d[elete] <id>|all + case 'clear': + case 'delete': + case 'del': { + if (args.length < 2) { + throw new Error('Missing argument after ' + cmd + '.'); + } else if (args.length > 2) { + throw new Error('Too many arguments after ' + cmd + '.'); + } + id = args[1]; + if (id.match(/^[0-9]+$/)) { + // Delete a specific lol record: + request = this.createRequest('lol-delete'); + request.arguments = {}; + request.arguments.id = parseInt(id); + } else if (id === 'all') { + // Delete all: + request = this.createRequest('lol-reset'); + } else { + throw new Error('Invalid argument after ' + cmd + '.'); + } + break; + } + + // Command: diff <id1> <id2> [<dump options>] + case 'diff': + first_arg_index = 3; + + // Command: list <dummy> <id> [<dump options>] + case 'list': + + // Command: ret[ainers] <obj id> [<dump options>] + case 'retainers': + case 'ret': + case 'retaining-paths': + case 'rp': { + if (cmd === 'ret') cmd = 'retainers'; + else if (cmd === 'rp') cmd = 'retaining-paths'; + + if (!first_arg_index) first_arg_index = 2; + + if (args.length < first_arg_index) { + throw new Error('Too few arguments after ' + cmd + '.'); + } + + var request_cmd = (cmd === 'list') ? 'diff':cmd; + request = this.lolMakeListRequest(request_cmd, + args, + first_arg_index, + is_repeating); + + if (cmd === 'diff') { + request.arguments.id1 = parseInt(args[1]); + request.arguments.id2 = parseInt(args[2]); + } else if (cmd == 'list') { + request.arguments.id1 = 0; + request.arguments.id2 = parseInt(args[1]); + } else { + request.arguments.id = extractObjId(args[1]); + } + break; + } + + // Command: getid + case 'getid': { + request = this.createRequest('lol-getid'); + request.arguments = {}; + request.arguments.address = args[1]; + break; + } + + // Command: inf[o] [<N>] + case 'info': + case 'inf': { + if (args.length > 2) { + throw new Error('Too many arguments after ' + cmd + '.'); + } + // Build the info request: + request = this.createLOLRequest('lol-info', 0, args[1], is_repeating); + break; + } + + // Command: path <obj id 1> <obj id 2> + case 'path': { + request = this.createRequest('lol-path'); + request.arguments = {}; + if (args.length > 2) { + request.arguments.id1 = extractObjId(args[1]); + request.arguments.id2 = extractObjId(args[2]); + } else { + request.arguments.id1 = 0; + request.arguments.id2 = extractObjId(args[1]); + } + break; + } + + // Command: print + case 'print': { + request = this.createRequest('lol-print'); + request.arguments = {}; + request.arguments.id = extractObjId(args[1]); + break; + } + + // Command: reset + case 'reset': { + request = this.createRequest('lol-reset'); + break; + } + + default: + throw new Error('Invalid arguments.'); + } + return request.toJSONProtocol(); +}; + + // Create a JSON request for the threads command. DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) { // Build a threads request from the text command.
@@ -1239,6 +1554,49 @@ DebugRequest.prototype.helpCommand_ = function(args) { print(''); print('gc - runs the garbage collector'); print(''); + + if (lol_is_enabled) { + print('liveobjectlist|lol <command> - live object list tracking.'); + print(' where <command> can be:'); + print(' c[apture] - captures a LOL list.'); + print(' clear|del[ete] <id>|all - clears LOL of id <id>.'); + print(' If \'all\' is specified instead, will clear all.'); + print(' diff <id1> <id2> [<dump options>]'); + print(' - prints the diff between LOLs id1 and id2.'); + print(' - also see <dump options> below.'); + print(' getid <address> - gets the obj id for the specified address if available.'); + print(' The address must be in hex form prefixed with 0x.'); + print(' inf[o] [<N>] - lists summary info of all LOL lists.'); + print(' If N is specified, will print N items at a time.'); + print(' [l[ist]] <id> [<dump options>]'); + print(' - prints the listing of objects in LOL id.'); + print(' - also see <dump options> below.'); + print(' reset - clears all LOL lists.'); + print(' ret[ainers] <id> [<dump options>]'); + print(' - prints the list of retainers of obj id.'); + print(' - also see <dump options> below.'); + print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.'); + print(' If only one id is specified, will print the path from'); + print(' roots to the specified object if available.'); + print(' print <id> - prints the obj for the specified obj id if available.'); + print(''); + print(' <dump options> includes:'); + print(' [v[erbose]] - do verbose dump.'); + print(' [<N>] - dump N items at a time. Implies verbose dump.'); + print(' If unspecified, N will default to '+ kDefaultLolLinesToPrintAtATime+'. Max N is '+ kMaxLolLinesToPrintAtATime+'.'); + print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.'); + print(' [t[ype] <type>] - filter by type.'); + print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of'); + print(' { cell, code, lo, map, new, old-data, old-pointer }.'); + print(''); + print(' If the verbose option, or an option that implies a verbose dump'); + print(' is specified, then a verbose dump will be requested. 
Else, a summary dump'); + print(' will be requested.'); + print(''); + } + print('trace compile'); // hidden command: trace debug json - toggles tracing of debug json packets print(''); @@ -1339,6 +1697,237 @@ function refObjectToString_(protocolPackage, handle) { } +function decodeLolCaptureResponse(body) { + var result; + result = 'Captured live object list '+ body.id + + ': count '+ body.count + ' size ' + body.size; + return result; +} + + +function decodeLolDeleteResponse(body) { + var result; + result = 'Deleted live object list '+ body.id; + return result; +} + + +function digitsIn(value) { + var digits = 0; + if (value === 0) value = 1; + while (value >= 1) { + digits++; + value /= 10; + } + return digits; +} + + +function padding(value, max_digits) { + var padding_digits = max_digits - digitsIn(value); + var padding = ''; + while (padding_digits > 0) { + padding += ' '; + padding_digits--; + } + return padding; +} + + +function decodeLolInfoResponse(body) { + var result; + var lists = body.lists; + var length = lists.length; + var first_index = body.first_index + 1; + var has_more = ((first_index + length) <= body.count); + result = 'captured live object lists'; + if (has_more || (first_index != 1)) { + result += ' ['+ length +' of '+ body.count + + ': starting from '+ first_index +']'; + } + result += ':\n'; + var max_digits = digitsIn(body.count); + var last_count = 0; + var last_size = 0; + for (var i = 0; i < length; i++) { + var entry = lists[i]; + var count = entry.count; + var size = entry.size; + var index = first_index + i; + result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id + + ': count '+ count; + if (last_count > 0) { + result += '(+' + (count - last_count) + ')'; + } + result += ' size '+ size; + if (last_size > 0) { + result += '(+' + (size - last_size) + ')'; + } + result += '\n'; + last_count = count; + last_size = size; + } + result += ' total: '+length+' lists\n'; + if (has_more) { + result += ' -- press <enter> for more --\n'; + } else { + repeat_cmd_line = ''; + } + if (length === 0) result += ' none\n'; + + return result; +} + + +function decodeLolListResponse(body, title) { + + var result; + var total_count = body.count; + var total_size = body.size; + var length; + var max_digits; + var i; + var entry; + var index; + + var max_count_digits = digitsIn(total_count); + var max_size_digits; + + var summary = body.summary; + if (summary) { + + var roots_count = 0; + var found_root = body.found_root || 0; + var found_weak_root = body.found_weak_root || 0; + + // Print the summary result: + result = 'summary of objects:\n'; + length = summary.length; + if (found_root !== 0) { + roots_count++; + } + if (found_weak_root !== 0) { + roots_count++; + } + max_digits = digitsIn(length + roots_count); + max_size_digits = digitsIn(total_size); + + index = 1; + if (found_root !== 0) { + result += ' [' + padding(index, max_digits) + index + '] ' + + ' count '+ 1 + padding(0, max_count_digits) + + ' '+ padding(0, max_size_digits+1) + + ' : <root>\n'; + index++; + } + if (found_weak_root !== 0) { + result += ' [' + padding(index, max_digits) + index + '] ' + + ' count '+ 1 + padding(0, max_count_digits) + + ' '+ padding(0, max_size_digits+1) + + ' : <weak root>\n'; + index++; + } + + for (i = 0; i < length; i++) { + entry = summary[i]; + var count = entry.count; + var size = entry.size; + result += ' [' + padding(index, max_digits) + index + '] ' + + ' count '+ count + padding(count, max_count_digits) + + ' size '+ size + padding(size, max_size_digits) + + 
' : <' + entry.desc + '>\n'; + index++; + } + result += '\n total count: '+(total_count+roots_count)+'\n'; + if (body.size) { + result += ' total size: '+body.size+'\n'; + } + + } else { + // Print the full dump result: + var first_index = body.first_index + 1; + var elements = body.elements; + length = elements.length; + var has_more = ((first_index + length) <= total_count); + result = title; + if (has_more || (first_index != 1)) { + result += ' ['+ length +' of '+ total_count + + ': starting from '+ first_index +']'; + } + result += ':\n'; + if (length === 0) result += ' none\n'; + max_digits = digitsIn(length); + + var max_id = 0; + var max_size = 0; + for (i = 0; i < length; i++) { + entry = elements[i]; + if (entry.id > max_id) max_id = entry.id; + if (entry.size > max_size) max_size = entry.size; + } + var max_id_digits = digitsIn(max_id); + max_size_digits = digitsIn(max_size); + + for (i = 0; i < length; i++) { + entry = elements[i]; + index = first_index + i; + result += ' ['+ padding(index, max_digits) + index +']'; + if (entry.id !== 0) { + result += ' @' + entry.id + padding(entry.id, max_id_digits) + + ': size ' + entry.size + ', ' + + padding(entry.size, max_size_digits) + entry.desc + '\n'; + } else { + // Must be a root or weak root: + result += ' ' + entry.desc + '\n'; + } + } + if (has_more) { + result += ' -- press <enter> for more --\n'; + } else { + repeat_cmd_line = ''; + } + if (length === 0) result += ' none\n'; + } + + return result; +} + + +function decodeLolDiffResponse(body) { + var title = 'objects'; + return decodeLolListResponse(body, title); +} + + +function decodeLolRetainersResponse(body) { + var title = 'retainers for @' + body.id; + return decodeLolListResponse(body, title); +} + + +function decodeLolPathResponse(body) { + return body.path; +} + + +function decodeLolResetResponse(body) { + return 'Reset all live object lists.'; +} + + +function decodeLolGetIdResponse(body) { + if (body.id == 0) { + return 'Address is invalid, or object has been moved or collected'; + } + return 'obj id is @' + body.id; +} + + +function decodeLolPrintResponse(body) { + return body.dump; +} + + // Rounds number 'num' to 'length' decimal places. function roundNumber(num, length) { var factor = Math.pow(10, length); @@ -1510,6 +2099,7 @@ function DebugResponseDetails(response) { case 'evaluate': case 'lookup': + case 'getobj': if (last_cmd == 'p' || last_cmd == 'print') { result = body.text; } else { @@ -1671,6 +2261,34 @@ function DebugResponseDetails(response) { } break; + case 'lol-capture': + details.text = decodeLolCaptureResponse(body); + break; + case 'lol-delete': + details.text = decodeLolDeleteResponse(body); + break; + case 'lol-diff': + details.text = decodeLolDiffResponse(body); + break; + case 'lol-getid': + details.text = decodeLolGetIdResponse(body); + break; + case 'lol-info': + details.text = decodeLolInfoResponse(body); + break; + case 'lol-print': + details.text = decodeLolPrintResponse(body); + break; + case 'lol-reset': + details.text = decodeLolResetResponse(body); + break; + case 'lol-retainers': + details.text = decodeLolRetainersResponse(body); + break; + case 'lol-path': + details.text = decodeLolPathResponse(body); + break; + default: details.text = 'Response for unknown command \'' + response.command() + '\'' + diff --git a/src/date.js b/src/date.js index 1fb48979..242ab7bb 100644 --- a/src/date.js +++ b/src/date.js @@ -81,12 +81,7 @@ function TimeFromYear(year) { function InLeapYear(time) { - return DaysInYear(YearFromTime(time)) == 366 ? 
1 : 0; -} - - -function DayWithinYear(time) { - return DAY(time) - DayFromYear(YearFromTime(time)); + return DaysInYear(YearFromTime(time)) - 365; // Returns 1 or 0. } diff --git a/src/debug-debugger.js b/src/debug-debugger.js index 1adf73ac..bc0f966f 100644 --- a/src/debug-debugger.js +++ b/src/debug-debugger.js @@ -109,6 +109,7 @@ var debugger_flags = { } }, }; +var lol_is_enabled = %HasLOLEnabled(); // Create a new break point object and add it to the list of break points. @@ -1391,6 +1392,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) this.scopeRequest_(request, response); } else if (request.command == 'evaluate') { this.evaluateRequest_(request, response); + } else if (lol_is_enabled && request.command == 'getobj') { + this.getobjRequest_(request, response); } else if (request.command == 'lookup') { this.lookupRequest_(request, response); } else if (request.command == 'references') { @@ -1418,6 +1421,28 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) } else if (request.command == 'gc') { this.gcRequest_(request, response); + // LiveObjectList tools: + } else if (lol_is_enabled && request.command == 'lol-capture') { + this.lolCaptureRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-delete') { + this.lolDeleteRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-diff') { + this.lolDiffRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-getid') { + this.lolGetIdRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-info') { + this.lolInfoRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-reset') { + this.lolResetRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-retainers') { + this.lolRetainersRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-path') { + this.lolPathRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-print') { + this.lolPrintRequest_(request, response); + } else if (lol_is_enabled && request.command == 'lol-stats') { + this.lolStatsRequest_(request, response); + } else { throw new Error('Unknown command "' + request.command + '" in request'); } @@ -2011,6 +2036,24 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) { }; +DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) { + if (!request.arguments) { + return response.failed('Missing arguments'); + } + + // Pull out arguments. + var obj_id = request.arguments.obj_id; + + // Check for legal arguments. + if (IS_UNDEFINED(obj_id)) { + return response.failed('Argument "obj_id" missing'); + } + + // Dump the object. 
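// (%GetLOLObj maps a LiveObjectList-assigned id back to the live heap
// object; wrapping the result in a mirror lets 'getobj' responses reuse the
// same serialization path as 'lookup', matching the shared handling in the
// d8.js response decoder above.)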
+ response.body = MakeMirror(%GetLOLObj(obj_id)); +}; + + DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) { if (!request.arguments) { return response.failed('Missing arguments'); @@ -2341,6 +2384,84 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) { }; +DebugCommandProcessor.prototype.lolCaptureRequest_ = + function(request, response) { + response.body = %CaptureLOL(); +}; + + +DebugCommandProcessor.prototype.lolDeleteRequest_ = + function(request, response) { + var id = request.arguments.id; + var result = %DeleteLOL(id); + if (result) { + response.body = { id: id }; + } else { + response.failed('Failed to delete: live object list ' + id + ' not found.'); + } +}; + + +DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) { + var id1 = request.arguments.id1; + var id2 = request.arguments.id2; + var verbose = request.arguments.verbose; + var filter = request.arguments.filter; + if (verbose === true) { + var start = request.arguments.start; + var count = request.arguments.count; + response.body = %DumpLOL(id1, id2, start, count, filter); + } else { + response.body = %SummarizeLOL(id1, id2, filter); + } +}; + + +DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) { + var address = request.arguments.address; + response.body = {}; + response.body.id = %GetLOLObjId(address); +}; + + +DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) { + var start = request.arguments.start; + var count = request.arguments.count; + response.body = %InfoLOL(start, count); +}; + + +DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) { + %ResetLOL(); +}; + + +DebugCommandProcessor.prototype.lolRetainersRequest_ = + function(request, response) { + var id = request.arguments.id; + var verbose = request.arguments.verbose; + var start = request.arguments.start; + var count = request.arguments.count; + var filter = request.arguments.filter; + + response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose, + start, count, filter); +}; + + +DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) { + var id1 = request.arguments.id1; + var id2 = request.arguments.id2; + response.body = {}; + response.body.path = %GetLOLPath(id1, id2, Mirror.prototype); +}; + + +DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) { + var id = request.arguments.id; + response.body = {}; + response.body.dump = %PrintLOLObj(id); +}; // Check whether the previously processed command caused the VM to become diff --git a/src/debug.cc b/src/debug.cc index d8201a18..d91ad92e 100644 --- a/src/debug.cc +++ b/src/debug.cc @@ -836,7 +836,8 @@ bool Debug::Load() { Handle<String> key = Factory::LookupAsciiSymbol("builtins"); Handle<GlobalObject> global = Handle<GlobalObject>(context->global()); RETURN_IF_EMPTY_HANDLE_VALUE( - SetProperty(global, key, Handle<Object>(global->builtins()), NONE), + SetProperty(global, key, Handle<Object>(global->builtins()), + NONE, kNonStrictMode), false); // Compile the JavaScript for the debugger in the debugger context. 
@@ -1012,14 +1013,18 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) { for (int i = 0; i < array->length(); i++) { Handle<Object> o(array->get(i)); if (CheckBreakPoint(o)) { - SetElement(break_points_hit, break_points_hit_count++, o); + SetElement(break_points_hit, + break_points_hit_count++, + o, + kNonStrictMode); } } } else { if (CheckBreakPoint(break_point_objects)) { SetElement(break_points_hit, break_points_hit_count++, - break_point_objects); + break_point_objects, + kNonStrictMode); } } diff --git a/src/execution.cc b/src/execution.cc index f484d8d9..de8f0a46 100644 --- a/src/execution.cc +++ b/src/execution.cc @@ -106,6 +106,11 @@ static Handle<Object> Invoke(bool construct, ASSERT(*has_pending_exception == Top::has_pending_exception()); if (*has_pending_exception) { Top::ReportPendingMessages(); + if (Top::pending_exception() == Failure::OutOfMemoryException()) { + if (!HandleScopeImplementer::instance()->ignore_out_of_memory()) { + V8::FatalProcessOutOfMemory("JS", true); + } + } return Handle<Object>(); } else { Top::clear_pending_message(); diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc new file mode 100644 index 00000000..6f574d4a --- /dev/null +++ b/src/extensions/experimental/break-iterator.cc @@ -0,0 +1,249 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
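Before the implementation, it may help to see how this binding is meant to be driven from script; a hypothetical d8 session using the API that this file and i18n-extension.cc (later in this patch) wire up:

  var locale = new v8Locale('en');
  var it = locale.v8CreateBreakIterator('word');
  it.adoptText('One two.');
  // icu::BreakIterator::next() returns DONE (-1) once the text is exhausted.
  for (var pos = it.first(); pos != -1; pos = it.next()) {
    print(pos + ': breakType ' + it.breakType());
  }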
+ +#include "break-iterator.h" + +#include "unicode/brkiter.h" +#include "unicode/locid.h" +#include "unicode/rbbi.h" + +namespace v8 { +namespace internal { + +v8::Persistent<v8::FunctionTemplate> BreakIterator::break_iterator_template_; + +icu::BreakIterator* BreakIterator::UnpackBreakIterator( + v8::Handle<v8::Object> obj) { + if (break_iterator_template_->HasInstance(obj)) { + return static_cast<icu::BreakIterator*>( + obj->GetPointerFromInternalField(0)); + } + + return NULL; +} + +UnicodeString* BreakIterator::ResetAdoptedText( + v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) { + // Get the previous value from the internal field. + UnicodeString* text = static_cast<UnicodeString*>( + obj->GetPointerFromInternalField(1)); + delete text; + + // Assign new value to the internal pointer. + v8::String::Value text_value(value); + text = new UnicodeString( + reinterpret_cast<const UChar*>(*text_value), text_value.length()); + obj->SetPointerInInternalField(1, text); + + // Return new unicode string pointer. + return text; +} + +void BreakIterator::DeleteBreakIterator(v8::Persistent<v8::Value> object, + void* param) { + v8::Persistent<v8::Object> persistent_object = + v8::Persistent<v8::Object>::Cast(object); + + // First delete the hidden C++ object. + // Unpacking should never return NULL here. That would only happen if + // this method is used as the weak callback for persistent handles not + // pointing to a break iterator. + delete UnpackBreakIterator(persistent_object); + + delete static_cast<UnicodeString*>( + persistent_object->GetPointerFromInternalField(1)); + + // Then dispose of the persistent handle to JS object. + persistent_object.Dispose(); +} + +// Throws a JavaScript exception. +static v8::Handle<v8::Value> ThrowUnexpectedObjectError() { + // Returns undefined, and schedules an exception to be thrown. 
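// Callers below simply write 'return ThrowUnexpectedObjectError();', so a
// binding invoked on the wrong kind of object exits immediately with the
// exception pending.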
+ return v8::ThrowException(v8::Exception::Error( + v8::String::New("BreakIterator method called on an object " + "that is not a BreakIterator."))); +} + +v8::Handle<v8::Value> BreakIterator::BreakIteratorAdoptText( + const v8::Arguments& args) { + if (args.Length() != 1 || !args[0]->IsString()) { + return v8::ThrowException(v8::Exception::SyntaxError( + v8::String::New("Text input is required."))); + } + + icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder()); + if (!break_iterator) { + return ThrowUnexpectedObjectError(); + } + + break_iterator->setText(*ResetAdoptedText(args.Holder(), args[0])); + + return v8::Undefined(); +} + +v8::Handle<v8::Value> BreakIterator::BreakIteratorFirst( + const v8::Arguments& args) { + icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder()); + if (!break_iterator) { + return ThrowUnexpectedObjectError(); + } + + return v8::Int32::New(break_iterator->first()); +} + +v8::Handle<v8::Value> BreakIterator::BreakIteratorNext( + const v8::Arguments& args) { + icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder()); + if (!break_iterator) { + return ThrowUnexpectedObjectError(); + } + + return v8::Int32::New(break_iterator->next()); +} + +v8::Handle<v8::Value> BreakIterator::BreakIteratorCurrent( + const v8::Arguments& args) { + icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder()); + if (!break_iterator) { + return ThrowUnexpectedObjectError(); + } + + return v8::Int32::New(break_iterator->current()); +} + +v8::Handle<v8::Value> BreakIterator::BreakIteratorBreakType( + const v8::Arguments& args) { + icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder()); + if (!break_iterator) { + return ThrowUnexpectedObjectError(); + } + + // TODO(cira): Remove cast once ICU fixes base BreakIterator class. + int32_t status = + static_cast<RuleBasedBreakIterator*>(break_iterator)->getRuleStatus(); + // Keep return values in sync with JavaScript BreakType enum. 
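// (The UBRK_WORD_* rule-status ranges are defined in ICU's ubrk.h; their
// base values (NONE 0, NUMBER 100, LETTER 200, KANA 300, IDEO 400) are the
// same numbers the extension exposes as v8Locale.v8BreakIterator.BreakType.)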
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) { + return v8::Int32::New(UBRK_WORD_NONE); + } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) { + return v8::Int32::New(UBRK_WORD_NUMBER); + } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) { + return v8::Int32::New(UBRK_WORD_LETTER); + } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) { + return v8::Int32::New(UBRK_WORD_KANA); + } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) { + return v8::Int32::New(UBRK_WORD_IDEO); + } else { + return v8::Int32::New(-1); + } +} + +v8::Handle<v8::Value> BreakIterator::JSBreakIterator( + const v8::Arguments& args) { + v8::HandleScope handle_scope; + + if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) { + return v8::ThrowException(v8::Exception::SyntaxError( + v8::String::New("Locale and iterator type are required."))); + } + + v8::String::Utf8Value locale(args[0]); + icu::Locale icu_locale(*locale); + + UErrorCode status = U_ZERO_ERROR; + icu::BreakIterator* break_iterator = NULL; + v8::String::Utf8Value type(args[1]); + if (!strcmp(*type, "character")) { + break_iterator = + icu::BreakIterator::createCharacterInstance(icu_locale, status); + } else if (!strcmp(*type, "word")) { + break_iterator = + icu::BreakIterator::createWordInstance(icu_locale, status); + } else if (!strcmp(*type, "sentence")) { + break_iterator = + icu::BreakIterator::createSentenceInstance(icu_locale, status); + } else if (!strcmp(*type, "line")) { + break_iterator = + icu::BreakIterator::createLineInstance(icu_locale, status); + } else { + return v8::ThrowException(v8::Exception::SyntaxError( + v8::String::New("Invalid iterator type."))); + } + + if (U_FAILURE(status)) { + delete break_iterator; + return v8::ThrowException(v8::Exception::Error( + v8::String::New("Failed to create break iterator."))); + } + + if (break_iterator_template_.IsEmpty()) { + v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New()); + + raw_template->SetClassName(v8::String::New("v8Locale.v8BreakIterator")); + + // Define internal field count on instance template. + v8::Local<v8::ObjectTemplate> object_template = + raw_template->InstanceTemplate(); + + // Set aside internal fields for icu break iterator and adopted text. + object_template->SetInternalFieldCount(2); + + // Define all of the prototype methods on prototype template. + v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate(); + proto->Set(v8::String::New("adoptText"), + v8::FunctionTemplate::New(BreakIteratorAdoptText)); + proto->Set(v8::String::New("first"), + v8::FunctionTemplate::New(BreakIteratorFirst)); + proto->Set(v8::String::New("next"), + v8::FunctionTemplate::New(BreakIteratorNext)); + proto->Set(v8::String::New("current"), + v8::FunctionTemplate::New(BreakIteratorCurrent)); + proto->Set(v8::String::New("breakType"), + v8::FunctionTemplate::New(BreakIteratorBreakType)); + + break_iterator_template_ = + v8::Persistent<v8::FunctionTemplate>::New(raw_template); + } + + // Create an empty object wrapper. + v8::Local<v8::Object> local_object = + break_iterator_template_->GetFunction()->NewInstance(); + v8::Persistent<v8::Object> wrapper = + v8::Persistent<v8::Object>::New(local_object); + + // Set break iterator as internal field of the resulting JS object. + wrapper->SetPointerInInternalField(0, break_iterator); + // Make sure that the pointer to adopted text is NULL. 
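// (ResetAdoptedText unconditionally deletes whatever this field points to,
// so it must start out as NULL rather than uninitialized garbage.)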
+ wrapper->SetPointerInInternalField(1, NULL); + + // Make object handle weak so we can delete iterator once GC kicks in. + wrapper.MakeWeak(NULL, DeleteBreakIterator); + + return wrapper; +} + +} } // namespace v8::internal diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h new file mode 100644 index 00000000..473bc893 --- /dev/null +++ b/src/extensions/experimental/break-iterator.h @@ -0,0 +1,89 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_ +#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_ + +#include <v8.h> + +#include "unicode/uversion.h" + +namespace U_ICU_NAMESPACE { +class BreakIterator; +class UnicodeString; +} + +namespace v8 { +namespace internal { + +class BreakIterator { + public: + static v8::Handle<v8::Value> JSBreakIterator(const v8::Arguments& args); + + // Helper methods for various bindings. + + // Unpacks break iterator object from corresponding JavaScript object. + static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj); + + // Deletes the old value and sets the adopted text in + // corresponding JavaScript object. + static UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj, + v8::Handle<v8::Value> text_value); + + // Release memory we allocated for the BreakIterator once the JS object that + // holds the pointer gets garbage collected. + static void DeleteBreakIterator(v8::Persistent<v8::Value> object, + void* param); + + // Assigns new text to the iterator. + static v8::Handle<v8::Value> BreakIteratorAdoptText( + const v8::Arguments& args); + + // Moves iterator to the beginning of the string and returns new position. + static v8::Handle<v8::Value> BreakIteratorFirst(const v8::Arguments& args); + + // Moves iterator to the next position and returns it. + static v8::Handle<v8::Value> BreakIteratorNext(const v8::Arguments& args); + + // Returns current iterator's current position. 
+ static v8::Handle<v8::Value> BreakIteratorCurrent( + const v8::Arguments& args); + + // Returns type of the item from current position. + // This call is only valid for word break iterators. Others just return 0. + static v8::Handle<v8::Value> BreakIteratorBreakType( + const v8::Arguments& args); + + private: + BreakIterator() {} + + static v8::Persistent<v8::FunctionTemplate> break_iterator_template_; +}; + +} } // namespace v8::internal + +#endif // V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_ diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp index 4d7a9363..761f4c79 100644 --- a/src/extensions/experimental/experimental.gyp +++ b/src/extensions/experimental/experimental.gyp @@ -37,6 +37,8 @@ 'target_name': 'i18n_api', 'type': 'static_library', 'sources': [ + 'break-iterator.cc', + 'break-iterator.h', 'i18n-extension.cc', 'i18n-extension.h', ], diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc index a721ba5e..e65fdcc8 100644 --- a/src/extensions/experimental/i18n-extension.cc +++ b/src/extensions/experimental/i18n-extension.cc @@ -30,6 +30,7 @@ #include <algorithm> #include <string> +#include "break-iterator.h" #include "unicode/locid.h" #include "unicode/uloc.h" @@ -87,6 +88,23 @@ const char* const I18NExtension::kSource = " var displayLocale = this.displayLocale_(optDisplayLocale);" " native function NativeJSDisplayName();" " return NativeJSDisplayName(this.locale, displayLocale);" + "};" + "v8Locale.v8BreakIterator = function(locale, type) {" + " native function NativeJSBreakIterator();" + " var iterator = NativeJSBreakIterator(locale, type);" + " iterator.type = type;" + " return iterator;" + "};" + "v8Locale.v8BreakIterator.BreakType = {" + " 'unknown': -1," + " 'none': 0," + " 'number': 100," + " 'word': 200," + " 'kana': 300," + " 'ideo': 400" + "};" + "v8Locale.prototype.v8CreateBreakIterator = function(type) {" + " return new v8Locale.v8BreakIterator(this.locale, type);" "};"; v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction( @@ -107,6 +125,8 @@ v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction( return v8::FunctionTemplate::New(JSDisplayRegion); } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) { return v8::FunctionTemplate::New(JSDisplayName); + } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) { + return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator); } return v8::Handle<v8::FunctionTemplate>(); diff --git a/src/flag-definitions.h b/src/flag-definitions.h index 57defdc7..2566766e 100644 --- a/src/flag-definitions.h +++ b/src/flag-definitions.h @@ -97,11 +97,7 @@ private: #define FLAG FLAG_FULL // Flags for Crankshaft. 
-#ifdef V8_TARGET_ARCH_IA32 DEFINE_bool(crankshaft, true, "use crankshaft") -#else -DEFINE_bool(crankshaft, false, "use crankshaft") -#endif DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter") DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation") DEFINE_bool(build_lithium, true, "use lithium chunk builder") @@ -110,7 +106,6 @@ DEFINE_bool(use_lithium, true, "use lithium code generator") DEFINE_bool(use_range, true, "use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis") DEFINE_bool(use_gvn, true, "use hydrogen global value numbering") -DEFINE_bool(use_peeling, false, "use loop peeling") DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true, "use function inlining") DEFINE_bool(limit_inlining, true, "limit code size growth from inlining") @@ -120,6 +115,7 @@ DEFINE_bool(time_hydrogen, false, "timing for hydrogen") DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file") DEFINE_bool(trace_inlining, false, "trace inlining decisions") DEFINE_bool(trace_alloc, false, "trace register allocator") +DEFINE_bool(trace_all_uses, false, "trace all use positions") DEFINE_bool(trace_range, false, "trace range analysis") DEFINE_bool(trace_gvn, false, "trace global value numbering") DEFINE_bool(trace_representation, false, "trace representation types") @@ -134,11 +130,8 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") DEFINE_bool(aggressive_loop_invariant_motion, true, "aggressive motion of instructions out of loops") -#ifdef V8_TARGET_ARCH_IA32 DEFINE_bool(use_osr, true, "use on-stack replacement") -#else -DEFINE_bool(use_osr, false, "use on-stack replacement") -#endif + DEFINE_bool(trace_osr, false, "trace on-stack replacement") DEFINE_int(stress_runs, 0, "number of stress runs") DEFINE_bool(optimize_closures, true, "optimize closures") @@ -269,6 +262,12 @@ DEFINE_bool(use_idle_notification, true, // ic.cc DEFINE_bool(use_ic, true, "use inline caching") +#ifdef LIVE_OBJECT_LIST +// liveobjectlist.cc +DEFINE_string(lol_workdir, NULL, "path for lol temp files") +DEFINE_bool(verify_lol, false, "perform debugging verification for lol") +#endif + // macro-assembler-ia32.cc DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") @@ -357,7 +356,7 @@ DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the " "debugger agent in another process") DEFINE_bool(debugger_agent, false, "Enable debugger agent") DEFINE_int(debugger_port, 5858, "Port to use for remote debugging") -DEFINE_string(map_counters, NULL, "Map counters to a file") +DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSArguments(), "Pass all remaining arguments to the script. 
Alias for \"--\".") @@ -378,6 +377,7 @@ DEFINE_bool(debug_script_collected_events, true, DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects") +DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk") // // Debug only flags diff --git a/src/frame-element.h b/src/frame-element.h index 3b91b9d3..ae5d6a1b 100644 --- a/src/frame-element.h +++ b/src/frame-element.h @@ -113,6 +113,10 @@ class FrameElement BASE_EMBEDDED { static ZoneObjectList* ConstantList(); + static bool ConstantPoolOverflowed() { + return !DataField::is_valid(ConstantList()->length()); + } + // Clear the constants indirection table. static void ClearConstantList() { ConstantList()->Clear(); diff --git a/src/full-codegen.cc b/src/full-codegen.cc index 252fb925..b3dc95bd 100644 --- a/src/full-codegen.cc +++ b/src/full-codegen.cc @@ -739,25 +739,13 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { case Token::SHL: case Token::SHR: case Token::SAR: { - // Figure out if either of the operands is a constant. - ConstantOperand constant = ShouldInlineSmiCase(op) - ? GetConstantOperand(op, left, right) - : kNoConstants; - - // Load only the operands that we need to materialize. - if (constant == kNoConstants) { - VisitForStackValue(left); - VisitForAccumulatorValue(right); - } else if (constant == kRightConstant) { - VisitForAccumulatorValue(left); - } else { - ASSERT(constant == kLeftConstant); - VisitForAccumulatorValue(right); - } + // Load both operands. + VisitForStackValue(left); + VisitForAccumulatorValue(right); SetSourcePosition(expr->position()); if (ShouldInlineSmiCase(op)) { - EmitInlineSmiBinaryOp(expr, op, mode, left, right, constant); + EmitInlineSmiBinaryOp(expr, op, mode, left, right); } else { EmitBinaryOp(op, mode); } diff --git a/src/full-codegen.h b/src/full-codegen.h index 655e560e..5fb11b43 100644 --- a/src/full-codegen.h +++ b/src/full-codegen.h @@ -274,12 +274,6 @@ class FullCodeGenerator: public AstVisitor { ForwardBailoutStack* const parent_; }; - enum ConstantOperand { - kNoConstants, - kLeftConstant, - kRightConstant - }; - // Type of a member function that generates inline code for a native function. typedef void (FullCodeGenerator::*InlineFunctionGenerator) (ZoneList<Expression*>*); @@ -298,11 +292,6 @@ class FullCodeGenerator: public AstVisitor { // operation. bool ShouldInlineSmiCase(Token::Value op); - // Compute which (if any) of the operands is a compile-time constant. - ConstantOperand GetConstantOperand(Token::Value op, - Expression* left, - Expression* right); - // Helper function to convert a pure value into a test context. The value // is expected on the stack or the accumulator, depending on the platform. // See the platform-specific implementation for details. @@ -432,6 +421,14 @@ class FullCodeGenerator: public AstVisitor { Label* done); void EmitVariableLoad(Variable* expr); + enum ResolveEvalFlag { + SKIP_CONTEXT_LOOKUP, + PERFORM_CONTEXT_LOOKUP + }; + + // Expects the arguments and the function already pushed. + void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count); + // Platform-specific support for allocating a new closure based on // the given function info. 
void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure); @@ -457,34 +454,7 @@ class FullCodeGenerator: public AstVisitor { Token::Value op, OverwriteMode mode, Expression* left, - Expression* right, - ConstantOperand constant); - - void EmitConstantSmiBinaryOp(Expression* expr, - Token::Value op, - OverwriteMode mode, - bool left_is_constant_smi, - Smi* value); - - void EmitConstantSmiBitOp(Expression* expr, - Token::Value op, - OverwriteMode mode, - Smi* value); - - void EmitConstantSmiShiftOp(Expression* expr, - Token::Value op, - OverwriteMode mode, - Smi* value); - - void EmitConstantSmiAdd(Expression* expr, - OverwriteMode mode, - bool left_is_constant_smi, - Smi* value); - - void EmitConstantSmiSub(Expression* expr, - OverwriteMode mode, - bool left_is_constant_smi, - Smi* value); + Expression* right); // Assign to the given expression as if via '='. The right-hand-side value // is expected in the accumulator. diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc index c26ecf5e..5136dedd 100644 --- a/src/gdb-jit.cc +++ b/src/gdb-jit.cc @@ -395,7 +395,7 @@ class ELF BASE_EMBEDDED { void WriteHeader(Writer* w) { ASSERT(w->position() == 0); Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>(); -#if defined(V8_TARGET_ARCH_IA32) +#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM) const uint8_t ident[16] = { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #elif defined(V8_TARGET_ARCH_X64) @@ -413,6 +413,10 @@ class ELF BASE_EMBEDDED { // System V ABI, AMD64 Supplement // http://www.x86-64.org/documentation/abi.pdf header->machine = 62; +#elif defined(V8_TARGET_ARCH_ARM) + // Set to EM_ARM, defined as 40, in "ARM ELF File Format" at + // infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf + header->machine = 40; #else #error Unsupported target architecture. #endif @@ -503,8 +507,7 @@ class ELFSymbol BASE_EMBEDDED { Binding binding() const { return static_cast<Binding>(info >> 4); } - -#if defined(V8_TARGET_ARCH_IA32) +#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM) struct SerializedLayout { SerializedLayout(uint32_t name, uintptr_t value, @@ -857,14 +860,20 @@ class DebugLineSection : public ELFSection { Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>(); uintptr_t start = w->position(); + // Used for special opcodes + const int8_t line_base = 1; + const uint8_t line_range = 7; + const int8_t max_line_incr = (line_base + line_range - 1); + const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1; + w->Write<uint16_t>(2); // Field version. Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>(); uintptr_t prologue_start = w->position(); w->Write<uint8_t>(1); // Field minimum_instruction_length. w->Write<uint8_t>(1); // Field default_is_stmt. - w->Write<int8_t>(0); // Field line_base. - w->Write<uint8_t>(2); // Field line_range. - w->Write<uint8_t>(DW_LNS_NEGATE_STMT + 1); // Field opcode_base. + w->Write<int8_t>(line_base); // Field line_base. + w->Write<uint8_t>(line_range); // Field line_range. + w->Write<uint8_t>(opcode_base); // Field opcode_base. w->Write<uint8_t>(0); // DW_LNS_COPY operands count. w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count. w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count. 
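The constants above parameterize the special-opcode encoding added in the next hunk; a small JavaScript model of the selection logic (illustrative only: encodeRow is a made-up name, and the real code emits the bytes through the Writer):

  var line_base = 1, line_range = 7;
  var opcode_base = 7;                 // DW_LNS_NEGATE_STMT (6 in DWARF 2) + 1
  var max_line_incr = line_base + line_range - 1;
  function encodeRow(pc_diff, line_diff) {
    var special = (line_diff - line_base) + (line_range * pc_diff) + opcode_base;
    if (special >= opcode_base && special <= 255 &&
        line_diff >= line_base && line_diff <= max_line_incr) {
      return [special];  // one byte advances both pc and line
    }
    // Fall back to the verbose three-opcode sequence.
    return ['DW_LNS_ADVANCE_PC', pc_diff,
            'DW_LNS_ADVANCE_LINE', line_diff,
            'DW_LNS_COPY'];
  }
  print(encodeRow(3, 2));   // [29]: (2 - 1) + 7 * 3 + 7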
@@ -881,6 +890,7 @@ class DebugLineSection : public ELFSection { WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t)); w->Write<intptr_t>(desc_->CodeStart()); + w->Write<uint8_t>(DW_LNS_COPY); intptr_t pc = 0; intptr_t line = 1; @@ -888,29 +898,66 @@ class DebugLineSection : public ELFSection { List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info(); pc_info->Sort(&ComparePCInfo); - for (int i = 0; i < pc_info->length(); i++) { + + int pc_info_length = pc_info->length(); + for (int i = 0; i < pc_info_length; i++) { GDBJITLineInfo::PCInfo* info = &pc_info->at(i); - uintptr_t pc_diff = info->pc_ - pc; ASSERT(info->pc_ >= pc); - if (pc_diff != 0) { - w->Write<uint8_t>(DW_LNS_ADVANCE_PC); - w->WriteSLEB128(pc_diff); - pc += pc_diff; - } - intptr_t line_diff = desc_->GetScriptLineNumber(info->pos_) - line; - if (line_diff != 0) { - w->Write<uint8_t>(DW_LNS_ADVANCE_LINE); - w->WriteSLEB128(line_diff); - line += line_diff; + + // Reduce bloating in the debug line table by removing duplicate line + // entries (per DWARF2 standard). + intptr_t new_line = desc_->GetScriptLineNumber(info->pos_); + if (new_line == line) { + continue; } - if (is_statement != info->is_statement_) { + + // Mark statement boundaries. For a better debugging experience, mark + // the last pc address in the function as a statement (e.g. "}"), so that + // a user can see the result of the last line executed in the function, + // should control reach the end. + if ((i+1) == pc_info_length) { + if (!is_statement) { + w->Write<uint8_t>(DW_LNS_NEGATE_STMT); + } + } else if (is_statement != info->is_statement_) { w->Write<uint8_t>(DW_LNS_NEGATE_STMT); is_statement = !is_statement; } - if (pc_diff != 0 || i == 0) { + + // Generate special opcodes, if possible. This results in more compact + // debug line tables. See the DWARF 2.0 standard to learn more about + // special opcodes. + uintptr_t pc_diff = info->pc_ - pc; + intptr_t line_diff = new_line - line; + + // Compute special opcode (see DWARF 2.0 standard) + intptr_t special_opcode = (line_diff - line_base) + + (line_range * pc_diff) + opcode_base; + + // If special_opcode is less than or equal to 255, it can be used as a + // special opcode. If line_diff is larger than the max line increment + // allowed for a special opcode, or if line_diff is less than the minimum + // line that can be added to the line register (i.e. line_base), then + // special_opcode can't be used. + if ((special_opcode >= opcode_base) && (special_opcode <= 255) && + (line_diff <= max_line_incr) && (line_diff >= line_base)) { + w->Write<uint8_t>(special_opcode); + } else { + w->Write<uint8_t>(DW_LNS_ADVANCE_PC); + w->WriteSLEB128(pc_diff); + w->Write<uint8_t>(DW_LNS_ADVANCE_LINE); + w->WriteSLEB128(line_diff); w->Write<uint8_t>(DW_LNS_COPY); } + + // Increment the pc and line operands. + pc += pc_diff; + line += line_diff; } + // Advance the pc to the end of the routine, since the end sequence opcode + // requires this. 
+ w->Write<uint8_t>(DW_LNS_ADVANCE_PC); + w->WriteSLEB128(desc_->CodeSize() - pc); WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0); total_length.set(static_cast<uint32_t>(w->position() - start)); return true; @@ -1237,6 +1284,20 @@ static void DestroyCodeEntry(JITCodeEntry* entry) { static void RegisterCodeEntry(JITCodeEntry* entry) { +#if defined(DEBUG) && !defined(WIN32) + static int file_num = 0; + if (FLAG_gdbjit_dump) { + static const int kMaxFileNameSize = 64; + static const char* kElfFilePrefix = "/tmp/elfdump"; + static const char* kObjFileExt = ".o"; + char file_name[64]; + + OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s", + kElfFilePrefix, file_num++, kObjFileExt); + WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_); + } +#endif + entry->next_ = __jit_debug_descriptor.first_entry_; if (entry->next_ != NULL) entry->next_->prev_ = entry; __jit_debug_descriptor.first_entry_ = @@ -1294,7 +1355,13 @@ static bool SameCodeObjects(void* key1, void* key2) { } -static HashMap entries(&SameCodeObjects); +static HashMap* GetEntries() { + static HashMap* entries = NULL; + if (entries == NULL) { + entries = new HashMap(&SameCodeObjects); + } + return entries; +} static uint32_t HashForCodeObject(Code* code) { @@ -1344,9 +1411,8 @@ static void AddUnwindInfo(CodeDescription *desc) { #ifdef V8_TARGET_ARCH_X64 if (desc->tag() == GDBJITInterface::FUNCTION) { // To avoid propagating unwinding information through - // compilation pipeline we rely on function prologue - // and epilogue being the same for all code objects generated - // by the full code generator. + // compilation pipeline we use an approximation. + // For most use cases this should not affect usability. static const int kFramePointerPushOffset = 1; static const int kFramePointerSetOffset = 4; static const int kFramePointerPopOffset = -3; @@ -1360,19 +1426,6 @@ static void AddUnwindInfo(CodeDescription *desc) { uintptr_t frame_pointer_pop_address = desc->CodeEnd() + kFramePointerPopOffset; -#ifdef DEBUG - static const uint8_t kFramePointerPushInstruction = 0x48; // push ebp - static const uint16_t kFramePointerSetInstruction = 0x5756; // mov ebp, esp - static const uint8_t kFramePointerPopInstruction = 0xBE; // pop ebp - - ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_push_address) == - kFramePointerPushInstruction); - ASSERT(*reinterpret_cast<uint16_t*>(frame_pointer_set_address) == - kFramePointerSetInstruction); - ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_pop_address) == - kFramePointerPopInstruction); -#endif - desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH, frame_pointer_push_address); desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET, @@ -1398,7 +1451,7 @@ void GDBJITInterface::AddCode(const char* name, if (!FLAG_gdbjit) return; AssertNoAllocation no_gc; - HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true); + HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true); if (e->value != NULL && !IsLineInfoTagged(e->value)) return; GDBJITLineInfo* lineinfo = UntagLineInfo(e->value); @@ -1411,7 +1464,7 @@ void GDBJITInterface::AddCode(const char* name, if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) { delete lineinfo; - entries.Remove(code, HashForCodeObject(code)); + GetEntries()->Remove(code, HashForCodeObject(code)); return; } @@ -1464,7 +1517,9 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) { void GDBJITInterface::RemoveCode(Code* code) { if (!FLAG_gdbjit) return; - HashMap::Entry* e 
= entries.Lookup(code, HashForCodeObject(code), false); + HashMap::Entry* e = GetEntries()->Lookup(code, + HashForCodeObject(code), + false); if (e == NULL) return; if (IsLineInfoTagged(e->value)) { @@ -1475,14 +1530,14 @@ void GDBJITInterface::RemoveCode(Code* code) { DestroyCodeEntry(entry); } e->value = NULL; - entries.Remove(code, HashForCodeObject(code)); + GetEntries()->Remove(code, HashForCodeObject(code)); } void GDBJITInterface::RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info) { ASSERT(!IsLineInfoTagged(line_info)); - HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true); + HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true); ASSERT(e->value == NULL); e->value = TagLineInfo(line_info); } diff --git a/src/handles-inl.h b/src/handles-inl.h index b3135125..c0f2fda9 100644 --- a/src/handles-inl.h +++ b/src/handles-inl.h @@ -36,14 +36,14 @@ namespace v8 { namespace internal { -template<class T> +template<typename T> Handle<T>::Handle(T* obj) { ASSERT(!obj->IsFailure()); location_ = HandleScope::CreateHandle(obj); } -template <class T> +template <typename T> inline T* Handle<T>::operator*() const { ASSERT(location_ != NULL); ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue); diff --git a/src/handles.cc b/src/handles.cc index d625d644..efef095a 100644 --- a/src/handles.cc +++ b/src/handles.cc @@ -242,17 +242,21 @@ Handle<Object> SetPrototype(Handle<JSFunction> function, Handle<Object> SetProperty(Handle<JSObject> object, Handle<String> key, Handle<Object> value, - PropertyAttributes attributes) { - CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes), Object); + PropertyAttributes attributes, + StrictModeFlag strict_mode) { + CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes, strict_mode), + Object); } Handle<Object> SetProperty(Handle<Object> object, Handle<Object> key, Handle<Object> value, - PropertyAttributes attributes) { + PropertyAttributes attributes, + StrictModeFlag strict_mode) { CALL_HEAP_FUNCTION( - Runtime::SetObjectProperty(object, key, value, attributes), Object); + Runtime::SetObjectProperty(object, key, value, attributes, strict_mode), + Object); } @@ -261,7 +265,9 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object, Handle<Object> value, PropertyAttributes attributes) { CALL_HEAP_FUNCTION( - Runtime::ForceSetObjectProperty(object, key, value, attributes), Object); + Runtime::ForceSetObjectProperty( + object, key, value, attributes), + Object); } @@ -304,10 +310,12 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object, Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object, Handle<String> key, Handle<Object> value, - PropertyAttributes attributes) { + PropertyAttributes attributes, + StrictModeFlag strict_mode) { CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key, *value, - attributes), + attributes, + strict_mode), Object); } @@ -420,7 +428,8 @@ Handle<String> SubString(Handle<String> str, Handle<Object> SetElement(Handle<JSObject> object, uint32_t index, - Handle<Object> value) { + Handle<Object> value, + StrictModeFlag strict_mode) { if (object->HasPixelElements() || object->HasExternalArrayElements()) { if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) { bool has_exception; @@ -429,16 +438,18 @@ Handle<Object> SetElement(Handle<JSObject> object, value = number; } } - CALL_HEAP_FUNCTION(object->SetElement(index, *value), Object); + CALL_HEAP_FUNCTION(object->SetElement(index, *value, strict_mode), Object); } 
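Note the pattern running through these handles.cc hunks: a StrictModeFlag parameter is threaded from the public Handle-based setters down into the raw object operations, so an assignment that cannot be performed can raise a TypeError under ES5 strict mode instead of being silently dropped. A minimal sketch of the idea with hypothetical names (not V8's real classes):

  #include <map>
  #include <stdexcept>
  #include <string>

  enum StrictModeFlag { kNonStrictMode, kStrictMode };

  class Obj {  // hypothetical stand-in for a JSObject-like receiver
   public:
    // Mirrors the shape of the setters above: the flag rides along with
    // every call instead of being looked up at the point of failure.
    void SetProperty(const std::string& key, int value,
                     StrictModeFlag strict_mode) {
      if (frozen_) {
        // ES5 8.12.5: a failed [[Put]] throws in strict code, no-ops otherwise.
        if (strict_mode == kStrictMode)
          throw std::runtime_error("TypeError: cannot assign to read-only property");
        return;
      }
      properties_[key] = value;
    }
    void Freeze() { frozen_ = true; }
   private:
    bool frozen_ = false;
    std::map<std::string, int> properties_;
  };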
Handle<Object> SetOwnElement(Handle<JSObject> object, uint32_t index, - Handle<Object> value) { + Handle<Object> value, + StrictModeFlag strict_mode) { ASSERT(!object->HasPixelElements()); ASSERT(!object->HasExternalArrayElements()); - CALL_HEAP_FUNCTION(object->SetElement(index, *value, false), Object); + CALL_HEAP_FUNCTION(object->SetElement(index, *value, strict_mode, false), + Object); } @@ -834,49 +845,41 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared, } -bool CompileLazy(Handle<JSFunction> function, - ClearExceptionFlag flag) { +static bool CompileLazyFunction(Handle<JSFunction> function, + ClearExceptionFlag flag, + InLoopFlag in_loop_flag) { bool result = true; if (function->shared()->is_compiled()) { function->ReplaceCode(function->shared()->code()); function->shared()->set_code_age(0); } else { CompilationInfo info(function); + if (in_loop_flag == IN_LOOP) info.MarkAsInLoop(); result = CompileLazyHelper(&info, flag); ASSERT(!result || function->is_compiled()); } - if (result && function->is_compiled()) { - PROFILE(FunctionCreateEvent(*function)); - } return result; } +bool CompileLazy(Handle<JSFunction> function, + ClearExceptionFlag flag) { + return CompileLazyFunction(function, flag, NOT_IN_LOOP); +} + + bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) { - bool result = true; - if (function->shared()->is_compiled()) { - function->ReplaceCode(function->shared()->code()); - function->shared()->set_code_age(0); - } else { - CompilationInfo info(function); - info.MarkAsInLoop(); - result = CompileLazyHelper(&info, flag); - ASSERT(!result || function->is_compiled()); - } - if (result && function->is_compiled()) { - PROFILE(FunctionCreateEvent(*function)); - } - return result; + return CompileLazyFunction(function, flag, IN_LOOP); } -bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) { +bool CompileOptimized(Handle<JSFunction> function, + int osr_ast_id, + ClearExceptionFlag flag) { CompilationInfo info(function); info.SetOptimizing(osr_ast_id); - bool result = CompileLazyHelper(&info, KEEP_EXCEPTION); - if (result) PROFILE(FunctionCreateEvent(*function)); - return result; + return CompileLazyHelper(&info, flag); } diff --git a/src/handles.h b/src/handles.h index d95ca911..bb519688 100644 --- a/src/handles.h +++ b/src/handles.h @@ -39,7 +39,7 @@ namespace internal { // Handles are only valid within a HandleScope. // When a handle is created for an object a cell is allocated in the heap. -template<class T> +template<typename T> class Handle { public: INLINE(explicit Handle(T** location)) { location_ = location; } @@ -112,15 +112,7 @@ class HandleScope { } ~HandleScope() { - current_.next = prev_next_; - current_.level--; - if (current_.limit != prev_limit_) { - current_.limit = prev_limit_; - DeleteExtensions(); - } -#ifdef DEBUG - ZapRange(prev_next_, prev_limit_); -#endif + CloseScope(); } // Counts the number of allocated handles. @@ -148,6 +140,26 @@ class HandleScope { static Address current_limit_address(); static Address current_level_address(); + // Closes the HandleScope (invalidating all handles + // created in the scope of the HandleScope) and returns + // a Handle backed by the parent scope holding the + // value of the argument handle. + template <typename T> + Handle<T> CloseAndEscape(Handle<T> handle_value) { + T* value = *handle_value; + // Throw away all handles in the current scope. + CloseScope(); + // Allocate one handle in the parent scope. 
+ ASSERT(current_.level > 0); + Handle<T> result(CreateHandle<T>(value)); + // Reinitialize the current scope (so that it's ready + // to be used or closed again). + prev_next_ = current_.next; + prev_limit_ = current_.limit; + current_.level++; + return result; + } + private: // Prevent heap allocation or illegal handle scopes. HandleScope(const HandleScope&); @@ -155,9 +167,23 @@ class HandleScope { void* operator new(size_t size); void operator delete(void* size_t); + inline void CloseScope() { + current_.next = prev_next_; + current_.level--; + if (current_.limit != prev_limit_) { + current_.limit = prev_limit_; + DeleteExtensions(); + } +#ifdef DEBUG + ZapRange(prev_next_, prev_limit_); +#endif + } + static v8::ImplementationUtilities::HandleScopeData current_; - Object** const prev_next_; - Object** const prev_limit_; + // Holds values on entry. The prev_next_ value is never NULL + // on_entry, but is set to NULL when this scope is closed. + Object** prev_next_; + Object** prev_limit_; // Extend the handle scope making room for more handles. static internal::Object** Extend(); @@ -197,12 +223,14 @@ Handle<String> FlattenGetString(Handle<String> str); Handle<Object> SetProperty(Handle<JSObject> object, Handle<String> key, Handle<Object> value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); Handle<Object> SetProperty(Handle<Object> object, Handle<Object> key, Handle<Object> value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); Handle<Object> ForceSetProperty(Handle<JSObject> object, Handle<Object> key, @@ -233,15 +261,18 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object, Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object, Handle<String> key, Handle<Object> value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); Handle<Object> SetElement(Handle<JSObject> object, uint32_t index, - Handle<Object> value); + Handle<Object> value, + StrictModeFlag strict_mode); Handle<Object> SetOwnElement(Handle<JSObject> object, uint32_t index, - Handle<Object> value); + Handle<Object> value, + StrictModeFlag strict_mode); Handle<Object> GetProperty(Handle<JSObject> obj, const char* name); @@ -354,7 +385,9 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag); bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag); -bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id); +bool CompileOptimized(Handle<JSFunction> function, + int osr_ast_id, + ClearExceptionFlag flag); class NoHandleAllocation BASE_EMBEDDED { public: diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc index 732d2f41..07b631fa 100644 --- a/src/heap-profiler.cc +++ b/src/heap-profiler.cc @@ -911,22 +911,27 @@ static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) { class CountingRetainersIterator { public: CountingRetainersIterator(const JSObjectsCluster& child_cluster, + HeapEntriesAllocator* allocator, HeapEntriesMap* map) - : child_(ClusterAsHeapObject(child_cluster)), map_(map) { + : child_(ClusterAsHeapObject(child_cluster)), + allocator_(allocator), + map_(map) { if (map_->Map(child_) == NULL) - map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder); + map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder); } void Call(const JSObjectsCluster& cluster, const NumberAndSizeInfo& number_and_size) { if (map_->Map(ClusterAsHeapObject(cluster)) == NULL) 
map_->Pair(ClusterAsHeapObject(cluster), + allocator_, HeapEntriesMap::kHeapEntryPlaceholder); map_->CountReference(ClusterAsHeapObject(cluster), child_); } private: HeapObject* child_; + HeapEntriesAllocator* allocator_; HeapEntriesMap* map_; }; @@ -934,6 +939,7 @@ class CountingRetainersIterator { class AllocatingRetainersIterator { public: AllocatingRetainersIterator(const JSObjectsCluster& child_cluster, + HeapEntriesAllocator*, HeapEntriesMap* map) : child_(ClusterAsHeapObject(child_cluster)), map_(map) { child_entry_ = map_->Map(child_); @@ -966,8 +972,9 @@ template<class RetainersIterator> class AggregatingRetainerTreeIterator { public: explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser, + HeapEntriesAllocator* allocator, HeapEntriesMap* map) - : coarser_(coarser), map_(map) { + : coarser_(coarser), allocator_(allocator), map_(map) { } void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) { @@ -981,25 +988,28 @@ class AggregatingRetainerTreeIterator { tree->ForEach(&retainers_aggregator); tree_to_iterate = &dest_tree_; } - RetainersIterator iterator(cluster, map_); + RetainersIterator iterator(cluster, allocator_, map_); tree_to_iterate->ForEach(&iterator); } private: ClustersCoarser* coarser_; + HeapEntriesAllocator* allocator_; HeapEntriesMap* map_; }; -class AggregatedRetainerTreeAllocator { +class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator { public: AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot, int* root_child_index) : snapshot_(snapshot), root_child_index_(root_child_index) { } + ~AggregatedRetainerTreeAllocator() { } - HeapEntry* GetEntry( - HeapObject* obj, int children_count, int retainers_count) { + HeapEntry* AllocateEntry( + HeapThing ptr, int children_count, int retainers_count) { + HeapObject* obj = reinterpret_cast<HeapObject*>(ptr); JSObjectsCluster cluster = HeapObjectAsCluster(obj); const char* name = cluster.GetSpecialCaseName(); if (name == NULL) { @@ -1018,12 +1028,13 @@ class AggregatedRetainerTreeAllocator { template<class Iterator> void AggregatedHeapSnapshotGenerator::IterateRetainers( - HeapEntriesMap* entries_map) { + HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) { RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile(); AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1( - p->coarser(), entries_map); + p->coarser(), allocator, entries_map); p->retainers_tree()->ForEach(&agg_ret_iter_1); - AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(NULL, entries_map); + AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2( + NULL, allocator, entries_map); p->aggregator()->output_tree().ForEach(&agg_ret_iter_2); } @@ -1042,7 +1053,9 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) { agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter); histogram_entities_count += counting_cons_iter.entities_count(); HeapEntriesMap entries_map; - IterateRetainers<CountingRetainersIterator>(&entries_map); + int root_child_index = 0; + AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index); + IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map); histogram_entities_count += entries_map.entries_count(); histogram_children_count += entries_map.total_children_count(); histogram_retainers_count += entries_map.total_retainers_count(); @@ -1056,10 +1069,7 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) { snapshot->AllocateEntries(histogram_entities_count, histogram_children_count, 
histogram_retainers_count); - snapshot->AddEntry(HeapSnapshot::kInternalRootObject, - root_children_count, - 0); - int root_child_index = 0; + snapshot->AddRootEntry(root_children_count); for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) { if (agg_snapshot_->info()[i].bytes() > 0) { AddEntryFromAggregatedSnapshot(snapshot, @@ -1075,11 +1085,10 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) { AllocatingConstructorHeapProfileIterator alloc_cons_iter( snapshot, &root_child_index); agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter); - AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index); - entries_map.UpdateEntries(&allocator); + entries_map.AllocateEntries(); // Fill up references. - IterateRetainers<AllocatingRetainersIterator>(&entries_map); + IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map); snapshot->SetDominatorsToSelf(); } diff --git a/src/heap-profiler.h b/src/heap-profiler.h index 90c664ed..20ba457c 100644 --- a/src/heap-profiler.h +++ b/src/heap-profiler.h @@ -340,6 +340,7 @@ class AggregatedHeapSnapshot { class HeapEntriesMap; +class HeapEntriesAllocator; class HeapSnapshot; class AggregatedHeapSnapshotGenerator { @@ -354,7 +355,8 @@ class AggregatedHeapSnapshotGenerator { void CalculateStringsStats(); void CollectStats(HeapObject* obj); template<class Iterator> - void IterateRetainers(HeapEntriesMap* entries_map); + void IterateRetainers( + HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map); AggregatedHeapSnapshot* agg_snapshot_; }; diff --git a/src/heap.cc b/src/heap.cc index f88ebda5..34ab9aaf 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -134,7 +134,7 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC; int Heap::mc_count_ = 0; int Heap::ms_count_ = 0; -int Heap::gc_count_ = 0; +unsigned int Heap::gc_count_ = 0; GCTracer* Heap::tracer_ = NULL; @@ -515,7 +515,6 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { #ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_gc) HeapProfiler::WriteSample(); - if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions(); #endif return next_gc_likely_to_collect_more; @@ -845,8 +844,6 @@ void Heap::MarkCompactPrologue(bool is_compacting) { ContextSlotCache::Clear(); DescriptorLookupCache::Clear(); - RuntimeProfiler::MarkCompactPrologue(is_compacting); - CompilationCache::MarkCompactPrologue(); CompletelyClearInstanceofCache(); @@ -1057,20 +1054,13 @@ void Heap::Scavenge() { // Scavenge object reachable from the global contexts list directly. scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_)); - // Scavenge objects reachable from the runtime-profiler sampler - // window directly. 
- Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress(); - int sampler_window_size = RuntimeProfiler::SamplerWindowSize(); - scavenge_visitor.VisitPointers( - sampler_window_address, - sampler_window_address + sampler_window_size); - new_space_front = DoScavenge(&scavenge_visitor, new_space_front); UpdateNewSpaceReferencesInExternalStringTable( &UpdateNewSpaceReferenceInExternalStringTableEntry); LiveObjectList::UpdateReferencesForScavengeGC(); + RuntimeProfiler::UpdateSamplesAfterScavenge(); ASSERT(new_space_front == new_space_.top()); @@ -1350,9 +1340,8 @@ class ScavengingVisitor : public StaticVisitorBase { HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address())); #if defined(ENABLE_LOGGING_AND_PROFILING) if (Logger::is_logging() || CpuProfiler::is_profiling()) { - if (target->IsJSFunction()) { - PROFILE(FunctionMoveEvent(source->address(), target->address())); - PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target))); + if (target->IsSharedFunctionInfo()) { + PROFILE(SFIMoveEvent(source->address(), target->address())); } } #endif @@ -2924,9 +2913,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) { // constructor to the function. Object* result; { MaybeObject* maybe_result = - JSObject::cast(prototype)->SetProperty(constructor_symbol(), - function, - DONT_ENUM); + JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes( + constructor_symbol(), function, DONT_ENUM); if (!maybe_result->ToObject(&result)) return maybe_result; } return prototype; @@ -3797,9 +3785,9 @@ bool Heap::IdleNotification() { static const int kIdlesBeforeMarkSweep = 7; static const int kIdlesBeforeMarkCompact = 8; static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; - static const int kGCsBetweenCleanup = 4; + static const unsigned int kGCsBetweenCleanup = 4; static int number_idle_notifications = 0; - static int last_gc_count = gc_count_; + static unsigned int last_gc_count = gc_count_; bool uncommit = true; bool finished = false; @@ -3808,7 +3796,7 @@ bool Heap::IdleNotification() { // GCs have taken place. This allows another round of cleanup based // on idle notifications if enough work has been carried out to // provoke a number of garbage collections. - if (gc_count_ < last_gc_count + kGCsBetweenCleanup) { + if (gc_count_ - last_gc_count < kGCsBetweenCleanup) { number_idle_notifications = Min(number_idle_notifications + 1, kMaxIdleCount); } else { @@ -5182,32 +5170,77 @@ void HeapIterator::reset() { } -#ifdef DEBUG +#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) -static bool search_for_any_global; -static Object* search_target; -static bool found_target; -static List<Object*> object_stack(20); +Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL); +class PathTracer::MarkVisitor: public ObjectVisitor { + public: + explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {} + void VisitPointers(Object** start, Object** end) { + // Scan all HeapObject pointers in [start, end) + for (Object** p = start; !tracer_->found() && (p < end); p++) { + if ((*p)->IsHeapObject()) + tracer_->MarkRecursively(p, this); + } + } -// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. 
-static const int kMarkTag = 2; + private: + PathTracer* tracer_; +}; -static void MarkObjectRecursively(Object** p); -class MarkObjectVisitor : public ObjectVisitor { + +class PathTracer::UnmarkVisitor: public ObjectVisitor { public: + explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {} void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) + // Scan all HeapObject pointers in [start, end) for (Object** p = start; p < end; p++) { if ((*p)->IsHeapObject()) - MarkObjectRecursively(p); + tracer_->UnmarkRecursively(p, this); } } + + private: + PathTracer* tracer_; }; -static MarkObjectVisitor mark_visitor; -static void MarkObjectRecursively(Object** p) { +void PathTracer::VisitPointers(Object** start, Object** end) { + bool done = ((what_to_find_ == FIND_FIRST) && found_target_); + // Visit all HeapObject pointers in [start, end) + for (Object** p = start; !done && (p < end); p++) { + if ((*p)->IsHeapObject()) { + TracePathFrom(p); + done = ((what_to_find_ == FIND_FIRST) && found_target_); + } + } +} + + +void PathTracer::Reset() { + found_target_ = false; + object_stack_.Clear(); +} + + +void PathTracer::TracePathFrom(Object** root) { + ASSERT((search_target_ == kAnyGlobalObject) || + search_target_->IsHeapObject()); + found_target_in_trace_ = false; + object_stack_.Clear(); + + MarkVisitor mark_visitor(this); + MarkRecursively(root, &mark_visitor); + + UnmarkVisitor unmark_visitor(this); + UnmarkRecursively(root, &unmark_visitor); + + ProcessResults(); +} + + +void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { if (!(*p)->IsHeapObject()) return; HeapObject* obj = HeapObject::cast(*p); @@ -5216,14 +5249,17 @@ static void MarkObjectRecursively(Object** p) { if (!map->IsHeapObject()) return; // visited before - if (found_target) return; // stop if target found - object_stack.Add(obj); - if ((search_for_any_global && obj->IsJSGlobalObject()) || - (!search_for_any_global && (obj == search_target))) { - found_target = true; + if (found_target_in_trace_) return; // stop if target found + object_stack_.Add(obj); + if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || + (obj == search_target_)) { + found_target_in_trace_ = true; + found_target_ = true; return; } + bool is_global_context = obj->IsGlobalContext(); + // not visited yet Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); @@ -5231,31 +5267,30 @@ static void MarkObjectRecursively(Object** p) { obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag)); - MarkObjectRecursively(&map); + // Scan the object body. + if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) { + // This is specialized to scan Context's properly. + Object** start = reinterpret_cast<Object**>(obj->address() + + Context::kHeaderSize); + Object** end = reinterpret_cast<Object**>(obj->address() + + Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize); + mark_visitor->VisitPointers(start, end); + } else { + obj->IterateBody(map_p->instance_type(), + obj->SizeFromMap(map_p), + mark_visitor); + } - obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p), - &mark_visitor); + // Scan the map after the body because the body is a lot more interesting + // when doing leak detection. 
+ MarkRecursively(&map, mark_visitor); - if (!found_target) // don't pop if found the target - object_stack.RemoveLast(); + if (!found_target_in_trace_) // don't pop if found the target + object_stack_.RemoveLast(); } -static void UnmarkObjectRecursively(Object** p); -class UnmarkObjectVisitor : public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - UnmarkObjectRecursively(p); - } - } -}; - -static UnmarkObjectVisitor unmark_visitor; - -static void UnmarkObjectRecursively(Object** p) { +void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) { if (!(*p)->IsHeapObject()) return; HeapObject* obj = HeapObject::cast(*p); @@ -5274,63 +5309,42 @@ static void UnmarkObjectRecursively(Object** p) { obj->set_map(reinterpret_cast<Map*>(map_p)); - UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p)); + UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor); obj->IterateBody(Map::cast(map_p)->instance_type(), obj->SizeFromMap(Map::cast(map_p)), - &unmark_visitor); + unmark_visitor); } -static void MarkRootObjectRecursively(Object** root) { - if (search_for_any_global) { - ASSERT(search_target == NULL); - } else { - ASSERT(search_target->IsHeapObject()); - } - found_target = false; - object_stack.Clear(); - - MarkObjectRecursively(root); - UnmarkObjectRecursively(root); - - if (found_target) { +void PathTracer::ProcessResults() { + if (found_target_) { PrintF("=====================================\n"); PrintF("==== Path to object ====\n"); PrintF("=====================================\n\n"); - ASSERT(!object_stack.is_empty()); - for (int i = 0; i < object_stack.length(); i++) { + ASSERT(!object_stack_.is_empty()); + for (int i = 0; i < object_stack_.length(); i++) { if (i > 0) PrintF("\n |\n |\n V\n\n"); - Object* obj = object_stack[i]; + Object* obj = object_stack_[i]; +#ifdef OBJECT_PRINT obj->Print(); +#else + obj->ShortPrint(); +#endif } PrintF("=====================================\n"); } } +#endif // DEBUG || LIVE_OBJECT_LIST -// Helper class for visiting HeapObjects recursively. -class MarkRootVisitor: public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - // Visit all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - MarkRootObjectRecursively(p); - } - } -}; - - +#ifdef DEBUG // Triggers a depth-first traversal of reachable objects from roots // and finds a path to a specific heap object and prints it. void Heap::TracePathToObject(Object* target) { - search_target = target; - search_for_any_global = false; - - MarkRootVisitor root_visitor; - IterateRoots(&root_visitor, VISIT_ONLY_STRONG); + PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); + IterateRoots(&tracer, VISIT_ONLY_STRONG); } @@ -5338,11 +5352,10 @@ void Heap::TracePathToObject(Object* target) { // and finds a path to any global object and prints it. Useful for // determining the source for leaks of global objects. 
void Heap::TracePathToGlobal() { - search_target = NULL; - search_for_any_global = true; - - MarkRootVisitor root_visitor; - IterateRoots(&root_visitor, VISIT_ONLY_STRONG); + PathTracer tracer(PathTracer::kAnyGlobalObject, + PathTracer::FIND_ALL, + VISIT_ALL); + IterateRoots(&tracer, VISIT_ONLY_STRONG); } #endif @@ -30,6 +30,8 @@ #include <math.h> +#include "globals.h" +#include "list.h" #include "spaces.h" #include "splay-tree-inl.h" #include "v8-counters.h" @@ -47,7 +49,6 @@ namespace internal { V(Map, one_pointer_filler_map, OnePointerFillerMap) \ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ /* Cluster the most popular ones in a few cache lines here at the top. */ \ - V(Smi, stack_limit, StackLimit) \ V(Object, undefined_value, UndefinedValue) \ V(Object, the_hole_value, TheHoleValue) \ V(Object, null_value, NullValue) \ @@ -60,21 +61,29 @@ namespace internal { V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ V(Map, meta_map, MetaMap) \ - V(Object, termination_exception, TerminationException) \ V(Map, hash_table_map, HashTableMap) \ + V(Smi, stack_limit, StackLimit) \ + V(FixedArray, number_string_cache, NumberStringCache) \ + V(Object, instanceof_cache_function, InstanceofCacheFunction) \ + V(Object, instanceof_cache_map, InstanceofCacheMap) \ + V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \ + V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ + V(Object, termination_exception, TerminationException) \ V(FixedArray, empty_fixed_array, EmptyFixedArray) \ V(ByteArray, empty_byte_array, EmptyByteArray) \ + V(String, empty_string, EmptyString) \ + V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Map, string_map, StringMap) \ V(Map, ascii_string_map, AsciiStringMap) \ V(Map, symbol_map, SymbolMap) \ + V(Map, cons_string_map, ConsStringMap) \ + V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ V(Map, ascii_symbol_map, AsciiSymbolMap) \ V(Map, cons_symbol_map, ConsSymbolMap) \ V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \ V(Map, external_symbol_map, ExternalSymbolMap) \ V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \ V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \ - V(Map, cons_string_map, ConsStringMap) \ - V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ V(Map, external_string_map, ExternalStringMap) \ V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ @@ -98,11 +107,6 @@ namespace internal { V(Map, proxy_map, ProxyMap) \ V(Object, nan_value, NanValue) \ V(Object, minus_zero_value, MinusZeroValue) \ - V(Object, instanceof_cache_function, InstanceofCacheFunction) \ - V(Object, instanceof_cache_map, InstanceofCacheMap) \ - V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \ - V(String, empty_string, EmptyString) \ - V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Map, neander_map, NeanderMap) \ V(JSObject, message_listeners, MessageListeners) \ V(Proxy, prototype_accessors, PrototypeAccessors) \ @@ -111,8 +115,6 @@ namespace internal { V(Code, js_entry_code, JsEntryCode) \ V(Code, js_construct_entry_code, JsConstructEntryCode) \ V(Code, c_entry_code, CEntryCode) \ - V(FixedArray, number_string_cache, NumberStringCache) \ - V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, natives_source_cache, NativesSourceCache) \ V(Object, last_script_id, LastScriptId) 
\ V(Script, empty_script, EmptyScript) \ @@ -184,6 +186,7 @@ namespace internal { V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \ V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \ V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \ + V(KeyedStorePixelArray_symbol, "KeyedStorePixelArray") \ V(stack_overflow_symbol, "kStackOverflowBoilerplate") \ V(illegal_access_symbol, "illegal access") \ V(out_of_memory_symbol, "out-of-memory") \ @@ -215,7 +218,6 @@ namespace internal { V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \ V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray") - // Forward declarations. class GCTracer; class HeapStats; @@ -1179,7 +1181,7 @@ class Heap : public AllStatic { static int mc_count_; // how many mark-compact collections happened static int ms_count_; // how many mark-sweep collections happened - static int gc_count_; // how many gc happened + static unsigned int gc_count_; // how many gc happened // Total length of the strings we failed to flatten since the last GC. static int unflattened_strings_length_; @@ -1906,7 +1908,7 @@ class GCTracer BASE_EMBEDDED { void set_collector(GarbageCollector collector) { collector_ = collector; } // Sets the GC count. - void set_gc_count(int count) { gc_count_ = count; } + void set_gc_count(unsigned int count) { gc_count_ = count; } // Sets the full GC count. void set_full_gc_count(int count) { full_gc_count_ = count; } @@ -1949,7 +1951,7 @@ class GCTracer BASE_EMBEDDED { // A count (including this one, eg, the first collection is 1) of the // number of garbage collections. - int gc_count_; + unsigned int gc_count_; // A count (including this one) of the number of full garbage collections. int full_gc_count_; @@ -2151,6 +2153,65 @@ class WeakObjectRetainer { }; +#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) +// Helper class for tracing paths to a search target Object from all roots. +// The TracePathFrom() method can be used to trace paths from a specific +// object to the search target object. +class PathTracer : public ObjectVisitor { + public: + enum WhatToFind { + FIND_ALL, // Will find all matches. + FIND_FIRST // Will stop the search after first match. + }; + + // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop + // after the first match. If FIND_ALL is specified, then tracing will be + // done for all matches. + PathTracer(Object* search_target, + WhatToFind what_to_find, + VisitMode visit_mode) + : search_target_(search_target), + found_target_(false), + found_target_in_trace_(false), + what_to_find_(what_to_find), + visit_mode_(visit_mode), + object_stack_(20), + no_alloc() {} + + virtual void VisitPointers(Object** start, Object** end); + + void Reset(); + void TracePathFrom(Object** root); + + bool found() const { return found_target_; } + + static Object* const kAnyGlobalObject; + + protected: + class MarkVisitor; + class UnmarkVisitor; + + void MarkRecursively(Object** p, MarkVisitor* mark_visitor); + void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor); + virtual void ProcessResults(); + + // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. + static const int kMarkTag = 2; + + Object* search_target_; + bool found_target_; + bool found_target_in_trace_; + WhatToFind what_to_find_; + VisitMode visit_mode_; + List<Object*> object_stack_; + + AssertNoAllocation no_alloc; // i.e. no gc allowed. 
+ + DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); +}; +#endif // DEBUG || LIVE_OBJECT_LIST + + } } // namespace v8::internal #endif // V8_HEAP_H_ diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc index 16100e46..c2e5c8bd 100644 --- a/src/hydrogen-instructions.cc +++ b/src/hydrogen-instructions.cc @@ -57,10 +57,13 @@ const char* Representation::Mnemonic() const { case kTagged: return "t"; case kDouble: return "d"; case kInteger32: return "i"; - default: + case kExternal: return "x"; + case kNumRepresentations: UNREACHABLE(); return NULL; } + UNREACHABLE(); + return NULL; } @@ -117,6 +120,44 @@ void Range::AddConstant(int32_t value) { } +void Range::Intersect(Range* other) { + upper_ = Min(upper_, other->upper_); + lower_ = Max(lower_, other->lower_); + bool b = CanBeMinusZero() && other->CanBeMinusZero(); + set_can_be_minus_zero(b); +} + + +void Range::Union(Range* other) { + upper_ = Max(upper_, other->upper_); + lower_ = Min(lower_, other->lower_); + bool b = CanBeMinusZero() || other->CanBeMinusZero(); + set_can_be_minus_zero(b); +} + + +void Range::Sar(int32_t value) { + int32_t bits = value & 0x1F; + lower_ = lower_ >> bits; + upper_ = upper_ >> bits; + set_can_be_minus_zero(false); +} + + +void Range::Shl(int32_t value) { + int32_t bits = value & 0x1F; + int old_lower = lower_; + int old_upper = upper_; + lower_ = lower_ << bits; + upper_ = upper_ << bits; + if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) { + upper_ = kMaxInt; + lower_ = kMinInt; + } + set_can_be_minus_zero(false); +} + + bool Range::AddAndCheckOverflow(Range* other) { bool may_overflow = false; lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow); @@ -221,7 +262,7 @@ HType HType::TypeFromValue(Handle<Object> value) { } -int HValue::LookupOperandIndex(int occurrence_index, HValue* op) const { +int HValue::LookupOperandIndex(int occurrence_index, HValue* op) { for (int i = 0; i < OperandCount(); ++i) { if (OperandAt(i) == op) { if (occurrence_index == 0) return i; @@ -237,7 +278,7 @@ bool HValue::IsDefinedAfter(HBasicBlock* other) const { } -bool HValue::UsesMultipleTimes(HValue* op) const { +bool HValue::UsesMultipleTimes(HValue* op) { bool seen = false; for (int i = 0; i < OperandCount(); ++i) { if (OperandAt(i) == op) { @@ -249,7 +290,7 @@ bool HValue::UsesMultipleTimes(HValue* op) const { } -bool HValue::Equals(HValue* other) const { +bool HValue::Equals(HValue* other) { if (other->opcode() != opcode()) return false; if (!other->representation().Equals(representation())) return false; if (!other->type_.Equals(type_)) return false; @@ -264,7 +305,7 @@ bool HValue::Equals(HValue* other) const { } -intptr_t HValue::Hashcode() const { +intptr_t HValue::Hashcode() { intptr_t result = opcode(); int count = OperandCount(); for (int i = 0; i < count; ++i) { @@ -281,52 +322,20 @@ void HValue::SetOperandAt(int index, HValue* value) { } -void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) { - if (index < 2) { - operands_[index] = value; - } else { - context_ = value; - } -} - - -void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) { - if (index < 3) { - operands_[index] = value; - } else { - context_ = value; - } -} - - -void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) { - if (index < 2) { - operands_[index] = value; - } else { - context_ = value; - } -} - - void HValue::ReplaceAndDelete(HValue* other) { - ReplaceValue(other); + if (other != NULL) ReplaceValue(other); Delete(); } void HValue::ReplaceValue(HValue* 
other) { - ZoneList<HValue*> start_uses(2); for (int i = 0; i < uses_.length(); ++i) { - HValue* use = uses_.at(i); - if (!use->block()->IsStartBlock()) { - InternalReplaceAtUse(use, other); - other->uses_.Add(use); - } else { - start_uses.Add(use); - } + HValue* use = uses_[i]; + ASSERT(!use->block()->IsStartBlock()); + InternalReplaceAtUse(use, other); + other->uses_.Add(use); } - uses_.Clear(); - uses_.AddAll(start_uses); + uses_.Rewind(0); } @@ -438,13 +447,15 @@ void HValue::ComputeInitialRange() { } -void HInstruction::PrintTo(StringStream* stream) const { +void HInstruction::PrintTo(StringStream* stream) { stream->Add("%s", Mnemonic()); if (HasSideEffects()) stream->Add("*"); stream->Add(" "); PrintDataTo(stream); - if (range() != NULL) { + if (range() != NULL && + !range()->IsMostGeneric() && + !range()->CanBeMinusZero()) { stream->Add(" range[%d,%d,m0=%d]", range()->lower(), range()->upper(), @@ -465,9 +476,16 @@ void HInstruction::PrintTo(StringStream* stream) const { void HInstruction::Unlink() { ASSERT(IsLinked()); ASSERT(!IsControlInstruction()); // Must never move control instructions. + ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these. + ASSERT(previous_ != NULL); + previous_->next_ = next_; + if (next_ == NULL) { + ASSERT(block()->last() == this); + block()->set_last(previous_); + } else { + next_->previous_ = previous_; + } clear_block(); - if (previous_ != NULL) previous_->next_ = next_; - if (next_ != NULL) next_->previous_ = previous_; } @@ -554,69 +572,64 @@ void HInstruction::Verify() { #endif -void HCall::PrintDataTo(StringStream* stream) const { - stream->Add("#%d", argument_count()); -} - - -void HUnaryCall::PrintDataTo(StringStream* stream) const { +void HUnaryCall::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" "); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HBinaryCall::PrintDataTo(StringStream* stream) const { +void HBinaryCall::PrintDataTo(StringStream* stream) { first()->PrintNameTo(stream); stream->Add(" "); second()->PrintNameTo(stream); stream->Add(" "); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HCallConstantFunction::PrintDataTo(StringStream* stream) const { +void HCallConstantFunction::PrintDataTo(StringStream* stream) { if (IsApplyFunction()) { stream->Add("optimized apply "); } else { stream->Add("%o ", function()->shared()->DebugName()); } - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HCallNamed::PrintDataTo(StringStream* stream) const { +void HCallNamed::PrintDataTo(StringStream* stream) { stream->Add("%o ", *name()); HUnaryCall::PrintDataTo(stream); } -void HCallGlobal::PrintDataTo(StringStream* stream) const { +void HCallGlobal::PrintDataTo(StringStream* stream) { stream->Add("%o ", *name()); HUnaryCall::PrintDataTo(stream); } -void HCallKnownGlobal::PrintDataTo(StringStream* stream) const { +void HCallKnownGlobal::PrintDataTo(StringStream* stream) { stream->Add("o ", target()->shared()->DebugName()); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HCallRuntime::PrintDataTo(StringStream* stream) const { +void HCallRuntime::PrintDataTo(StringStream* stream) { stream->Add("%o ", *name()); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HClassOfTest::PrintDataTo(StringStream* stream) const { +void HClassOfTest::PrintDataTo(StringStream* stream) { stream->Add("class_of_test("); value()->PrintNameTo(stream); stream->Add(", \"%o\")", *class_name()); 
} -void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const { +void HAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintNameTo(stream); stream->Add("["); index()->PrintNameTo(stream); @@ -625,7 +638,7 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const { } -void HControlInstruction::PrintDataTo(StringStream* stream) const { +void HControlInstruction::PrintDataTo(StringStream* stream) { if (FirstSuccessor() != NULL) { int first_id = FirstSuccessor()->block_id(); if (SecondSuccessor() == NULL) { @@ -638,13 +651,13 @@ void HControlInstruction::PrintDataTo(StringStream* stream) const { } -void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const { +void HUnaryControlInstruction::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); HControlInstruction::PrintDataTo(stream); } -void HCompareMap::PrintDataTo(StringStream* stream) const { +void HCompareMap::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" (%p)", *map()); HControlInstruction::PrintDataTo(stream); @@ -672,19 +685,19 @@ const char* HUnaryMathOperation::OpName() const { } -void HUnaryMathOperation::PrintDataTo(StringStream* stream) const { +void HUnaryMathOperation::PrintDataTo(StringStream* stream) { const char* name = OpName(); stream->Add("%s ", name); value()->PrintNameTo(stream); } -void HUnaryOperation::PrintDataTo(StringStream* stream) const { +void HUnaryOperation::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); } -void HHasInstanceType::PrintDataTo(StringStream* stream) const { +void HHasInstanceType::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); switch (from_) { case FIRST_JS_OBJECT_TYPE: @@ -705,14 +718,14 @@ void HHasInstanceType::PrintDataTo(StringStream* stream) const { } -void HTypeofIs::PrintDataTo(StringStream* stream) const { +void HTypeofIs::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" == "); stream->Add(type_literal_->ToAsciiVector()); } -void HChange::PrintDataTo(StringStream* stream) const { +void HChange::PrintDataTo(StringStream* stream) { HUnaryOperation::PrintDataTo(stream); stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic()); @@ -728,26 +741,26 @@ HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction( } -void HCheckMap::PrintDataTo(StringStream* stream) const { +void HCheckMap::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" %p", *map()); } -void HCheckFunction::PrintDataTo(StringStream* stream) const { +void HCheckFunction::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" %p", *target()); } -void HCallStub::PrintDataTo(StringStream* stream) const { +void HCallStub::PrintDataTo(StringStream* stream) { stream->Add("%s ", CodeStub::MajorName(major_key_, false)); HUnaryCall::PrintDataTo(stream); } -void HInstanceOf::PrintDataTo(StringStream* stream) const { +void HInstanceOf::PrintDataTo(StringStream* stream) { left()->PrintNameTo(stream); stream->Add(" "); right()->PrintNameTo(stream); @@ -766,6 +779,8 @@ Range* HValue::InferRange() { } else if (representation().IsNone()) { return NULL; } else { + // Untagged integer32 cannot be -0 and we don't compute ranges for + // untagged doubles. 
return new Range(); } } @@ -777,7 +792,7 @@ Range* HConstant::InferRange() { result->set_can_be_minus_zero(false); return result; } - return HInstruction::InferRange(); + return HValue::InferRange(); } @@ -811,7 +826,7 @@ Range* HAdd::InferRange() { res->set_can_be_minus_zero(m0); return res; } else { - return HArithmeticBinaryOperation::InferRange(); + return HValue::InferRange(); } } @@ -827,7 +842,7 @@ Range* HSub::InferRange() { res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero()); return res; } else { - return HArithmeticBinaryOperation::InferRange(); + return HValue::InferRange(); } } @@ -845,7 +860,7 @@ Range* HMul::InferRange() { res->set_can_be_minus_zero(m0); return res; } else { - return HArithmeticBinaryOperation::InferRange(); + return HValue::InferRange(); } } @@ -870,7 +885,7 @@ Range* HDiv::InferRange() { } return result; } else { - return HArithmeticBinaryOperation::InferRange(); + return HValue::InferRange(); } } @@ -887,12 +902,12 @@ Range* HMod::InferRange() { } return result; } else { - return HArithmeticBinaryOperation::InferRange(); + return HValue::InferRange(); } } -void HPhi::PrintTo(StringStream* stream) const { +void HPhi::PrintTo(StringStream* stream) { stream->Add("["); for (int i = 0; i < OperandCount(); ++i) { HValue* value = OperandAt(i); @@ -918,7 +933,7 @@ void HPhi::AddInput(HValue* value) { } -HValue* HPhi::GetRedundantReplacement() const { +HValue* HPhi::GetRedundantReplacement() { HValue* candidate = NULL; int count = OperandCount(); int position = 0; @@ -970,7 +985,7 @@ void HPhi::AddIndirectUsesTo(int* dest) { } -void HSimulate::PrintDataTo(StringStream* stream) const { +void HSimulate::PrintDataTo(StringStream* stream) { stream->Add("id=%d ", ast_id()); if (pop_count_ > 0) stream->Add("pop %d", pop_count_); if (values_.length() > 0) { @@ -987,7 +1002,7 @@ void HSimulate::PrintDataTo(StringStream* stream) const { } -void HEnterInlined::PrintDataTo(StringStream* stream) const { +void HEnterInlined::PrintDataTo(StringStream* stream) { SmartPointer<char> name = function()->debug_name()->ToCString(); stream->Add("%s, id=%d", *name, function()->id()); } @@ -1028,7 +1043,7 @@ HConstant* HConstant::CopyToTruncatedInt32() const { } -void HConstant::PrintDataTo(StringStream* stream) const { +void HConstant::PrintDataTo(StringStream* stream) { handle()->ShortPrint(stream); } @@ -1038,7 +1053,7 @@ bool HArrayLiteral::IsCopyOnWrite() const { } -void HBinaryOperation::PrintDataTo(StringStream* stream) const { +void HBinaryOperation::PrintDataTo(StringStream* stream) { left()->PrintNameTo(stream); stream->Add(" "); right()->PrintNameTo(stream); @@ -1048,34 +1063,30 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) const { Range* HBitAnd::InferRange() { - Range* a = left()->range(); - Range* b = right()->range(); - int32_t a_mask = 0xffffffff; - int32_t b_mask = 0xffffffff; - if (a != NULL) a_mask = a->Mask(); - if (b != NULL) b_mask = b->Mask(); - int32_t result_mask = a_mask & b_mask; - if (result_mask >= 0) { - return new Range(0, result_mask); - } else { - return HBinaryOperation::InferRange(); - } + int32_t left_mask = (left()->range() != NULL) + ? left()->range()->Mask() + : 0xffffffff; + int32_t right_mask = (right()->range() != NULL) + ? right()->range()->Mask() + : 0xffffffff; + int32_t result_mask = left_mask & right_mask; + return (result_mask >= 0) + ? 
new Range(0, result_mask) + : HValue::InferRange(); } Range* HBitOr::InferRange() { - Range* a = left()->range(); - Range* b = right()->range(); - int32_t a_mask = 0xffffffff; - int32_t b_mask = 0xffffffff; - if (a != NULL) a_mask = a->Mask(); - if (b != NULL) b_mask = b->Mask(); - int32_t result_mask = a_mask | b_mask; - if (result_mask >= 0) { - return new Range(0, result_mask); - } else { - return HBinaryOperation::InferRange(); - } + int32_t left_mask = (left()->range() != NULL) + ? left()->range()->Mask() + : 0xffffffff; + int32_t right_mask = (right()->range() != NULL) + ? right()->range()->Mask() + : 0xffffffff; + int32_t result_mask = left_mask | right_mask; + return (result_mask >= 0) + ? new Range(0, result_mask) + : HValue::InferRange(); } @@ -1083,20 +1094,14 @@ Range* HSar::InferRange() { if (right()->IsConstant()) { HConstant* c = HConstant::cast(right()); if (c->HasInteger32Value()) { - int32_t val = c->Integer32Value(); - Range* result = NULL; - Range* left_range = left()->range(); - if (left_range == NULL) { - result = new Range(); - } else { - result = left_range->Copy(); - } - result->Sar(val); + Range* result = (left()->range() != NULL) + ? left()->range()->Copy() + : new Range(); + result->Sar(c->Integer32Value()); return result; } } - - return HBinaryOperation::InferRange(); + return HValue::InferRange(); } @@ -1104,25 +1109,19 @@ Range* HShl::InferRange() { if (right()->IsConstant()) { HConstant* c = HConstant::cast(right()); if (c->HasInteger32Value()) { - int32_t val = c->Integer32Value(); - Range* result = NULL; - Range* left_range = left()->range(); - if (left_range == NULL) { - result = new Range(); - } else { - result = left_range->Copy(); - } - result->Shl(val); + Range* result = (left()->range() != NULL) + ? left()->range()->Copy() + : new Range(); + result->Shl(c->Integer32Value()); return result; } } - - return HBinaryOperation::InferRange(); + return HValue::InferRange(); } -void HCompare::PrintDataTo(StringStream* stream) const { +void HCompare::PrintDataTo(StringStream* stream) { stream->Add(Token::Name(token())); stream->Add(" "); HBinaryOperation::PrintDataTo(stream); @@ -1141,18 +1140,26 @@ void HCompare::SetInputRepresentation(Representation r) { } -void HParameter::PrintDataTo(StringStream* stream) const { +void HParameter::PrintDataTo(StringStream* stream) { stream->Add("%u", index()); } -void HLoadNamedField::PrintDataTo(StringStream* stream) const { +void HLoadNamedField::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add(" @%d%s", offset(), is_in_object() ? 
"[in-object]" : ""); } -void HLoadKeyed::PrintDataTo(StringStream* stream) const { +void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add("["); + key()->PrintNameTo(stream); + stream->Add("]"); +} + + +void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); @@ -1160,7 +1167,7 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) const { } -void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const { +void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) { external_pointer()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); @@ -1168,7 +1175,7 @@ void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const { } -void HStoreNamed::PrintDataTo(StringStream* stream) const { +void HStoreNamedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("."); ASSERT(name()->IsString()); @@ -1178,15 +1185,29 @@ void HStoreNamed::PrintDataTo(StringStream* stream) const { } -void HStoreNamedField::PrintDataTo(StringStream* stream) const { - HStoreNamed::PrintDataTo(stream); +void HStoreNamedField::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add("."); + ASSERT(name()->IsString()); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" = "); + value()->PrintNameTo(stream); if (!transition().is_null()) { stream->Add(" (transition map %p)", *transition()); } } -void HStoreKeyed::PrintDataTo(StringStream* stream) const { +void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add("["); + key()->PrintNameTo(stream); + stream->Add("] = "); + value()->PrintNameTo(stream); +} + + +void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); @@ -1195,25 +1216,34 @@ void HStoreKeyed::PrintDataTo(StringStream* stream) const { } -void HLoadGlobal::PrintDataTo(StringStream* stream) const { +void HStorePixelArrayElement::PrintDataTo(StringStream* stream) { + external_pointer()->PrintNameTo(stream); + stream->Add("["); + key()->PrintNameTo(stream); + stream->Add("] = "); + value()->PrintNameTo(stream); +} + + +void HLoadGlobal::PrintDataTo(StringStream* stream) { stream->Add("[%p]", *cell()); if (check_hole_value()) stream->Add(" (deleteable/read-only)"); } -void HStoreGlobal::PrintDataTo(StringStream* stream) const { +void HStoreGlobal::PrintDataTo(StringStream* stream) { stream->Add("[%p] = ", *cell()); value()->PrintNameTo(stream); } -void HLoadContextSlot::PrintDataTo(StringStream* stream) const { +void HLoadContextSlot::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add("[%d]", slot_index()); } -void HStoreContextSlot::PrintDataTo(StringStream* stream) const { +void HStoreContextSlot::PrintDataTo(StringStream* stream) { context()->PrintNameTo(stream); stream->Add("[%d] = ", slot_index()); value()->PrintNameTo(stream); @@ -1223,33 +1253,33 @@ void HStoreContextSlot::PrintDataTo(StringStream* stream) const { // Implementation of type inference and type conversions. Calculates // the inferred type of this instruction based on the input operands. 
-HType HValue::CalculateInferredType() const {
+HType HValue::CalculateInferredType() {
   return type_;
 }


-HType HCheckMap::CalculateInferredType() const {
+HType HCheckMap::CalculateInferredType() {
   return value()->type();
 }


-HType HCheckFunction::CalculateInferredType() const {
+HType HCheckFunction::CalculateInferredType() {
   return value()->type();
 }


-HType HCheckNonSmi::CalculateInferredType() const {
+HType HCheckNonSmi::CalculateInferredType() {
   // TODO(kasperl): Is there any way to signal that this isn't a smi?
   return HType::Tagged();
 }


-HType HCheckSmi::CalculateInferredType() const {
+HType HCheckSmi::CalculateInferredType() {
   return HType::Smi();
 }


-HType HPhi::CalculateInferredType() const {
+HType HPhi::CalculateInferredType() {
   HType result = HType::Uninitialized();
   for (int i = 0; i < OperandCount(); ++i) {
     HType current = OperandAt(i)->type();
@@ -1259,77 +1289,77 @@
 }


-HType HConstant::CalculateInferredType() const {
+HType HConstant::CalculateInferredType() {
   return constant_type_;
 }


-HType HCompare::CalculateInferredType() const {
+HType HCompare::CalculateInferredType() {
   return HType::Boolean();
 }


-HType HCompareJSObjectEq::CalculateInferredType() const {
+HType HCompareJSObjectEq::CalculateInferredType() {
   return HType::Boolean();
 }


-HType HUnaryPredicate::CalculateInferredType() const {
+HType HUnaryPredicate::CalculateInferredType() {
   return HType::Boolean();
 }


-HType HBitwiseBinaryOperation::CalculateInferredType() const {
+HType HBitwiseBinaryOperation::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HArithmeticBinaryOperation::CalculateInferredType() const {
+HType HArithmeticBinaryOperation::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HAdd::CalculateInferredType() const {
+HType HAdd::CalculateInferredType() {
   return HType::Tagged();
 }


-HType HBitAnd::CalculateInferredType() const {
+HType HBitAnd::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HBitXor::CalculateInferredType() const {
+HType HBitXor::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HBitOr::CalculateInferredType() const {
+HType HBitOr::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HBitNot::CalculateInferredType() const {
+HType HBitNot::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HUnaryMathOperation::CalculateInferredType() const {
+HType HUnaryMathOperation::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HShl::CalculateInferredType() const {
+HType HShl::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HShr::CalculateInferredType() const {
+HType HShr::CalculateInferredType() {
   return HType::TaggedNumber();
 }


-HType HSar::CalculateInferredType() const {
+HType HSar::CalculateInferredType() {
   return HType::TaggedNumber();
 }

diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index a0d932fb..cc753546 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -51,14 +51,9 @@ class LChunkBuilder;
   V(BinaryCall) \
   V(BinaryOperation) \
   V(BitwiseBinaryOperation) \
-  V(Call) \
   V(ControlInstruction) \
   V(Instruction) \
-  V(LoadKeyed) \
-  V(MaterializedLiteral) \
   V(Phi) \
-  V(StoreKeyed) \
-  V(StoreNamed) \
   V(UnaryCall) \
   V(UnaryControlInstruction) \
   V(UnaryOperation) \
@@ -107,6 +102,7 @@ class LChunkBuilder;
   V(EnterInlined) \
   V(FixedArrayLength) \
   V(FunctionLiteral) \
+  V(GetCachedArrayIndex) \
   V(GlobalObject) \
   V(GlobalReceiver) \
   V(Goto) \
@@ -150,6 +146,7 @@
V(StoreContextSlot) \ V(StoreGlobal) \ V(StoreKeyedFastElement) \ + V(StorePixelArrayElement) \ V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ @@ -191,91 +188,50 @@ class LChunkBuilder; DECLARE_INSTRUCTION(type) - -template<int kSize> -class HOperandVector : public EmbeddedVector<HValue*, kSize> { - public: - HOperandVector() : EmbeddedVector<HValue*, kSize>(NULL) { } -}; - - class Range: public ZoneObject { public: - Range() : lower_(kMinInt), - upper_(kMaxInt), - next_(NULL), - can_be_minus_zero_(false) { } + Range() + : lower_(kMinInt), + upper_(kMaxInt), + next_(NULL), + can_be_minus_zero_(false) { } Range(int32_t lower, int32_t upper) - : lower_(lower), upper_(upper), next_(NULL), can_be_minus_zero_(false) { } + : lower_(lower), + upper_(upper), + next_(NULL), + can_be_minus_zero_(false) { } - bool IsInSmiRange() const { - return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue; - } - void KeepOrder(); - void Verify() const; int32_t upper() const { return upper_; } int32_t lower() const { return lower_; } Range* next() const { return next_; } Range* CopyClearLower() const { return new Range(kMinInt, upper_); } Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); } - void ClearLower() { lower_ = kMinInt; } - void ClearUpper() { upper_ = kMaxInt; } Range* Copy() const { return new Range(lower_, upper_); } - bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; } int32_t Mask() const; void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; } bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; } bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; } bool CanBeNegative() const { return lower_ < 0; } - bool Includes(int value) const { - return lower_ <= value && upper_ >= value; - } - - void Sar(int32_t value) { - int32_t bits = value & 0x1F; - lower_ = lower_ >> bits; - upper_ = upper_ >> bits; - set_can_be_minus_zero(false); - } - - void Shl(int32_t value) { - int32_t bits = value & 0x1F; - int old_lower = lower_; - int old_upper = upper_; - lower_ = lower_ << bits; - upper_ = upper_ << bits; - if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) { - upper_ = kMaxInt; - lower_ = kMinInt; - } - set_can_be_minus_zero(false); + bool Includes(int value) const { return lower_ <= value && upper_ >= value; } + bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; } + bool IsInSmiRange() const { + return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue; } - - // Adds a constant to the lower and upper bound of the range. - void AddConstant(int32_t value); + void KeepOrder(); + void Verify() const; void StackUpon(Range* other) { Intersect(other); next_ = other; } - void Intersect(Range* other) { - upper_ = Min(upper_, other->upper_); - lower_ = Max(lower_, other->lower_); - bool b = CanBeMinusZero() && other->CanBeMinusZero(); - set_can_be_minus_zero(b); - } - - void Union(Range* other) { - upper_ = Max(upper_, other->upper_); - lower_ = Min(lower_, other->lower_); - bool b = CanBeMinusZero() || other->CanBeMinusZero(); - set_can_be_minus_zero(b); - } + void Intersect(Range* other); + void Union(Range* other); - // Compute a new result range and return true, if the operation - // can overflow. 
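An aside on the Range arithmetic consolidated in this hunk: the InferRange changes earlier in the patch lean on two facts. First, Range::Mask() summarizes a non-negative interval as a bit mask, so the result of x | y is bounded by the OR of the two masks. Second, shifting a range's bounds is only sound while nothing overflows, which the deleted inline Shl checked by shifting back and comparing. A compilable sketch of both ideas under illustrative names (Mask and SimpleRange are stand-ins, not V8's Range API), assuming 32-bit two's-complement wraparound as the original code does:

#include <cassert>
#include <cstdint>

// Conservative bit mask for [lower, upper]: for non-negative intervals,
// smear the top bit of upper downwards, yielding the smallest 2^k - 1
// covering every value; possibly-negative ranges may set any bit.
int32_t Mask(int32_t lower, int32_t upper) {
  if (lower < 0) return 0xffffffff;
  int32_t res = upper;
  res |= res >> 1;
  res |= res >> 2;
  res |= res >> 4;
  res |= res >> 8;
  res |= res >> 16;
  return res;
}

// Stand-in for the out-of-line Range::Shl declared above: shift both
// bounds, then undo the shift to detect overflow; on overflow widen to
// the most general int32 range.
struct SimpleRange {
  int32_t lower;
  int32_t upper;

  void Shl(int32_t value) {
    int32_t bits = value & 0x1F;          // shift count mod 32, as in the diff
    int32_t old_lower = lower;
    int32_t old_upper = upper;
    lower = lower << bits;
    upper = upper << bits;
    if (old_lower != lower >> bits || old_upper != upper >> bits) {
      lower = INT32_MIN;                  // precision lost: give up
      upper = INT32_MAX;
    }
  }
};

int main() {
  // Mask trick: x in [0,5] -> mask 7, y in [0,9] -> mask 15, so x | y
  // fits in [0, 7 | 15] = [0, 15].
  assert((Mask(0, 5) | Mask(0, 9)) == 15);

  SimpleRange r = {0, 1000};
  r.Shl(3);                               // no overflow: [0, 8000]
  assert(r.lower == 0 && r.upper == 8000);
  r.Shl(20);                              // 8000 << 20 overflows int32
  assert(r.lower == INT32_MIN && r.upper == INT32_MAX);
  return 0;
}

This is also why HBitOr::InferRange only returns [0, left_mask | right_mask] when the combined mask stays non-negative: a set sign bit means the OR could produce any int32, so inference falls back to HValue::InferRange().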
+ void AddConstant(int32_t value); + void Sar(int32_t value); + void Shl(int32_t value); bool AddAndCheckOverflow(Range* other); bool SubAndCheckOverflow(Range* other); bool MulAndCheckOverflow(Range* other); @@ -307,7 +263,7 @@ class Representation { static Representation Double() { return Representation(kDouble); } static Representation External() { return Representation(kExternal); } - bool Equals(const Representation& other) const { + bool Equals(const Representation& other) { return kind_ == other.kind_; } @@ -542,15 +498,12 @@ class HValue: public ZoneObject { bool IsDefinedAfter(HBasicBlock* other) const; // Operands. - virtual int OperandCount() const { return 0; } - virtual HValue* OperandAt(int index) const { - UNREACHABLE(); - return NULL; - } + virtual int OperandCount() = 0; + virtual HValue* OperandAt(int index) = 0; void SetOperandAt(int index, HValue* value); - int LookupOperandIndex(int occurrence_index, HValue* op) const; - bool UsesMultipleTimes(HValue* op) const; + int LookupOperandIndex(int occurrence_index, HValue* op); + bool UsesMultipleTimes(HValue* op); void ReplaceAndDelete(HValue* other); void ReplaceValue(HValue* other); @@ -576,10 +529,9 @@ class HValue: public ZoneObject { void ComputeInitialRange(); // Representation helpers. - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::None(); - } - virtual Representation InferredRepresentation() const { + virtual Representation RequiredInputRepresentation(int index) const = 0; + + virtual Representation InferredRepresentation() { return representation(); } @@ -594,11 +546,11 @@ class HValue: public ZoneObject { HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO - bool Equals(HValue* other) const; - virtual intptr_t Hashcode() const; + bool Equals(HValue* other); + virtual intptr_t Hashcode(); // Printing support. - virtual void PrintTo(StringStream* stream) const = 0; + virtual void PrintTo(StringStream* stream) = 0; void PrintNameTo(StringStream* stream); static void PrintTypeTo(HType type, StringStream* stream); @@ -609,7 +561,7 @@ class HValue: public ZoneObject { // it has changed. bool UpdateInferredType(); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify() = 0; @@ -618,14 +570,14 @@ class HValue: public ZoneObject { protected: // This function must be overridden for instructions with flag kUseGVN, to // compare the non-Operand parts of the instruction. - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { UNREACHABLE(); return false; } virtual void RepresentationChanged(Representation to) { } virtual Range* InferRange(); virtual void DeleteFromGraph() = 0; - virtual void InternalSetOperandAt(int index, HValue* value) { UNREACHABLE(); } + virtual void InternalSetOperandAt(int index, HValue* value) = 0; void clear_block() { ASSERT(block_ != NULL); block_ = NULL; @@ -667,8 +619,8 @@ class HInstruction: public HValue { HInstruction* next() const { return next_; } HInstruction* previous() const { return previous_; } - void PrintTo(StringStream* stream) const; - virtual void PrintDataTo(StringStream* stream) const {} + virtual void PrintTo(StringStream* stream); + virtual void PrintDataTo(StringStream* stream) { } bool IsLinked() const { return block() != NULL; } void Unlink(); @@ -689,6 +641,8 @@ class HInstruction: public HValue { // instruction. 
virtual bool IsCheckInstruction() const { return false; } + virtual bool IsCall() { return false; } + DECLARE_INSTRUCTION(Instruction) protected: @@ -715,12 +669,6 @@ class HInstruction: public HValue { }; -class HBlockEntry: public HInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry") -}; - - class HControlInstruction: public HInstruction { public: HControlInstruction(HBasicBlock* first, HBasicBlock* second) @@ -730,7 +678,7 @@ class HControlInstruction: public HInstruction { HBasicBlock* FirstSuccessor() const { return first_successor_; } HBasicBlock* SecondSuccessor() const { return second_successor_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_INSTRUCTION(ControlInstruction) @@ -740,25 +688,101 @@ class HControlInstruction: public HInstruction { }; -class HDeoptimize: public HControlInstruction { +template<int NumElements> +class HOperandContainer { public: - HDeoptimize() : HControlInstruction(NULL, NULL) { } + HOperandContainer() : elems_() { } + + int length() { return NumElements; } + HValue*& operator[](int i) { + ASSERT(i < length()); + return elems_[i]; + } + + private: + HValue* elems_[NumElements]; +}; + + +template<> +class HOperandContainer<0> { + public: + int length() { return 0; } + HValue*& operator[](int i) { + UNREACHABLE(); + static HValue* t = 0; + return t; + } +}; + + +template<int V> +class HTemplateInstruction : public HInstruction { + public: + int OperandCount() { return V; } + HValue* OperandAt(int i) { return inputs_[i]; } + + protected: + void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; } + + private: + HOperandContainer<V> inputs_; +}; + + +template<int V> +class HTemplateControlInstruction : public HControlInstruction { + public: + HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second) + : HControlInstruction(first, second) { } + int OperandCount() { return V; } + HValue* OperandAt(int i) { return inputs_[i]; } + + protected: + void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; } + + private: + HOperandContainer<V> inputs_; +}; + + +class HBlockEntry: public HTemplateInstruction<0> { + public: + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + + DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry") +}; + + +class HDeoptimize: public HTemplateControlInstruction<0> { + public: + HDeoptimize() : HTemplateControlInstruction<0>(NULL, NULL) { } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") }; -class HGoto: public HControlInstruction { +class HGoto: public HTemplateControlInstruction<0> { public: explicit HGoto(HBasicBlock* target) - : HControlInstruction(target, NULL), include_stack_check_(false) { - } + : HTemplateControlInstruction<0>(target, NULL), + include_stack_check_(false) { } void set_include_stack_check(bool include_stack_check) { include_stack_check_ = include_stack_check; } bool include_stack_check() const { return include_stack_check_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") private: @@ -766,34 +790,20 @@ class HGoto: public HControlInstruction { }; -class HUnaryControlInstruction: public HControlInstruction { +class HUnaryControlInstruction: public HTemplateControlInstruction<1> { public: explicit 
HUnaryControlInstruction(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target) - : HControlInstruction(true_target, false_target) { + : HTemplateControlInstruction<1>(true_target, false_target) { SetOperandAt(0, value); } - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); - } + virtual void PrintDataTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream) const; - - HValue* value() const { return OperandAt(0); } - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* value() { return OperandAt(0); } DECLARE_INSTRUCTION(UnaryControlInstruction) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<1> operands_; }; @@ -825,10 +835,14 @@ class HCompareMap: public HUnaryControlInstruction { ASSERT(!map.is_null()); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<Map> map() const { return map_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map") private: @@ -842,38 +856,36 @@ class HReturn: public HUnaryControlInstruction { : HUnaryControlInstruction(value, NULL, NULL) { } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; -class HAbnormalExit: public HControlInstruction { +class HAbnormalExit: public HTemplateControlInstruction<0> { public: - HAbnormalExit() : HControlInstruction(NULL, NULL) { } + HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit") }; -class HUnaryOperation: public HInstruction { +class HUnaryOperation: public HTemplateInstruction<1> { public: explicit HUnaryOperation(HValue* value) { SetOperandAt(0, value); } - HValue* value() const { return OperandAt(0); } - virtual void PrintDataTo(StringStream* stream) const; - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* value() { return OperandAt(0); } + virtual void PrintDataTo(StringStream* stream); DECLARE_INSTRUCTION(UnaryOperation) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<1> operands_; }; @@ -895,13 +907,14 @@ class HChange: public HUnaryOperation { public: HChange(HValue* value, Representation from, - Representation to) + Representation to, + bool is_truncating) : HUnaryOperation(value), from_(from), to_(to) { ASSERT(!from.IsNone() && !to.IsNone()); ASSERT(!from.Equals(to)); set_representation(to); SetFlag(kUseGVN); - + if (is_truncating) SetFlag(kTruncatingToInt32); if (from.IsInteger32() && to.IsTagged() && value->range() != NULL && value->range()->IsInSmiRange()) { set_type(HType::Smi()); @@ -916,25 +929,19 @@ class HChange: public HUnaryOperation { return from_; } - bool CanTruncateToInt32() const { - for (int i = 0; i < uses()->length(); ++i) { - if (!uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) return false; - } - return true; - } + bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); } - virtual 
void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(Change, CanTruncateToInt32() ? "truncate" : "change") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { if (!other->IsChange()) return false; HChange* change = HChange::cast(other); return value() == change->value() - && to().Equals(change->to()) - && CanTruncateToInt32() == change->CanTruncateToInt32(); + && to().Equals(change->to()); } private: @@ -953,7 +960,7 @@ class HSimulate: public HInstruction { assigned_indexes_(2) {} virtual ~HSimulate() {} - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; } int ast_id() const { return ast_id_; } @@ -978,8 +985,12 @@ class HSimulate: public HInstruction { void AddPushedValue(HValue* value) { AddValue(kNoIndex, value); } - virtual int OperandCount() const { return values_.length(); } - virtual HValue* OperandAt(int index) const { return values_[index]; } + virtual int OperandCount() { return values_.length(); } + virtual HValue* OperandAt(int index) { return values_[index]; } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate") @@ -1010,25 +1021,33 @@ class HSimulate: public HInstruction { }; -class HStackCheck: public HInstruction { +class HStackCheck: public HTemplateInstruction<0> { public: HStackCheck() { } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check") }; -class HEnterInlined: public HInstruction { +class HEnterInlined: public HTemplateInstruction<0> { public: HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function) : closure_(closure), function_(function) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<JSFunction> closure() const { return closure_; } FunctionLiteral* function() const { return function_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined") private: @@ -1037,39 +1056,49 @@ class HEnterInlined: public HInstruction { }; -class HLeaveInlined: public HInstruction { +class HLeaveInlined: public HTemplateInstruction<0> { public: HLeaveInlined() {} + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined") }; class HPushArgument: public HUnaryOperation { public: - explicit HPushArgument(HValue* value) : HUnaryOperation(value) { } + explicit HPushArgument(HValue* value) : HUnaryOperation(value) { + set_representation(Representation::Tagged()); + } virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - HValue* argument() const { return OperandAt(0); } + HValue* argument() { return OperandAt(0); } DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument") }; -class HContext: public HInstruction { +class HContext: public HTemplateInstruction<0> { public: HContext() { set_representation(Representation::Tagged()); SetFlag(kUseGVN); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + 
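Two refactorings meet in this stretch of the header: operand storage moves into the HOperandContainer / HTemplateInstruction&lt;V&gt; templates introduced a few hunks above, making each instruction's arity a compile-time constant instead of per-class vectors with hand-written OperandCount/OperandAt overrides, and RequiredInputRepresentation becomes pure virtual, so every concrete instruction (HContext here included) must now state a representation even if it is only Representation::None(). A condensed, self-contained rendering of the container pattern; void* stands in for HValue* and all names are illustrative:

#include <cassert>

// Fixed-size operand storage.  The zero-element specialization keeps a
// uniform interface without declaring a zero-length array.
template <int N>
struct OperandContainer {
  int length() const { return N; }
  void*& operator[](int i) {
    assert(0 <= i && i < N);
    return elems_[i];
  }
  void* elems_[N] = {};
};

template <>
struct OperandContainer<0> {
  int length() const { return 0; }
  void*& operator[](int) {
    static void* dummy = nullptr;   // mirrors the UNREACHABLE() stub above
    return dummy;
  }
};

// Deriving from the template fixes the arity once and supplies the
// OperandCount/OperandAt/SetOperandAt boilerplate for free.
template <int N>
struct TemplateInstruction {
  int OperandCount() const { return N; }
  void* OperandAt(int i) { return inputs_[i]; }
  void SetOperandAt(int i, void* v) { inputs_[i] = v; }

 private:
  OperandContainer<N> inputs_;
};

int main() {
  TemplateInstruction<2> binop;     // e.g. a binary operation
  int a = 1, b = 2;
  binop.SetOperandAt(0, &a);
  binop.SetOperandAt(1, &b);
  assert(binop.OperandCount() == 2 && binop.OperandAt(1) == &b);

  TemplateInstruction<0> leaf;      // e.g. HContext, HConstant
  assert(leaf.OperandCount() == 0);
  return 0;
}

The payoff is visible throughout the following hunks: classes such as HDeoptimize, HGoto and HContext shrink to their instruction-specific parts, and a forgotten RequiredInputRepresentation override now fails at compile time instead of silently defaulting to None.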
DECLARE_CONCRETE_INSTRUCTION(Context, "context"); protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1082,8 +1111,12 @@ class HOuterContext: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context"); + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1096,8 +1129,12 @@ class HGlobalObject: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1111,94 +1148,79 @@ class HGlobalReceiver: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HCall: public HInstruction { +template <int V> +class HCall: public HTemplateInstruction<V> { public: // The argument count includes the receiver. - explicit HCall(int argument_count) : argument_count_(argument_count) { - set_representation(Representation::Tagged()); - SetAllSideEffects(); + explicit HCall<V>(int argument_count) : argument_count_(argument_count) { + this->set_representation(Representation::Tagged()); + this->SetAllSideEffects(); } - virtual HType CalculateInferredType() const { return HType::Tagged(); } + virtual HType CalculateInferredType() { return HType::Tagged(); } virtual int argument_count() const { return argument_count_; } - virtual void PrintDataTo(StringStream* stream) const; - - DECLARE_INSTRUCTION(Call) + virtual bool IsCall() { return true; } private: int argument_count_; }; -class HUnaryCall: public HCall { +class HUnaryCall: public HCall<1> { public: HUnaryCall(HValue* value, int argument_count) - : HCall(argument_count), value_(NULL) { + : HCall<1>(argument_count) { SetOperandAt(0, value); } - virtual void PrintDataTo(StringStream* stream) const; - - HValue* value() const { return value_; } - - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { - ASSERT(index == 0); - return value_; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); } - DECLARE_INSTRUCTION(UnaryCall) + virtual void PrintDataTo(StringStream* stream); - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - ASSERT(index == 0); - value_ = value; - } + HValue* value() { return OperandAt(0); } - private: - HValue* value_; + DECLARE_INSTRUCTION(UnaryCall) }; -class HBinaryCall: public HCall { +class HBinaryCall: public HCall<2> { public: HBinaryCall(HValue* first, HValue* second, int argument_count) - : HCall(argument_count) { + : HCall<2>(argument_count) { SetOperandAt(0, first); SetOperandAt(1, second); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - HValue* first() const { return operands_[0]; } - HValue* second() const { return operands_[1]; } + virtual Representation RequiredInputRepresentation(int 
index) const { + return Representation::Tagged(); + } - virtual int OperandCount() const { return 2; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* first() { return OperandAt(0); } + HValue* second() { return OperandAt(1); } DECLARE_INSTRUCTION(BinaryCall) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<2> operands_; }; -class HCallConstantFunction: public HCall { +class HCallConstantFunction: public HCall<0> { public: HCallConstantFunction(Handle<JSFunction> function, int argument_count) - : HCall(argument_count), function_(function) { } + : HCall<0>(argument_count), function_(function) { } Handle<JSFunction> function() const { return function_; } @@ -1206,7 +1228,11 @@ class HCallConstantFunction: public HCall { return function_->code() == Builtins::builtin(Builtins::FunctionApply); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function") @@ -1225,8 +1251,8 @@ class HCallKeyed: public HBinaryCall { return Representation::Tagged(); } - HValue* context() const { return first(); } - HValue* key() const { return second(); } + HValue* context() { return first(); } + HValue* key() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed") }; @@ -1238,13 +1264,17 @@ class HCallNamed: public HUnaryCall { : HUnaryCall(context, argument_count), name_(name) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - HValue* context() const { return value(); } + HValue* context() { return value(); } Handle<String> name() const { return name_; } DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + private: Handle<String> name_; }; @@ -1256,7 +1286,11 @@ class HCallFunction: public HUnaryCall { : HUnaryCall(context, argument_count) { } - HValue* context() const { return value(); } + HValue* context() { return value(); } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function") }; @@ -1268,11 +1302,15 @@ class HCallGlobal: public HUnaryCall { : HUnaryCall(context, argument_count), name_(name) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - HValue* context() const { return value(); } + HValue* context() { return value(); } Handle<String> name() const { return name_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global") private: @@ -1280,15 +1318,19 @@ class HCallGlobal: public HUnaryCall { }; -class HCallKnownGlobal: public HCall { +class HCallKnownGlobal: public HCall<0> { public: HCallKnownGlobal(Handle<JSFunction> target, int argument_count) - : HCall(argument_count), target_(target) { } + : HCall<0>(argument_count), target_(target) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<JSFunction> target() const { return target_; } + virtual Representation RequiredInputRepresentation(int index) const { + 
return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global") private: @@ -1306,24 +1348,28 @@ class HCallNew: public HBinaryCall { return Representation::Tagged(); } - HValue* context() const { return first(); } - HValue* constructor() const { return second(); } + HValue* context() { return first(); } + HValue* constructor() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new") }; -class HCallRuntime: public HCall { +class HCallRuntime: public HCall<0> { public: HCallRuntime(Handle<String> name, Runtime::Function* c_function, int argument_count) - : HCall(argument_count), c_function_(c_function), name_(name) { } - virtual void PrintDataTo(StringStream* stream) const; + : HCall<0>(argument_count), c_function_(c_function), name_(name) { } + virtual void PrintDataTo(StringStream* stream); Runtime::Function* function() const { return c_function_; } Handle<String> name() const { return name_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime") private: @@ -1350,7 +1396,7 @@ class HJSArrayLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1369,7 +1415,7 @@ class HFixedArrayLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1390,7 +1436,7 @@ class HPixelArrayLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1405,12 +1451,12 @@ class HBitNot: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Integer32(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1441,9 +1487,9 @@ class HUnaryMathOperation: public HUnaryOperation { SetFlag(kUseGVN); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); @@ -1458,11 +1504,10 @@ class HUnaryMathOperation: public HUnaryOperation { case kMathSin: case kMathCos: return Representation::Double(); - break; case kMathAbs: return representation(); - break; default: + UNREACHABLE(); return Representation::None(); } } @@ -1483,7 +1528,7 @@ class HUnaryMathOperation: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HUnaryMathOperation* b = HUnaryMathOperation::cast(other); return op_ == b->op(); } @@ -1508,7 +1553,7 @@ class HLoadElements: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements") protected: - virtual bool DataEquals(HValue* other) const { return 
true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1532,7 +1577,7 @@ class HLoadPixelArrayExternalPointer: public HUnaryOperation { "load-pixel-array-external-pointer") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1550,8 +1595,8 @@ class HCheckMap: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - virtual HType CalculateInferredType() const; + virtual void PrintDataTo(StringStream* stream); + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1562,7 +1607,7 @@ class HCheckMap: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckMap* b = HCheckMap::cast(other); return map_.is_identical_to(b->map()); } @@ -1585,8 +1630,8 @@ class HCheckFunction: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - virtual HType CalculateInferredType() const; + virtual void PrintDataTo(StringStream* stream); + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1597,7 +1642,7 @@ class HCheckFunction: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckFunction* b = HCheckFunction::cast(other); return target_.is_identical_to(b->target()); } @@ -1645,7 +1690,7 @@ class HCheckInstanceType: public HUnaryOperation { // TODO(ager): It could be nice to allow the ommision of instance // type checks if we have already performed an instance type check // with a larger range. 
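The DataEquals/Hashcode pairs losing their const qualifier throughout these hunks are the two hooks GVN uses to decide that instructions are interchangeable: Hashcode() buckets candidates cheaply, and DataEquals() then confirms that the non-operand data (here, the instance-type interval) matches. A toy model of that lookup, folding both hooks into a single hash-map key for brevity; this is a simplification, not V8's actual GVN table:

#include <cassert>
#include <cstddef>
#include <unordered_map>

// Toy value-numbering key: opcode plus operand value numbers.  V8 keys
// on Hashcode() and confirms with the opcode check plus DataEquals();
// this sketch fuses them into one hash/equality pair.
struct Key {
  int opcode;
  int left, right;   // operand value numbers
  bool operator==(const Key& o) const {
    return opcode == o.opcode && left == o.left && right == o.right;
  }
};

struct KeyHash {
  size_t operator()(const Key& k) const {
    size_t h = k.opcode;
    h = 17 * h + k.left;    // same 17 * h + x mixing as HCheckPrototypeMaps below
    h = 17 * h + k.right;
    return h;
  }
};

int main() {
  std::unordered_map<Key, int, KeyHash> table;
  int next_vn = 0;
  auto number = [&](Key k) {
    auto it = table.find(k);
    if (it != table.end()) return it->second;   // hit: reuse the old value
    return table[k] = next_vn++;                // miss: fresh value number
  };
  int a = number({'+', 1, 2});
  int b = number({'+', 1, 2});
  assert(a == b);   // two identical adds collapse to one value number
  return 0;
}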
- virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckInstanceType* b = HCheckInstanceType::cast(other); return (first_ == b->first()) && (last_ == b->last()); } @@ -1669,7 +1714,7 @@ class HCheckNonSmi: public HUnaryOperation { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1678,11 +1723,11 @@ class HCheckNonSmi: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HCheckPrototypeMaps: public HInstruction { +class HCheckPrototypeMaps: public HTemplateInstruction<0> { public: HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder) : prototype_(prototype), holder_(holder) { @@ -1701,7 +1746,11 @@ class HCheckPrototypeMaps: public HInstruction { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps") - virtual intptr_t Hashcode() const { + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + + virtual intptr_t Hashcode() { ASSERT(!Heap::IsAllocationAllowed()); intptr_t hash = reinterpret_cast<intptr_t>(*prototype()); hash = 17 * hash + reinterpret_cast<intptr_t>(*holder()); @@ -1709,7 +1758,7 @@ class HCheckPrototypeMaps: public HInstruction { } protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other); return prototype_.is_identical_to(b->prototype()) && holder_.is_identical_to(b->holder()); @@ -1733,7 +1782,7 @@ class HCheckSmi: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1742,7 +1791,7 @@ class HCheckSmi: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1761,7 +1810,7 @@ class HPhi: public HValue { SetFlag(kFlexibleRepresentation); } - virtual Representation InferredRepresentation() const { + virtual Representation InferredRepresentation() { bool double_occurred = false; bool int32_occurred = false; for (int i = 0; i < OperandCount(); ++i) { @@ -1780,10 +1829,10 @@ class HPhi: public HValue { virtual Representation RequiredInputRepresentation(int index) const { return representation(); } - virtual HType CalculateInferredType() const; - virtual int OperandCount() const { return inputs_.length(); } - virtual HValue* OperandAt(int index) const { return inputs_[index]; } - HValue* GetRedundantReplacement() const; + virtual HType CalculateInferredType(); + virtual int OperandCount() { return inputs_.length(); } + virtual HValue* OperandAt(int index) { return inputs_[index]; } + HValue* GetRedundantReplacement(); void AddInput(HValue* value); bool IsReceiver() { return merged_index_ == 0; } @@ -1792,7 +1841,7 @@ class HPhi: public HValue { virtual const char* Mnemonic() const { return "phi"; } - virtual void PrintTo(StringStream* stream) const; + virtual void PrintTo(StringStream* stream); #ifdef DEBUG virtual void Verify(); @@ -1840,18 +1889,22 @@ class HPhi: public HValue { }; -class HArgumentsObject: 
public HInstruction { +class HArgumentsObject: public HTemplateInstruction<0> { public: HArgumentsObject() { set_representation(Representation::Tagged()); SetFlag(kIsArguments); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object") }; -class HConstant: public HInstruction { +class HConstant: public HTemplateInstruction<0> { public: HConstant(Handle<Object> handle, Representation r); @@ -1859,9 +1912,13 @@ class HConstant: public HInstruction { bool InOldSpace() const { return !Heap::InNewSpace(*handle_); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + virtual bool EmitAtUses() const { return !representation().IsDouble(); } - virtual void PrintDataTo(StringStream* stream) const; - virtual HType CalculateInferredType() const; + virtual void PrintDataTo(StringStream* stream); + virtual HType CalculateInferredType(); bool IsInteger() const { return handle_->IsSmi(); } HConstant* CopyToRepresentation(Representation r) const; HConstant* CopyToTruncatedInt32() const; @@ -1877,7 +1934,7 @@ class HConstant: public HInstruction { } bool HasStringValue() const { return handle_->IsString(); } - virtual intptr_t Hashcode() const { + virtual intptr_t Hashcode() { ASSERT(!Heap::allow_allocation(false)); return reinterpret_cast<intptr_t>(*handle()); } @@ -1891,7 +1948,7 @@ class HConstant: public HInstruction { protected: virtual Range* InferRange(); - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HConstant* other_constant = HConstant::cast(other); return handle().is_identical_to(other_constant->handle()); } @@ -1910,7 +1967,7 @@ class HConstant: public HInstruction { }; -class HBinaryOperation: public HInstruction { +class HBinaryOperation: public HTemplateInstruction<2> { public: HBinaryOperation(HValue* left, HValue* right) { ASSERT(left != NULL && right != NULL); @@ -1918,38 +1975,29 @@ class HBinaryOperation: public HInstruction { SetOperandAt(1, right); } - HValue* left() const { return OperandAt(0); } - HValue* right() const { return OperandAt(1); } + HValue* left() { return OperandAt(0); } + HValue* right() { return OperandAt(1); } // TODO(kasperl): Move these helpers to the IA-32 Lithium // instruction sequence builder. 
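The LeastConstantOperand/MostConstantOperand helpers below canonicalize commutative operations so that a constant input, when present, always surfaces on a predictable side, presumably so the Lithium builder can fold it into an immediate regardless of the order the graph builder emitted the operands. An illustrative reduction of the rule with hypothetical Value/BinOp types, not V8's classes:

#include <cassert>

// Minimal stand-ins for the pieces the helpers rely on.
struct Value {
  bool is_constant;
};

// For a commutative op, the constant input (if any) is the "most
// constant" side; non-commutative ops must preserve operand order.
struct BinOp {
  Value* left;
  Value* right;
  bool commutative;

  Value* MostConstantOperand() {
    return (commutative && left->is_constant) ? left : right;
  }
  Value* LeastConstantOperand() {
    return (commutative && left->is_constant) ? right : left;
  }
};

int main() {
  Value c{true}, x{false};
  BinOp add{&c, &x, /*commutative=*/true};
  assert(add.MostConstantOperand() == &c);    // constant surfaces
  assert(add.LeastConstantOperand() == &x);
  BinOp sub{&c, &x, /*commutative=*/false};
  assert(sub.MostConstantOperand() == &x);    // order preserved
  return 0;
}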
- HValue* LeastConstantOperand() const { + HValue* LeastConstantOperand() { if (IsCommutative() && left()->IsConstant()) return right(); return left(); } - HValue* MostConstantOperand() const { + HValue* MostConstantOperand() { if (IsCommutative() && left()->IsConstant()) return left(); return right(); } virtual bool IsCommutative() const { return false; } - virtual void PrintDataTo(StringStream* stream) const; - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + virtual void PrintDataTo(StringStream* stream); DECLARE_INSTRUCTION(BinaryOperation) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - HOperandVector<2> operands_; }; -class HApplyArguments: public HInstruction { +class HApplyArguments: public HTemplateInstruction<4> { public: HApplyArguments(HValue* function, HValue* receiver, @@ -1970,27 +2018,16 @@ class HApplyArguments: public HInstruction { : Representation::Tagged(); } - HValue* function() const { return OperandAt(0); } - HValue* receiver() const { return OperandAt(1); } - HValue* length() const { return OperandAt(2); } - HValue* elements() const { return OperandAt(3); } - - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* function() { return OperandAt(0); } + HValue* receiver() { return OperandAt(1); } + HValue* length() { return OperandAt(2); } + HValue* elements() { return OperandAt(3); } DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<4> operands_; }; -class HArgumentsElements: public HInstruction { +class HArgumentsElements: public HTemplateInstruction<0> { public: HArgumentsElements() { // The value produced by this instruction is a pointer into the stack @@ -2001,8 +2038,12 @@ class HArgumentsElements: public HInstruction { DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2013,14 +2054,18 @@ class HArgumentsLength: public HUnaryOperation { SetFlag(kUseGVN); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HAccessArgumentsAt: public HInstruction { +class HAccessArgumentsAt: public HTemplateInstruction<3> { public: HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) { set_representation(Representation::Tagged()); @@ -2030,7 +2075,7 @@ class HAccessArgumentsAt: public HInstruction { SetOperandAt(2, index); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) const { // The arguments elements is considered tagged. 
@@ -2039,24 +2084,13 @@ class HAccessArgumentsAt: public HInstruction { : Representation::Integer32(); } - HValue* arguments() const { return operands_[0]; } - HValue* length() const { return operands_[1]; } - HValue* index() const { return operands_[2]; } - - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* arguments() { return OperandAt(0); } + HValue* length() { return OperandAt(1); } + HValue* index() { return OperandAt(2); } DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at") - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - virtual bool DataEquals(HValue* other) const { return true; } - - private: - HOperandVector<3> operands_; + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2077,13 +2111,13 @@ class HBoundsCheck: public HBinaryOperation { virtual void Verify(); #endif - HValue* index() const { return left(); } - HValue* length() const { return right(); } + HValue* index() { return left(); } + HValue* length() { return right(); } DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2109,7 +2143,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation { } } - HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_INSTRUCTION(BitwiseBinaryOperation) }; @@ -2131,11 +2165,11 @@ class HArithmeticBinaryOperation: public HBinaryOperation { } } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); virtual Representation RequiredInputRepresentation(int index) const { return representation(); } - virtual Representation InferredRepresentation() const { + virtual Representation InferredRepresentation() { if (left()->representation().Equals(right()->representation())) { return left()->representation(); } @@ -2168,18 +2202,18 @@ class HCompare: public HBinaryOperation { return input_representation_; } Token::Value token() const { return token_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); - virtual intptr_t Hashcode() const { + virtual intptr_t Hashcode() { return HValue::Hashcode() * 7 + token_; } DECLARE_CONCRETE_INSTRUCTION(Compare, "compare") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCompare* comp = HCompare::cast(other); return token_ == comp->token(); } @@ -2205,12 +2239,12 @@ class HCompareJSObjectEq: public HBinaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2228,7 +2262,7 @@ class HUnaryPredicate: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); }; @@ -2242,7 +2276,7 @@ class HIsNull: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(IsNull, 
"is_null") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HIsNull* b = HIsNull::cast(other); return is_strict_ == b->is_strict(); } @@ -2259,7 +2293,7 @@ class HIsObject: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2270,11 +2304,11 @@ class HIsSmi: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HIsConstructCall: public HInstruction { +class HIsConstructCall: public HTemplateInstruction<0> { public: HIsConstructCall() { set_representation(Representation::Tagged()); @@ -2285,10 +2319,14 @@ class HIsConstructCall: public HInstruction { return !HasSideEffects() && (uses()->length() <= 1); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2304,12 +2342,12 @@ class HHasInstanceType: public HUnaryPredicate { InstanceType from() { return from_; } InstanceType to() { return to_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HHasInstanceType* b = HHasInstanceType::cast(other); return (from_ == b->from()) && (to_ == b->to()); } @@ -2327,7 +2365,18 @@ class HHasCachedArrayIndex: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } +}; + + +class HGetCachedArrayIndex: public HUnaryPredicate { + public: + explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index") + + protected: + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2338,12 +2387,12 @@ class HClassOfTest: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test") - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<String> class_name() const { return class_name_; } protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HClassOfTest* b = HClassOfTest::cast(other); return class_name_.is_identical_to(b->class_name_); } @@ -2359,12 +2408,12 @@ class HTypeofIs: public HUnaryPredicate { : HUnaryPredicate(value), type_literal_(type_literal) { } Handle<String> type_literal() { return type_literal_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HTypeofIs* b = HTypeofIs::cast(other); return type_literal_.is_identical_to(b->type_literal_); } @@ -2374,7 +2423,7 @@ class HTypeofIs: public HUnaryPredicate { }; -class HInstanceOf: public 
HInstruction { +class HInstanceOf: public HTemplateInstruction<3> { public: HInstanceOf(HValue* context, HValue* left, HValue* right) { SetOperandAt(0, context); @@ -2384,9 +2433,9 @@ class HInstanceOf: public HInstruction { SetAllSideEffects(); } - HValue* context() const { return operands_[0]; } - HValue* left() const { return operands_[1]; } - HValue* right() const { return operands_[2]; } + HValue* context() { return OperandAt(0); } + HValue* left() { return OperandAt(1); } + HValue* right() { return OperandAt(2); } virtual bool EmitAtUses() const { return !HasSideEffects() && (uses()->length() <= 1); @@ -2396,20 +2445,9 @@ class HInstanceOf: public HInstruction { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - - virtual int OperandCount() const { return 3; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<3> operands_; }; @@ -2450,7 +2488,7 @@ class HPower: public HBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Power, "power") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2468,12 +2506,12 @@ class HAdd: public HArithmeticBinaryOperation { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Add, "add") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2490,7 +2528,7 @@ class HSub: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Sub, "sub") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2512,7 +2550,7 @@ class HMul: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Mul, "mul") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2529,7 +2567,7 @@ class HMod: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Mod, "mod") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2547,7 +2585,7 @@ class HDiv: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Div, "div") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2559,12 +2597,12 @@ class HBitAnd: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual bool IsCommutative() const { return true; } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2576,12 +2614,12 @@ class HBitXor: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual bool IsCommutative() 
const { return true; } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2591,12 +2629,12 @@ class HBitOr: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual bool IsCommutative() const { return true; } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2608,12 +2646,12 @@ class HShl: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual Range* InferRange(); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Shl, "shl") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2622,12 +2660,12 @@ class HShr: public HBitwiseBinaryOperation { HShr(HValue* left, HValue* right) : HBitwiseBinaryOperation(left, right) { } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Shr, "shr") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2637,16 +2675,16 @@ class HSar: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual Range* InferRange(); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Sar, "sar") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HOsrEntry: public HInstruction { +class HOsrEntry: public HTemplateInstruction<0> { public: explicit HOsrEntry(int ast_id) : ast_id_(ast_id) { SetFlag(kChangesOsrEntries); @@ -2654,6 +2692,10 @@ class HOsrEntry: public HInstruction { int ast_id() const { return ast_id_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry") private: @@ -2661,7 +2703,7 @@ class HOsrEntry: public HInstruction { }; -class HParameter: public HInstruction { +class HParameter: public HTemplateInstruction<0> { public: explicit HParameter(unsigned index) : index_(index) { set_representation(Representation::Tagged()); @@ -2669,7 +2711,11 @@ class HParameter: public HInstruction { unsigned index() const { return index_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") @@ -2688,7 +2734,7 @@ class HCallStub: public HUnaryCall { CodeStub::Major major_key() { return major_key_; } - HValue* context() const { return value(); } + HValue* context() { return value(); } void set_transcendental_type(TranscendentalCache::Type transcendental_type) { transcendental_type_ = transcendental_type; @@ -2697,7 +2743,11 @@ class HCallStub: public HUnaryCall { return transcendental_type_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void 
PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub") @@ -2707,15 +2757,19 @@ class HCallStub: public HUnaryCall { }; -class HUnknownOSRValue: public HInstruction { +class HUnknownOSRValue: public HTemplateInstruction<0> { public: HUnknownOSRValue() { set_representation(Representation::Tagged()); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value") }; -class HLoadGlobal: public HInstruction { +class HLoadGlobal: public HTemplateInstruction<0> { public: HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value) : cell_(cell), check_hole_value_(check_hole_value) { @@ -2727,20 +2781,21 @@ class HLoadGlobal: public HInstruction { Handle<JSGlobalPropertyCell> cell() const { return cell_; } bool check_hole_value() const { return check_hole_value_; } - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); - } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - virtual intptr_t Hashcode() const { + virtual intptr_t Hashcode() { ASSERT(!Heap::allow_allocation(false)); return reinterpret_cast<intptr_t>(*cell_); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HLoadGlobal* b = HLoadGlobal::cast(other); return cell_.is_identical_to(b->cell()); } @@ -2768,7 +2823,7 @@ class HStoreGlobal: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global") @@ -2793,12 +2848,12 @@ class HLoadContextSlot: public HUnaryOperation { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HLoadContextSlot* b = HLoadContextSlot::cast(other); return (slot_index() == b->slot_index()); } @@ -2821,11 +2876,11 @@ class HStoreContextSlot: public HBinaryOperation { SetFlag(kChangesContextSlots); } - HValue* context() const { return OperandAt(0); } - HValue* value() const { return OperandAt(1); } + HValue* context() { return OperandAt(0); } + HValue* value() { return OperandAt(1); } int slot_index() const { return slot_index_; } - bool NeedsWriteBarrier() const { + bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); } @@ -2833,7 +2888,7 @@ class HStoreContextSlot: public HBinaryOperation { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot") @@ -2857,19 +2912,19 @@ class HLoadNamedField: public HUnaryOperation { } } - HValue* object() const { return OperandAt(0); } + HValue* object() { return OperandAt(0); } bool is_in_object() const { return is_in_object_; } int 
offset() const { return offset_; } virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HLoadNamedField* b = HLoadNamedField::cast(other); return is_in_object_ == b->is_in_object_ && offset_ == b->offset_; } @@ -2888,8 +2943,8 @@ class HLoadNamedGeneric: public HBinaryOperation { SetAllSideEffects(); } - HValue* context() const { return OperandAt(0); } - HValue* object() const { return OperandAt(1); } + HValue* context() { return OperandAt(0); } + HValue* object() { return OperandAt(1); } Handle<Object> name() const { return name_; } virtual Representation RequiredInputRepresentation(int index) const { @@ -2912,7 +2967,7 @@ class HLoadFunctionPrototype: public HUnaryOperation { SetFlag(kDependsOnCalls); } - HValue* function() const { return OperandAt(0); } + HValue* function() { return OperandAt(0); } virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); @@ -2921,46 +2976,34 @@ class HLoadFunctionPrototype: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HLoadKeyed: public HBinaryOperation { +class HLoadKeyedFastElement: public HBinaryOperation { public: - HLoadKeyed(HValue* obj, HValue* key) : HBinaryOperation(obj, key) { + HLoadKeyedFastElement(HValue* obj, HValue* key) : HBinaryOperation(obj, key) { set_representation(Representation::Tagged()); - } - - virtual void PrintDataTo(StringStream* stream) const; - - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); - } - HValue* object() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } - - DECLARE_INSTRUCTION(LoadKeyed) -}; - - -class HLoadKeyedFastElement: public HLoadKeyed { - public: - HLoadKeyedFastElement(HValue* obj, HValue* key) : HLoadKeyed(obj, key) { SetFlag(kDependsOnArrayElements); SetFlag(kUseGVN); } + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + virtual Representation RequiredInputRepresentation(int index) const { // The key is supposed to be Integer32. return (index == 1) ? 
Representation::Integer32() : Representation::Tagged(); } + virtual void PrintDataTo(StringStream* stream); + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load_keyed_fast_element") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2975,7 +3018,7 @@ class HLoadPixelArrayElement: public HBinaryOperation { SetFlag(kUseGVN); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) const { // The key is supposed to be Integer32, but the base pointer @@ -2984,76 +3027,50 @@ class HLoadPixelArrayElement: public HBinaryOperation { : Representation::External(); } - HValue* external_pointer() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } + HValue* external_pointer() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement, "load_pixel_array_element") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HLoadKeyedGeneric: public HLoadKeyed { +class HLoadKeyedGeneric: public HTemplateInstruction<3> { public: - HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) - : HLoadKeyed(obj, key), context_(NULL) { + HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) { + set_representation(Representation::Tagged()); + SetOperandAt(0, obj); + SetOperandAt(1, key); SetOperandAt(2, context); SetAllSideEffects(); } - HValue* context() const { return context_; } - HValue* object() const { return operands_[0]; } - HValue* key() const { return operands_[1]; } - - virtual int OperandCount() const { return 3; } - virtual HValue* OperandAt(int index) const { - return (index < 2) ? 
operands_[index] : context_; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value); - - private: - HValue* context_; -}; + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* context() { return OperandAt(2); } - -class HStoreNamed: public HBinaryOperation { - public: - HStoreNamed(HValue* obj, Handle<String> name, HValue* val) - : HBinaryOperation(obj, val), name_(name) { - } + virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - - HValue* object() const { return OperandAt(0); } - Handle<String> name() const { return name_; } - HValue* value() const { return OperandAt(1); } - void set_value(HValue* value) { SetOperandAt(1, value); } - - DECLARE_INSTRUCTION(StoreNamed) - - private: - Handle<String> name_; + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic") }; -class HStoreNamedField: public HStoreNamed { +class HStoreNamedField: public HBinaryOperation { public: HStoreNamedField(HValue* obj, Handle<String> name, HValue* val, bool in_object, int offset) - : HStoreNamed(obj, name, val), + : HBinaryOperation(obj, val), + name_(name), is_in_object_(in_object), offset_(offset) { if (is_in_object_) { @@ -3068,137 +3085,143 @@ class HStoreNamedField: public HStoreNamed { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + HValue* object() { return OperandAt(0); } + HValue* value() { return OperandAt(1); } + Handle<String> name() const { return name_; } bool is_in_object() const { return is_in_object_; } int offset() const { return offset_; } Handle<Map> transition() const { return transition_; } void set_transition(Handle<Map> map) { transition_ = map; } - bool NeedsWriteBarrier() const { + bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); } private: + Handle<String> name_; bool is_in_object_; int offset_; Handle<Map> transition_; }; -class HStoreNamedGeneric: public HStoreNamed { +class HStoreNamedGeneric: public HTemplateInstruction<3> { public: HStoreNamedGeneric(HValue* context, HValue* object, Handle<String> name, HValue* value) - : HStoreNamed(object, name, value), context_(NULL) { + : name_(name) { + SetOperandAt(0, object); + SetOperandAt(1, value); SetOperandAt(2, context); SetAllSideEffects(); } - HValue* context() const { return context_; } - HValue* object() const { return operands_[0]; } - HValue* value() const { return operands_[1]; } + HValue* object() { return OperandAt(0); } + HValue* value() { return OperandAt(1); } + HValue* context() { return OperandAt(2); } + Handle<String> name() { return name_; } - virtual int OperandCount() const { return 3; } + virtual void PrintDataTo(StringStream* stream); - virtual HValue* OperandAt(int index) const { - return (index < 2) ? 
operands_[index] : context_; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic") - protected: - virtual void InternalSetOperandAt(int index, HValue* value); - private: - HValue* context_; + Handle<String> name_; }; -class HStoreKeyed: public HInstruction { +class HStoreKeyedFastElement: public HTemplateInstruction<3> { public: - HStoreKeyed(HValue* obj, HValue* key, HValue* val) { + HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) { SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, val); + SetFlag(kChangesArrayElements); } - virtual void PrintDataTo(StringStream* stream) const; - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); + // The key is supposed to be Integer32. + return (index == 1) ? Representation::Integer32() + : Representation::Tagged(); } - HValue* object() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } - HValue* value() const { return OperandAt(2); } + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } - bool NeedsWriteBarrier() const { + bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); } - DECLARE_INSTRUCTION(StoreKeyed) + virtual void PrintDataTo(StringStream* stream); - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - HOperandVector<3> operands_; + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store_keyed_fast_element") }; -class HStoreKeyedFastElement: public HStoreKeyed { +class HStorePixelArrayElement: public HTemplateInstruction<3> { public: - HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) - : HStoreKeyed(obj, key, val) { - SetFlag(kChangesArrayElements); + HStorePixelArrayElement(HValue* external_elements, HValue* key, HValue* val) { + SetFlag(kChangesPixelArrayElements); + SetOperandAt(0, external_elements); + SetOperandAt(1, key); + SetOperandAt(2, val); } + virtual void PrintDataTo(StringStream* stream); + virtual Representation RequiredInputRepresentation(int index) const { - // The key is supposed to be Integer32. - return (index == 1) ? 
Representation::Integer32() - : Representation::Tagged(); + if (index == 0) { + return Representation::External(); + } else { + return Representation::Integer32(); + } } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store_keyed_fast_element") + HValue* external_pointer() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } + + DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement, + "store_pixel_array_element") }; -class HStoreKeyedGeneric: public HStoreKeyed { +class HStoreKeyedGeneric: public HTemplateInstruction<4> { public: HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key, - HValue* value) - : HStoreKeyed(object, key, value), context_(NULL) { + HValue* value) { + SetOperandAt(0, object); + SetOperandAt(1, key); + SetOperandAt(2, value); SetOperandAt(3, context); SetAllSideEffects(); } - HValue* context() const { return context_; } - HValue* object() const { return operands_[0]; } - HValue* key() const { return operands_[1]; } - HValue* value() const { return operands_[2]; } - - virtual int OperandCount() const { return 4; } + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } + HValue* context() { return OperandAt(3); } - virtual HValue* OperandAt(int index) const { - return (index < 3) ? operands_[index] : context_; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value); + virtual void PrintDataTo(StringStream* stream); - private: - HValue* context_; + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic") }; @@ -3216,13 +3239,13 @@ class HStringCharCodeAt: public HBinaryOperation { : Representation::Tagged(); } - HValue* string() const { return OperandAt(0); } - HValue* index() const { return OperandAt(1); } + HValue* string() { return OperandAt(0); } + HValue* index() { return OperandAt(1); } DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange() { return new Range(0, String::kMaxUC16CharCode); @@ -3241,7 +3264,7 @@ class HStringLength: public HUnaryOperation { return Representation::Tagged(); } - virtual HType CalculateInferredType() const { + virtual HType CalculateInferredType() { STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); return HType::Smi(); } @@ -3249,7 +3272,7 @@ class HStringLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange() { return new Range(0, String::kMaxLength); @@ -3257,31 +3280,30 @@ class HStringLength: public HUnaryOperation { }; -class HMaterializedLiteral: public HInstruction { +template <int V> +class HMaterializedLiteral: public HTemplateInstruction<V> { public: - HMaterializedLiteral(int index, int depth) + HMaterializedLiteral<V>(int index, int depth) : literal_index_(index), depth_(depth) { - set_representation(Representation::Tagged()); + this->set_representation(Representation::Tagged()); } int literal_index() const { return literal_index_; } int depth() const { return depth_; } - 
DECLARE_INSTRUCTION(MaterializedLiteral) - private: int literal_index_; int depth_; }; -class HArrayLiteral: public HMaterializedLiteral { +class HArrayLiteral: public HMaterializedLiteral<0> { public: HArrayLiteral(Handle<FixedArray> constant_elements, int length, int literal_index, int depth) - : HMaterializedLiteral(literal_index, depth), + : HMaterializedLiteral<0>(literal_index, depth), length_(length), constant_elements_(constant_elements) {} @@ -3290,6 +3312,10 @@ class HArrayLiteral: public HMaterializedLiteral { bool IsCopyOnWrite() const; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal") private: @@ -3298,55 +3324,53 @@ class HArrayLiteral: public HMaterializedLiteral { }; -class HObjectLiteral: public HMaterializedLiteral { +class HObjectLiteral: public HMaterializedLiteral<1> { public: HObjectLiteral(HValue* context, Handle<FixedArray> constant_properties, bool fast_elements, int literal_index, int depth) - : HMaterializedLiteral(literal_index, depth), - context_(NULL), + : HMaterializedLiteral<1>(literal_index, depth), constant_properties_(constant_properties), fast_elements_(fast_elements) { SetOperandAt(0, context); } - HValue* context() const { return context_; } + HValue* context() { return OperandAt(0); } Handle<FixedArray> constant_properties() const { return constant_properties_; } bool fast_elements() const { return fast_elements_; } - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { return context_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal") - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - context_ = value; - } - private: - HValue* context_; Handle<FixedArray> constant_properties_; bool fast_elements_; }; -class HRegExpLiteral: public HMaterializedLiteral { +class HRegExpLiteral: public HMaterializedLiteral<0> { public: HRegExpLiteral(Handle<String> pattern, Handle<String> flags, int literal_index) - : HMaterializedLiteral(literal_index, 0), + : HMaterializedLiteral<0>(literal_index, 0), pattern_(pattern), flags_(flags) { } Handle<String> pattern() { return pattern_; } Handle<String> flags() { return flags_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal") private: @@ -3355,13 +3379,17 @@ class HRegExpLiteral: public HMaterializedLiteral { }; -class HFunctionLiteral: public HInstruction { +class HFunctionLiteral: public HTemplateInstruction<0> { public: HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure) : shared_info_(shared), pretenure_(pretenure) { set_representation(Representation::Tagged()); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal") Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } @@ -3393,6 +3421,10 @@ class HValueOf: public HUnaryOperation { set_representation(Representation::Tagged()); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of") }; @@ -3411,8 +3443,8 @@ class HDeleteProperty: public HBinaryOperation { 
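A minimal, self-contained sketch (hypothetical names, not V8's actual classes) of the fixed-arity pattern the hunks above migrate to: the operand count V becomes a compile-time template parameter, so a concrete instruction stores its inputs inline and inherits OperandCount() and OperandAt() instead of hand-writing them along with a separate context_ member.

// Sketch only: Value stands in for HValue, nothing here is V8 code.
class Value;

template <int V>
class TemplateInstruction {
 public:
  int OperandCount() const { return V; }
  Value* OperandAt(int index) const { return operands_[index]; }

 protected:
  void SetOperandAt(int index, Value* value) { operands_[index] = value; }

 private:
  Value* operands_[V];  // Inline storage; no heap-allocated operand list.
};

// Example shape of a three-operand instruction such as HLoadKeyedGeneric:
// the context rides along as an ordinary operand rather than a side field.
class LoadKeyedGenericSketch : public TemplateInstruction<3> {
 public:
  LoadKeyedGenericSketch(Value* context, Value* obj, Value* key) {
    SetOperandAt(0, obj);
    SetOperandAt(1, key);
    SetOperandAt(2, context);
  }
};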
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property") - HValue* object() const { return left(); } - HValue* key() const { return right(); } + HValue* object() { return left(); } + HValue* key() { return right(); } }; #undef DECLARE_INSTRUCTION diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 9be31760..158bfbe3 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -106,18 +106,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr) { if (first_ == NULL) { HBlockEntry* entry = new HBlockEntry(); entry->InitializeAsFirst(this); - first_ = entry; + first_ = last_ = entry; } - instr->InsertAfter(GetLastInstruction()); -} - - -HInstruction* HBasicBlock::GetLastInstruction() { - if (end_ != NULL) return end_->previous(); - if (first_ == NULL) return NULL; - if (last_ == NULL) last_ = first_; - while (last_->next() != NULL) last_ = last_->next(); - return last_; + instr->InsertAfter(last_); + last_ = instr; } @@ -178,7 +170,7 @@ void HBasicBlock::SetJoinId(int id) { for (int i = 0; i < length; i++) { HBasicBlock* predecessor = predecessors_[i]; ASSERT(predecessor->end()->IsGoto()); - HSimulate* simulate = HSimulate::cast(predecessor->GetLastInstruction()); + HSimulate* simulate = HSimulate::cast(predecessor->end()->previous()); // We only need to verify the ID once. ASSERT(i != 0 || predecessor->last_environment()->closure()->shared() @@ -490,179 +482,60 @@ HConstant* HGraph::GetConstantFalse() { } -void HSubgraph::AppendOptional(HSubgraph* graph, - bool on_true_branch, - HValue* value) { - ASSERT(HasExit() && graph->HasExit()); - HBasicBlock* other_block = graph_->CreateBasicBlock(); - HBasicBlock* join_block = graph_->CreateBasicBlock(); - - HTest* test = on_true_branch - ? new HTest(value, graph->entry_block(), other_block) - : new HTest(value, other_block, graph->entry_block()); - exit_block_->Finish(test); - other_block->Goto(join_block); - graph->exit_block()->Goto(join_block); - exit_block_ = join_block; -} - - -void HSubgraph::AppendJoin(HSubgraph* then_graph, - HSubgraph* else_graph, - AstNode* node) { - if (then_graph->HasExit() && else_graph->HasExit()) { - // We need to merge, create new merge block. 
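The AddInstruction hunk above replaces GetLastInstruction()'s linear rescan with a cached tail pointer; a simplified sketch (plain C++ stand-ins, not V8 types) of the resulting O(1) append:

struct Instr {
  Instr* next = nullptr;
};

struct Block {
  Instr* first = nullptr;
  Instr* last = nullptr;

  void Append(Instr* instr) {
    if (first == nullptr) {
      first = last = instr;  // First instruction seeds both pointers.
    } else {
      last->next = instr;    // InsertAfter(last_) in the real code.
      last = instr;          // Tail cache: no rescan of the list.
    }
  }
};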
- HBasicBlock* join_block = graph_->CreateBasicBlock(); - then_graph->exit_block()->Goto(join_block); - else_graph->exit_block()->Goto(join_block); - join_block->SetJoinId(node->id()); - exit_block_ = join_block; - } else if (then_graph->HasExit()) { - exit_block_ = then_graph->exit_block_; - } else if (else_graph->HasExit()) { - exit_block_ = else_graph->exit_block_; +HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first, + HBasicBlock* second, + int join_id) { + if (first == NULL) { + return second; + } else if (second == NULL) { + return first; } else { - exit_block_ = NULL; + HBasicBlock* join_block = graph_->CreateBasicBlock(); + first->Goto(join_block); + second->Goto(join_block); + join_block->SetJoinId(join_id); + return join_block; } } -void HSubgraph::ResolveContinue(IterationStatement* statement) { - HBasicBlock* continue_block = BundleContinue(statement); +HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement, + HBasicBlock* exit_block, + HBasicBlock* continue_block) { if (continue_block != NULL) { - exit_block_ = JoinBlocks(exit_block(), - continue_block, - statement->ContinueId()); - } -} - - -HBasicBlock* HSubgraph::BundleBreak(BreakableStatement* statement) { - return BundleBreakContinue(statement, false, statement->ExitId()); -} - - -HBasicBlock* HSubgraph::BundleContinue(IterationStatement* statement) { - return BundleBreakContinue(statement, true, statement->ContinueId()); -} - - -HBasicBlock* HSubgraph::BundleBreakContinue(BreakableStatement* statement, - bool is_continue, - int join_id) { - HBasicBlock* result = NULL; - const ZoneList<BreakContinueInfo*>* infos = break_continue_info(); - for (int i = 0; i < infos->length(); ++i) { - BreakContinueInfo* info = infos->at(i); - if (info->is_continue() == is_continue && - info->target() == statement && - !info->IsResolved()) { - if (result == NULL) { - result = graph_->CreateBasicBlock(); - } - info->block()->Goto(result); - info->Resolve(); - } + if (exit_block != NULL) exit_block->Goto(continue_block); + continue_block->SetJoinId(statement->ContinueId()); + return continue_block; } - - if (result != NULL) result->SetJoinId(join_id); - - return result; -} - - -HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) { - if (a == NULL) return b; - if (b == NULL) return a; - HBasicBlock* target = graph_->CreateBasicBlock(); - a->Goto(target); - b->Goto(target); - target->SetJoinId(id); - return target; + return exit_block; } -void HSubgraph::AppendEndless(HSubgraph* body, IterationStatement* statement) { - ConnectExitTo(body->entry_block()); - body->ResolveContinue(statement); - body->ConnectExitTo(body->entry_block(), true); - exit_block_ = body->BundleBreak(statement); - body->entry_block()->PostProcessLoopHeader(statement); -} - - -void HSubgraph::AppendDoWhile(HSubgraph* body, - IterationStatement* statement, - HSubgraph* go_back, - HSubgraph* exit) { - ConnectExitTo(body->entry_block()); - go_back->ConnectExitTo(body->entry_block(), true); - - HBasicBlock* break_block = body->BundleBreak(statement); - exit_block_ = - JoinBlocks(exit->exit_block(), break_block, statement->ExitId()); - body->entry_block()->PostProcessLoopHeader(statement); -} - - -void HSubgraph::AppendWhile(HSubgraph* condition, - HSubgraph* body, - IterationStatement* statement, - HSubgraph* continue_subgraph, - HSubgraph* exit) { - ConnectExitTo(condition->entry_block()); - - HBasicBlock* break_block = body->BundleBreak(statement); - exit_block_ = - JoinBlocks(exit->exit_block(), break_block, statement->ExitId()); 
- - if (continue_subgraph != NULL) { - body->ConnectExitTo(continue_subgraph->entry_block(), true); - continue_subgraph->entry_block()->SetJoinId(statement->EntryId()); - exit_block_ = JoinBlocks(exit_block_, - continue_subgraph->exit_block(), - statement->ExitId()); - } else { - body->ConnectExitTo(condition->entry_block(), true); +HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement, + HBasicBlock* loop_entry, + HBasicBlock* body_exit, + HBasicBlock* loop_successor, + HBasicBlock* break_block) { + if (body_exit != NULL) body_exit->Goto(loop_entry, true); + loop_entry->PostProcessLoopHeader(statement); + if (break_block != NULL) { + if (loop_successor != NULL) loop_successor->Goto(break_block); + break_block->SetJoinId(statement->ExitId()); + return break_block; } - condition->entry_block()->PostProcessLoopHeader(statement); + return loop_successor; } -void HSubgraph::Append(HSubgraph* next, BreakableStatement* stmt) { - exit_block_->Goto(next->entry_block()); - exit_block_ = next->exit_block_; - - if (stmt != NULL) { - next->entry_block()->SetJoinId(stmt->EntryId()); - HBasicBlock* break_block = next->BundleBreak(stmt); - exit_block_ = JoinBlocks(exit_block(), break_block, stmt->ExitId()); - } -} - - -void HSubgraph::FinishExit(HControlInstruction* instruction) { - ASSERT(HasExit()); - exit_block_->Finish(instruction); - exit_block_->ClearEnvironment(); - exit_block_ = NULL; -} - - -void HSubgraph::FinishBreakContinue(BreakableStatement* target, - bool is_continue) { - ASSERT(!exit_block_->IsFinished()); - BreakContinueInfo* info = new BreakContinueInfo(target, exit_block_, - is_continue); - break_continue_info_.Add(info); - exit_block_ = NULL; +void HBasicBlock::FinishExit(HControlInstruction* instruction) { + Finish(instruction); + ClearEnvironment(); } HGraph::HGraph(CompilationInfo* info) : HSubgraph(this), next_block_id_(0), - info_(info), blocks_(8), values_(16), phi_list_(NULL) { @@ -671,12 +544,7 @@ HGraph::HGraph(CompilationInfo* info) } -bool HGraph::AllowCodeMotion() const { - return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount; -} - - -Handle<Code> HGraph::Compile() { +Handle<Code> HGraph::Compile(CompilationInfo* info) { int values = GetMaximumValueID(); if (values > LAllocator::max_initial_value_ids()) { if (FLAG_trace_bailout) PrintF("Function is too big\n"); @@ -684,7 +552,7 @@ Handle<Code> HGraph::Compile() { } LAllocator allocator(values, this); - LChunkBuilder builder(this, &allocator); + LChunkBuilder builder(info, this, &allocator); LChunk* chunk = builder.Build(); if (chunk == NULL) return Handle<Code>::null(); @@ -695,7 +563,7 @@ Handle<Code> HGraph::Compile() { if (!FLAG_use_lithium) return Handle<Code>::null(); MacroAssembler assembler(NULL, 0); - LCodeGen generator(chunk, &assembler, info()); + LCodeGen generator(chunk, &assembler, info); if (FLAG_eliminate_empty_blocks) { chunk->MarkEmptyBlocks(); @@ -705,13 +573,13 @@ Handle<Code> HGraph::Compile() { if (FLAG_trace_codegen) { PrintF("Crankshaft Compiler - "); } - CodeGenerator::MakeCodePrologue(info()); + CodeGenerator::MakeCodePrologue(info); Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION, NOT_IN_LOOP); Handle<Code> code = - CodeGenerator::MakeCodeEpilogue(&assembler, flags, info()); + CodeGenerator::MakeCodeEpilogue(&assembler, flags, info); generator.FinishCode(code); - CodeGenerator::PrintCode(code, info()); + CodeGenerator::PrintCode(code, info); return code; } return Handle<Code>::null(); @@ -726,20 +594,14 @@ HBasicBlock* 
HGraph::CreateBasicBlock() { void HGraph::Canonicalize() { + if (!FLAG_use_canonicalizing) return; HPhase phase("Canonicalize", this); - if (FLAG_use_canonicalizing) { - for (int i = 0; i < blocks()->length(); ++i) { - HBasicBlock* b = blocks()->at(i); - for (HInstruction* insn = b->first(); insn != NULL; insn = insn->next()) { - HValue* value = insn->Canonicalize(); - if (value != insn) { - if (value != NULL) { - insn->ReplaceAndDelete(value); - } else { - insn->Delete(); - } - } - } + for (int i = 0; i < blocks()->length(); ++i) { + HInstruction* instr = blocks()->at(i)->first(); + while (instr != NULL) { + HValue* value = instr->Canonicalize(); + if (value != instr) instr->ReplaceAndDelete(value); + instr = instr->next(); } } } @@ -999,8 +861,8 @@ void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) { void HRangeAnalysis::InferControlFlowRange(Token::Value op, HValue* value, HValue* other) { - Range* range = other->range(); - if (range == NULL) range = new Range(); + Range temp_range; + Range* range = other->range() != NULL ? other->range() : &temp_range; Range* new_range = NULL; TraceRange("Control flow range infer %d %s %d\n", @@ -1308,8 +1170,9 @@ void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) { class HGlobalValueNumberer BASE_EMBEDDED { public: - explicit HGlobalValueNumberer(HGraph* graph) + explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info) : graph_(graph), + info_(info), block_side_effects_(graph_->blocks()->length()), loop_side_effects_(graph_->blocks()->length()) { ASSERT(Heap::allow_allocation(false)); @@ -1329,9 +1192,14 @@ class HGlobalValueNumberer BASE_EMBEDDED { void ProcessLoopBlock(HBasicBlock* block, HBasicBlock* before_loop, int loop_kills); + bool AllowCodeMotion(); bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header); + HGraph* graph() { return graph_; } + CompilationInfo* info() { return info_; } + HGraph* graph_; + CompilationInfo* info_; // A map of block IDs to their side effects. ZoneList<int> block_side_effects_; @@ -1432,10 +1300,15 @@ void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block, } +bool HGlobalValueNumberer::AllowCodeMotion() { + return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount; +} + + bool HGlobalValueNumberer::ShouldMove(HInstruction* instr, HBasicBlock* loop_header) { // If we've disabled code motion, don't move any instructions. - if (!graph_->AllowCodeMotion()) return false; + if (!AllowCodeMotion()) return false; // If --aggressive-loop-invariant-motion, move everything except change // instructions. @@ -1495,8 +1368,7 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) { instr->Mnemonic(), other->id(), other->Mnemonic()); - instr->ReplaceValue(other); - instr->Delete(); + instr->ReplaceAndDelete(other); } else { map->Add(instr); } @@ -1796,8 +1668,7 @@ void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) { void HGraph::InsertRepresentationChangeForUse(HValue* value, HValue* use, - Representation to, - bool is_truncating) { + Representation to) { // Insert the representation change right before its use. For phi-uses we // insert at the end of the corresponding predecessor. HInstruction* next = NULL; @@ -1814,6 +1685,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value, // information we treat constants like normal instructions and insert the // change instructions for them. 
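For the is_truncating flag bound just below: a use such as a bitwise operator observes only ToInt32 semantics, so a truncating conversion may simply wrap the value where a full conversion would need a range check. An illustrative helper (not V8 code) computing ECMAScript-style ToInt32 for doubles:

#include <cmath>
#include <cstdint>

// Reduce modulo 2^32, then map into the signed 32-bit range.
// NaN and the infinities go to 0, per the spec.
int32_t ToInt32Sketch(double d) {
  if (!std::isfinite(d)) return 0;
  double t = std::trunc(d);
  double m = std::fmod(t, 4294967296.0);     // 2^32
  if (m < 0) m += 4294967296.0;              // Now in [0, 2^32).
  if (m >= 2147483648.0) m -= 4294967296.0;  // Map into [-2^31, 2^31).
  return static_cast<int32_t>(m);
}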
HInstruction* new_value = NULL; + bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32); if (value->IsConstant()) { HConstant* constant = HConstant::cast(value); // Try to create a new copy of the constant with the new representation. @@ -1823,7 +1695,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value, } if (new_value == NULL) { - new_value = new HChange(value, value->representation(), to); + new_value = new HChange(value, value->representation(), to, is_truncating); } new_value->InsertBefore(next); @@ -1898,8 +1770,7 @@ void HGraph::InsertRepresentationChanges(HValue* current) { for (int i = 0; i < to_convert.length(); ++i) { HValue* use = to_convert[i]; Representation r_to = to_convert_reps[i]; - bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32); - InsertRepresentationChangeForUse(current, use, r_to, is_truncating); + InsertRepresentationChangeForUse(current, use, r_to); } if (current->uses()->is_empty()) { @@ -1982,6 +1853,47 @@ void HGraph::ComputeMinusZeroChecks() { } +// Implementation of utility class to encapsulate the translation state for +// a (possibly inlined) function. +FunctionState::FunctionState(HGraphBuilder* owner, + CompilationInfo* info, + TypeFeedbackOracle* oracle) + : owner_(owner), + compilation_info_(info), + oracle_(oracle), + call_context_(NULL), + function_return_(NULL), + test_context_(NULL), + outer_(owner->function_state()) { + if (outer_ != NULL) { + // State for an inline function. + if (owner->ast_context()->IsTest()) { + HBasicBlock* if_true = owner->graph()->CreateBasicBlock(); + HBasicBlock* if_false = owner->graph()->CreateBasicBlock(); + if_true->MarkAsInlineReturnTarget(); + if_false->MarkAsInlineReturnTarget(); + // The AstContext constructor pushed on the context stack. This newed + // instance is the reason that AstContext can't be BASE_EMBEDDED. + test_context_ = new TestContext(owner, if_true, if_false); + } else { + function_return_ = owner->graph()->CreateBasicBlock(); + function_return()->MarkAsInlineReturnTarget(); + } + // Set this after possibly allocating a new TestContext above. + call_context_ = owner->ast_context(); + } + + // Push on the state stack. + owner->set_function_state(this); +} + + +FunctionState::~FunctionState() { + delete test_context_; + owner_->set_function_state(outer_); +} + + // Implementation of utility classes to represent an expression's context in // the AST. 
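Before the AST context classes, a generic sketch (hypothetical names) of the RAII scope-stack idiom that FunctionState above relies on: the constructor pushes the new state and remembers the enclosing one, and the destructor restores it, so nested inline compilations unwind correctly even on early exits.

class Builder;

class ScopedState {
 public:
  explicit ScopedState(Builder* owner);
  ~ScopedState();

 private:
  Builder* owner_;
  ScopedState* outer_;  // The state this one shadows.
};

class Builder {
 public:
  ScopedState* state() const { return state_; }
  void set_state(ScopedState* s) { state_ = s; }

 private:
  ScopedState* state_ = nullptr;
};

ScopedState::ScopedState(Builder* owner)
    : owner_(owner), outer_(owner->state()) {
  owner_->set_state(this);    // Push onto the state stack.
}

ScopedState::~ScopedState() {
  owner_->set_state(outer_);  // Pop, restoring the enclosing state.
}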
AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind) @@ -2000,14 +1912,14 @@ AstContext::~AstContext() { EffectContext::~EffectContext() { ASSERT(owner()->HasStackOverflow() || - !owner()->subgraph()->HasExit() || + owner()->current_block() == NULL || owner()->environment()->length() == original_length_); } ValueContext::~ValueContext() { ASSERT(owner()->HasStackOverflow() || - !owner()->subgraph()->HasExit() || + owner()->current_block() == NULL || owner()->environment()->length() == original_length_ + 1); } @@ -2065,7 +1977,7 @@ void TestContext::BuildBranch(HValue* value) { HBasicBlock* empty_true = builder->graph()->CreateBasicBlock(); HBasicBlock* empty_false = builder->graph()->CreateBasicBlock(); HTest* test = new HTest(value, empty_true, empty_false); - builder->CurrentBlock()->Finish(test); + builder->current_block()->Finish(test); HValue* const no_return_value = NULL; HBasicBlock* true_target = if_true(); @@ -2081,7 +1993,7 @@ void TestContext::BuildBranch(HValue* value) { } else { empty_false->Goto(false_target); } - builder->subgraph()->set_exit_block(NULL); + builder->set_current_block(NULL); } @@ -2138,7 +2050,6 @@ class HGraphBuilder::SubgraphScope BASE_EMBEDDED { } ~SubgraphScope() { - old_subgraph_->AddBreakContinueInfo(subgraph_); builder_->current_subgraph_ = old_subgraph_; } @@ -2153,8 +2064,8 @@ class HGraphBuilder::SubgraphScope BASE_EMBEDDED { void HGraphBuilder::Bailout(const char* reason) { if (FLAG_trace_bailout) { - SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); - PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *debug_name, reason); + SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *name, reason); } SetStackOverflow(); } @@ -2181,78 +2092,107 @@ void HGraphBuilder::VisitForControl(Expression* expr, void HGraphBuilder::VisitArgument(Expression* expr) { - VisitForValue(expr); + VISIT_FOR_VALUE(expr); + Push(AddInstruction(new HPushArgument(Pop()))); } void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) { for (int i = 0; i < arguments->length(); i++) { VisitArgument(arguments->at(i)); - if (HasStackOverflow() || !current_subgraph_->HasExit()) return; + if (HasStackOverflow() || current_block() == NULL) return; } } -HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) { - ASSERT(current_subgraph_ == NULL); - graph_ = new HGraph(info); +void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) { + for (int i = 0; i < exprs->length(); ++i) { + VISIT_FOR_VALUE(exprs->at(i)); + } +} + + +HGraph* HGraphBuilder::CreateGraph() { + ASSERT(subgraph() == NULL); + graph_ = new HGraph(info()); { HPhase phase("Block building"); - graph_->Initialize(CreateBasicBlock(graph_->start_environment())); - current_subgraph_ = graph_; + graph()->Initialize(CreateBasicBlock(graph()->start_environment())); + current_subgraph_ = graph(); - Scope* scope = info->scope(); + Scope* scope = info()->scope(); + if (scope->HasIllegalRedeclaration()) { + Bailout("function with illegal redeclaration"); + return NULL; + } SetupScope(scope); VisitDeclarations(scope->declarations()); - AddInstruction(new HStackCheck()); - ZoneList<Statement*>* stmts = info->function()->body(); - HSubgraph* body = CreateGotoSubgraph(environment()); - AddToSubgraph(body, stmts); + // Add an edge to the body entry. 
This is warty: the graph's start + // environment will be used by the Lithium translation as the initial + // environment on graph entry, but it has now been mutated by the + // Hydrogen translation of the instructions in the start block. This + // environment uses values which have not been defined yet. These + // Hydrogen instructions will then be replayed by the Lithium + // translation, so they cannot have an environment effect. The edge to + // the body's entry block (along with some special logic for the start + // block in HInstruction::InsertAfter) seals the start block from + // getting unwanted instructions inserted. + // + // TODO(kmillikin): Fix this. Stop mutating the initial environment. + // Make the Hydrogen instructions in the initial block into Hydrogen + // values (but not instructions), present in the initial environment and + // not replayed by the Lithium translation. + HEnvironment* initial_env = environment()->CopyWithoutHistory(); + HBasicBlock* body_entry = CreateBasicBlock(initial_env); + current_block()->Goto(body_entry); + body_entry->SetJoinId(info()->function()->id()); + set_current_block(body_entry); + VisitStatements(info()->function()->body()); if (HasStackOverflow()) return NULL; - current_subgraph_->Append(body, NULL); - body->entry_block()->SetJoinId(info->function()->id()); - if (graph_->HasExit()) { - graph_->FinishExit(new HReturn(graph_->GetConstantUndefined())); + if (current_block() != NULL) { + HReturn* instr = new HReturn(graph()->GetConstantUndefined()); + current_block()->FinishExit(instr); + set_current_block(NULL); } } - graph_->OrderBlocks(); - graph_->AssignDominators(); - graph_->EliminateRedundantPhis(); - if (!graph_->CollectPhis()) { + graph()->OrderBlocks(); + graph()->AssignDominators(); + graph()->EliminateRedundantPhis(); + if (!graph()->CollectPhis()) { Bailout("Phi-use of arguments object"); return NULL; } - HInferRepresentation rep(graph_); + HInferRepresentation rep(graph()); rep.Analyze(); if (FLAG_use_range) { - HRangeAnalysis rangeAnalysis(graph_); + HRangeAnalysis rangeAnalysis(graph()); rangeAnalysis.Analyze(); } - graph_->InitializeInferredTypes(); - graph_->Canonicalize(); - graph_->InsertRepresentationChanges(); - graph_->ComputeMinusZeroChecks(); + graph()->InitializeInferredTypes(); + graph()->Canonicalize(); + graph()->InsertRepresentationChanges(); + graph()->ComputeMinusZeroChecks(); // Eliminate redundant stack checks on backwards branches. - HStackCheckEliminator sce(graph_); + HStackCheckEliminator sce(graph()); sce.Process(); // Perform common subexpression elimination and loop-invariant code motion. 
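As a schematic reading aid for the GVN pass invoked next (simplified, non-V8 types): instructions that opt in are keyed by hash and equality, a hit replaces the redundant computation, and an instruction's side effects evict any cached entry that depends on them.

#include <iterator>
#include <unordered_map>
#include <vector>

struct VInstr {
  size_t hash;                       // Analogous to Hashcode().
  bool (*equals)(VInstr*, VInstr*);  // Analogous to DataEquals().
  unsigned changes_flags;            // Side effects this instruction causes.
  unsigned depends_flags;            // Effects that invalidate its result.
};

void NumberBlock(std::vector<VInstr*>& block,
                 void (*replace)(VInstr* dead, VInstr* canonical)) {
  std::unordered_map<size_t, VInstr*> map;
  for (VInstr* instr : block) {
    if (instr->changes_flags != 0) {
      // Kill cached entries whose value may have been changed.
      for (auto it = map.begin(); it != map.end();) {
        it = (it->second->depends_flags & instr->changes_flags)
                 ? map.erase(it) : std::next(it);
      }
    }
    auto it = map.find(instr->hash);
    if (it != map.end() && instr->equals(instr, it->second)) {
      replace(instr, it->second);  // Reuse the earlier computation.
    } else {
      map[instr->hash] = instr;
    }
  }
}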
if (FLAG_use_gvn) { - HPhase phase("Global value numbering", graph_); - HGlobalValueNumberer gvn(graph_); + HPhase phase("Global value numbering", graph()); + HGlobalValueNumberer gvn(graph(), info()); gvn.Analyze(); } - return graph_; + return graph(); } @@ -2276,21 +2216,21 @@ void HGraphBuilder::AddToSubgraph(HSubgraph* graph, HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { - ASSERT(current_subgraph_->HasExit()); - current_subgraph_->exit_block()->AddInstruction(instr); + ASSERT(current_block() != NULL); + current_block()->AddInstruction(instr); return instr; } void HGraphBuilder::AddSimulate(int id) { - ASSERT(current_subgraph_->HasExit()); - current_subgraph_->exit_block()->AddSimulate(id); + ASSERT(current_block() != NULL); + current_block()->AddSimulate(id); } void HGraphBuilder::AddPhi(HPhi* instr) { - ASSERT(current_subgraph_->HasExit()); - current_subgraph_->exit_block()->AddPhi(instr); + ASSERT(current_block() != NULL); + current_block()->AddPhi(instr); } @@ -2300,7 +2240,8 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) { } -void HGraphBuilder::PreProcessCall(HCall* call) { +template <int V> +HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) { int count = call->argument_count(); ZoneList<HValue*> arguments(count); for (int i = 0; i < count; ++i) { @@ -2310,6 +2251,7 @@ void HGraphBuilder::PreProcessCall(HCall* call) { while (!arguments.is_empty()) { AddInstruction(new HPushArgument(arguments.RemoveLast())); } + return call; } @@ -2317,9 +2259,6 @@ void HGraphBuilder::SetupScope(Scope* scope) { // We don't yet handle the function name for named function expressions. if (scope->function() != NULL) BAILOUT("named function expression"); - // We can't handle heap-allocated locals. - if (scope->num_heap_slots() > 0) BAILOUT("heap allocated locals"); - HConstant* undefined_constant = new HConstant(Factory::undefined_value(), Representation::Tagged()); AddInstruction(undefined_constant); @@ -2341,6 +2280,10 @@ void HGraphBuilder::SetupScope(Scope* scope) { // Handle the arguments and arguments shadow variables specially (they do // not have declarations). 
if (scope->arguments() != NULL) { + if (!scope->arguments()->IsStackAllocated() || + !scope->arguments_shadow()->IsStackAllocated()) { + BAILOUT("context-allocated arguments"); + } HArgumentsObject* object = new HArgumentsObject; AddInstruction(object); graph()->SetArgumentsObject(object); @@ -2353,7 +2296,7 @@ void HGraphBuilder::SetupScope(Scope* scope) { void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) { for (int i = 0; i < statements->length(); i++) { Visit(statements->at(i)); - if (HasStackOverflow() || !current_subgraph_->HasExit()) break; + if (HasStackOverflow() || current_block() == NULL) break; } } @@ -2377,14 +2320,6 @@ HSubgraph* HGraphBuilder::CreateInlinedSubgraph(HEnvironment* outer, } -HSubgraph* HGraphBuilder::CreateGotoSubgraph(HEnvironment* env) { - HSubgraph* subgraph = new HSubgraph(graph()); - HEnvironment* new_env = env->CopyWithoutHistory(); - subgraph->Initialize(CreateBasicBlock(new_env)); - return subgraph; -} - - HSubgraph* HGraphBuilder::CreateEmptySubgraph() { HSubgraph* subgraph = new HSubgraph(graph()); subgraph->Initialize(graph()->CreateBasicBlock()); @@ -2400,24 +2335,26 @@ HSubgraph* HGraphBuilder::CreateBranchSubgraph(HEnvironment* env) { } -HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) { - HSubgraph* subgraph = new HSubgraph(graph()); - HBasicBlock* block = graph()->CreateBasicBlock(); - HEnvironment* new_env = env->CopyAsLoopHeader(block); - block->SetInitialEnvironment(new_env); - subgraph->Initialize(block); - subgraph->entry_block()->AttachLoopInformation(); - return subgraph; +HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() { + HBasicBlock* header = graph()->CreateBasicBlock(); + HEnvironment* entry_env = environment()->CopyAsLoopHeader(header); + header->SetInitialEnvironment(entry_env); + header->AttachLoopInformation(); + return header; } void HGraphBuilder::VisitBlock(Block* stmt) { - if (stmt->labels() != NULL) { - HSubgraph* block_graph = CreateGotoSubgraph(environment()); - ADD_TO_SUBGRAPH(block_graph, stmt->statements()); - current_subgraph_->Append(block_graph, stmt); - } else { + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); VisitStatements(stmt->statements()); + CHECK_BAILOUT; + } + HBasicBlock* break_block = break_info.break_block(); + if (break_block != NULL) { + if (current_block() != NULL) current_block()->Goto(break_block); + break_block->SetJoinId(stmt->ExitId()); + set_current_block(break_block); } } @@ -2439,30 +2376,69 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) { AddSimulate(stmt->ElseId()); Visit(stmt->else_statement()); } else { - HSubgraph* then_graph = CreateEmptySubgraph(); - HSubgraph* else_graph = CreateEmptySubgraph(); - VISIT_FOR_CONTROL(stmt->condition(), - then_graph->entry_block(), - else_graph->entry_block()); + HBasicBlock* cond_true = graph()->CreateBasicBlock(); + HBasicBlock* cond_false = graph()->CreateBasicBlock(); + VISIT_FOR_CONTROL(stmt->condition(), cond_true, cond_false); + cond_true->SetJoinId(stmt->ThenId()); + cond_false->SetJoinId(stmt->ElseId()); - then_graph->entry_block()->SetJoinId(stmt->ThenId()); - ADD_TO_SUBGRAPH(then_graph, stmt->then_statement()); + set_current_block(cond_true); + Visit(stmt->then_statement()); + CHECK_BAILOUT; + HBasicBlock* other = current_block(); - else_graph->entry_block()->SetJoinId(stmt->ElseId()); - ADD_TO_SUBGRAPH(else_graph, stmt->else_statement()); + set_current_block(cond_false); + Visit(stmt->else_statement()); + CHECK_BAILOUT; - 
current_subgraph_->AppendJoin(then_graph, else_graph, stmt); + HBasicBlock* join = CreateJoin(other, current_block(), stmt->id()); + set_current_block(join); } } +HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get( + BreakableStatement* stmt, + BreakType type) { + BreakAndContinueScope* current = this; + while (current != NULL && current->info()->target() != stmt) { + current = current->next(); + } + ASSERT(current != NULL); // Always found (unless stack is malformed). + HBasicBlock* block = NULL; + switch (type) { + case BREAK: + block = current->info()->break_block(); + if (block == NULL) { + block = current->owner()->graph()->CreateBasicBlock(); + current->info()->set_break_block(block); + } + break; + + case CONTINUE: + block = current->info()->continue_block(); + if (block == NULL) { + block = current->owner()->graph()->CreateBasicBlock(); + current->info()->set_continue_block(block); + } + break; + } + + return block; +} + + void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { - current_subgraph_->FinishBreakContinue(stmt->target(), true); + HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE); + current_block()->Goto(continue_block); + set_current_block(NULL); } void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { - current_subgraph_->FinishBreakContinue(stmt->target(), false); + HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK); + current_block()->Goto(break_block); + set_current_block(NULL); } @@ -2472,7 +2448,8 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { // Not an inlined return, so an actual one. VISIT_FOR_VALUE(stmt->expression()); HValue* result = environment()->Pop(); - subgraph()->FinishExit(new HReturn(result)); + current_block()->FinishExit(new HReturn(result)); + set_current_block(NULL); } else { // Return from an inlined function, visit the subexpression in the // expression context of the call. @@ -2491,9 +2468,9 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { VISIT_FOR_VALUE(stmt->expression()); return_value = environment()->Pop(); } - subgraph()->exit_block()->AddLeaveInlined(return_value, - function_return_); - subgraph()->set_exit_block(NULL); + current_block()->AddLeaveInlined(return_value, + function_return()); + set_current_block(NULL); } } } @@ -2514,7 +2491,7 @@ HCompare* HGraphBuilder::BuildSwitchCompare(HSubgraph* subgraph, CaseClause* clause) { AddToSubgraph(subgraph, clause->label()); if (HasStackOverflow()) return NULL; - HValue* clause_value = subgraph->environment()->Pop(); + HValue* clause_value = subgraph->exit_block()->last_environment()->Pop(); HCompare* compare = new HCompare(switch_value, clause_value, Token::EQ_STRICT); @@ -2595,7 +2572,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { // last_false_block is the (empty) false-block of the last comparison. If // there are no comparisons at all (a single default clause), it is just // the last block of the current subgraph. - HBasicBlock* last_false_block = current_subgraph_->exit_block(); + HBasicBlock* last_false_block = current_block(); if (prev_graph != current_subgraph_) { last_false_block = graph()->CreateBasicBlock(); HBasicBlock* empty = graph()->CreateBasicBlock(); @@ -2638,17 +2615,20 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { } // Check for fall-through from previous statement block. 
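A compact sketch (hypothetical names) of the BreakAndContinueScope::Get lookup above: scopes chain from innermost to outermost, the first scope whose target matches wins, and its join block is allocated lazily so break and continue targets that are never referenced cost nothing.

struct Target;

struct Scope {
  Target* target;
  Scope* next;                   // The enclosing breakable statement.
  void* break_block = nullptr;   // Created on first use.
};

void* GetBreakBlock(Scope* innermost, Target* stmt, void* (*new_block)()) {
  Scope* current = innermost;
  while (current != nullptr && current->target != stmt) {
    current = current->next;     // Walk outward through enclosing scopes.
  }
  // A well-formed AST guarantees the target is found.
  if (current->break_block == nullptr) {
    current->break_block = new_block();  // Lazily allocate the join block.
  }
  return current->break_block;
}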
- if (previous_subgraph != NULL && previous_subgraph->HasExit()) { + if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) { if (subgraph == NULL) subgraph = CreateEmptySubgraph(); previous_subgraph->exit_block()-> Finish(new HGoto(subgraph->entry_block())); } if (subgraph != NULL) { - ADD_TO_SUBGRAPH(subgraph, clause->statements()); - HBasicBlock* break_block = subgraph->BundleBreak(stmt); - if (break_block != NULL) { - break_block->Finish(new HGoto(single_exit_block)); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + ADD_TO_SUBGRAPH(subgraph, clause->statements()); + } + if (break_info.break_block() != NULL) { + break_info.break_block()->SetJoinId(stmt->ExitId()); + break_info.break_block()->Finish(new HGoto(single_exit_block)); } } @@ -2657,7 +2637,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { // If the last statement block has a fall-through, connect it to the // single exit block. - if (previous_subgraph != NULL && previous_subgraph->HasExit()) { + if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) { previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block)); } @@ -2667,181 +2647,160 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { } if (single_exit_block->HasPredecessor()) { - current_subgraph_->set_exit_block(single_exit_block); + set_current_block(single_exit_block); } else { - current_subgraph_->set_exit_block(NULL); + set_current_block(NULL); } } -bool HGraph::HasOsrEntryAt(IterationStatement* statement) { +bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) { return statement->OsrEntryId() == info()->osr_ast_id(); } -void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) { - if (!graph()->HasOsrEntryAt(statement)) return; +void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) { + if (!HasOsrEntryAt(statement)) return; HBasicBlock* non_osr_entry = graph()->CreateBasicBlock(); HBasicBlock* osr_entry = graph()->CreateBasicBlock(); HValue* true_value = graph()->GetConstantTrue(); HTest* test = new HTest(true_value, non_osr_entry, osr_entry); - exit_block()->Finish(test); + current_block()->Finish(test); HBasicBlock* loop_predecessor = graph()->CreateBasicBlock(); non_osr_entry->Goto(loop_predecessor); + set_current_block(osr_entry); int osr_entry_id = statement->OsrEntryId(); // We want the correct environment at the OsrEntry instruction. Build // it explicitly. The expression stack should be empty. 
- int count = osr_entry->last_environment()->length(); - ASSERT(count == (osr_entry->last_environment()->parameter_count() + - osr_entry->last_environment()->local_count())); + int count = environment()->length(); + ASSERT(count == + (environment()->parameter_count() + environment()->local_count())); for (int i = 0; i < count; ++i) { HUnknownOSRValue* unknown = new HUnknownOSRValue; - osr_entry->AddInstruction(unknown); - osr_entry->last_environment()->Bind(i, unknown); + AddInstruction(unknown); + environment()->Bind(i, unknown); } - osr_entry->AddSimulate(osr_entry_id); - osr_entry->AddInstruction(new HOsrEntry(osr_entry_id)); - osr_entry->Goto(loop_predecessor); + AddSimulate(osr_entry_id); + AddInstruction(new HOsrEntry(osr_entry_id)); + current_block()->Goto(loop_predecessor); loop_predecessor->SetJoinId(statement->EntryId()); - set_exit_block(loop_predecessor); + set_current_block(loop_predecessor); } void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { - ASSERT(subgraph()->HasExit()); - subgraph()->PreProcessOsrEntry(stmt); - - HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment()); - ADD_TO_SUBGRAPH(body_graph, stmt->body()); - body_graph->ResolveContinue(stmt); - - if (!body_graph->HasExit() || stmt->cond()->ToBooleanIsTrue()) { - current_subgraph_->AppendEndless(body_graph, stmt); - } else { - HSubgraph* go_back = CreateEmptySubgraph(); - HSubgraph* exit = CreateEmptySubgraph(); - { - SubgraphScope scope(this, body_graph); - VISIT_FOR_CONTROL(stmt->cond(), - go_back->entry_block(), - exit->entry_block()); - go_back->entry_block()->SetJoinId(stmt->BackEdgeId()); - exit->entry_block()->SetJoinId(stmt->ExitId()); - } - current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit); - } -} - - -bool HGraphBuilder::ShouldPeel(HSubgraph* cond, HSubgraph* body) { - return FLAG_use_peeling; -} - - -void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { - ASSERT(subgraph()->HasExit()); - subgraph()->PreProcessOsrEntry(stmt); - - HSubgraph* cond_graph = NULL; - HSubgraph* body_graph = NULL; - HSubgraph* exit_graph = NULL; - - // If the condition is constant true, do not generate a condition subgraph. - if (stmt->cond()->ToBooleanIsTrue()) { - body_graph = CreateLoopHeaderSubgraph(environment()); - ADD_TO_SUBGRAPH(body_graph, stmt->body()); - } else { - cond_graph = CreateLoopHeaderSubgraph(environment()); - body_graph = CreateEmptySubgraph(); - exit_graph = CreateEmptySubgraph(); - { - SubgraphScope scope(this, cond_graph); - VISIT_FOR_CONTROL(stmt->cond(), - body_graph->entry_block(), - exit_graph->entry_block()); - body_graph->entry_block()->SetJoinId(stmt->BodyId()); - exit_graph->entry_block()->SetJoinId(stmt->ExitId()); - } - ADD_TO_SUBGRAPH(body_graph, stmt->body()); + ASSERT(current_block() != NULL); + PreProcessOsrEntry(stmt); + HBasicBlock* loop_entry = CreateLoopHeaderBlock(); + current_block()->Goto(loop_entry, false); + set_current_block(loop_entry); + + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + Visit(stmt->body()); + CHECK_BAILOUT; } - - body_graph->ResolveContinue(stmt); - - if (cond_graph != NULL) { - AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph); - } else { - // TODO(fschneider): Implement peeling for endless loops as well. 
- current_subgraph_->AppendEndless(body_graph, stmt); + HBasicBlock* body_exit = + JoinContinue(stmt, current_block(), break_info.continue_block()); + HBasicBlock* loop_successor = NULL; + if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) { + set_current_block(body_exit); + // The block for a true condition, the actual predecessor block of the + // back edge. + body_exit = graph()->CreateBasicBlock(); + loop_successor = graph()->CreateBasicBlock(); + VISIT_FOR_CONTROL(stmt->cond(), body_exit, loop_successor); + body_exit->SetJoinId(stmt->BackEdgeId()); + loop_successor->SetJoinId(stmt->ExitId()); } + HBasicBlock* loop_exit = CreateLoop(stmt, + loop_entry, + body_exit, + loop_successor, + break_info.break_block()); + set_current_block(loop_exit); } -void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt, - HSubgraph* cond_graph, - HSubgraph* body_graph, - HSubgraph* exit_graph) { - HSubgraph* loop = NULL; - if (body_graph->HasExit() && stmt != peeled_statement_ && - ShouldPeel(cond_graph, body_graph)) { - // Save the last peeled iteration statement to prevent infinite recursion. - IterationStatement* outer_peeled_statement = peeled_statement_; - peeled_statement_ = stmt; - loop = CreateGotoSubgraph(body_graph->environment()); - ADD_TO_SUBGRAPH(loop, stmt); - peeled_statement_ = outer_peeled_statement; +void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { + ASSERT(current_block() != NULL); + PreProcessOsrEntry(stmt); + HBasicBlock* loop_entry = CreateLoopHeaderBlock(); + current_block()->Goto(loop_entry, false); + set_current_block(loop_entry); + + // If the condition is constant true, do not generate a branch. + HBasicBlock* loop_successor = NULL; + if (!stmt->cond()->ToBooleanIsTrue()) { + HBasicBlock* body_entry = graph()->CreateBasicBlock(); + loop_successor = graph()->CreateBasicBlock(); + VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor); + body_entry->SetJoinId(stmt->BodyId()); + loop_successor->SetJoinId(stmt->ExitId()); + set_current_block(body_entry); + } + + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + Visit(stmt->body()); + CHECK_BAILOUT; } - current_subgraph_->AppendWhile(cond_graph, body_graph, stmt, loop, - exit_graph); + HBasicBlock* body_exit = + JoinContinue(stmt, current_block(), break_info.continue_block()); + HBasicBlock* loop_exit = CreateLoop(stmt, + loop_entry, + body_exit, + loop_successor, + break_info.break_block()); + set_current_block(loop_exit); } void HGraphBuilder::VisitForStatement(ForStatement* stmt) { - // Only visit the init statement in the peeled part of the loop. 
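The rewritten do/while visitor above, and the while/for visitors that follow, all funnel into CreateLoop; a simplified non-V8 sketch of that contract, where NULL consistently means "this path is unreachable": wire the back edge, finalize the loop header, then pick the exit, preferring an explicit break target over the plain fall-out successor.

struct LBlock {
  void GotoBlock(LBlock* target, bool back_edge = false) { /* ... */ }
  void SetJoinId(int id) { /* ... */ }
  void PostProcessLoopHeader() { /* ... */ }
};

LBlock* CreateLoopSketch(LBlock* loop_entry,
                         LBlock* body_exit,       // NULL: body never falls out.
                         LBlock* loop_successor,  // NULL: condition always true.
                         LBlock* break_block,     // NULL: no break statements.
                         int exit_id) {
  if (body_exit != nullptr) body_exit->GotoBlock(loop_entry, true);
  loop_entry->PostProcessLoopHeader();
  if (break_block != nullptr) {
    if (loop_successor != nullptr) loop_successor->GotoBlock(break_block);
    break_block->SetJoinId(exit_id);
    return break_block;
  }
  return loop_successor;  // May be NULL: an endless loop with no break.
}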
- if (stmt->init() != NULL && peeled_statement_ != stmt) { + if (stmt->init() != NULL) { Visit(stmt->init()); CHECK_BAILOUT; } - ASSERT(subgraph()->HasExit()); - subgraph()->PreProcessOsrEntry(stmt); + ASSERT(current_block() != NULL); + PreProcessOsrEntry(stmt); + HBasicBlock* loop_entry = CreateLoopHeaderBlock(); + current_block()->Goto(loop_entry, false); + set_current_block(loop_entry); - HSubgraph* cond_graph = NULL; - HSubgraph* body_graph = NULL; - HSubgraph* exit_graph = NULL; + HBasicBlock* loop_successor = NULL; if (stmt->cond() != NULL) { - cond_graph = CreateLoopHeaderSubgraph(environment()); - body_graph = CreateEmptySubgraph(); - exit_graph = CreateEmptySubgraph(); - { - SubgraphScope scope(this, cond_graph); - VISIT_FOR_CONTROL(stmt->cond(), - body_graph->entry_block(), - exit_graph->entry_block()); - body_graph->entry_block()->SetJoinId(stmt->BodyId()); - exit_graph->entry_block()->SetJoinId(stmt->ExitId()); - } - } else { - body_graph = CreateLoopHeaderSubgraph(environment()); + HBasicBlock* body_entry = graph()->CreateBasicBlock(); + loop_successor = graph()->CreateBasicBlock(); + VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor); + body_entry->SetJoinId(stmt->BodyId()); + loop_successor->SetJoinId(stmt->ExitId()); + set_current_block(body_entry); } - ADD_TO_SUBGRAPH(body_graph, stmt->body()); - HSubgraph* next_graph = NULL; - body_graph->ResolveContinue(stmt); - - if (stmt->next() != NULL && body_graph->HasExit()) { - next_graph = CreateGotoSubgraph(body_graph->environment()); - ADD_TO_SUBGRAPH(next_graph, stmt->next()); - body_graph->Append(next_graph, NULL); - next_graph->entry_block()->SetJoinId(stmt->ContinueId()); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + Visit(stmt->body()); + CHECK_BAILOUT; } + HBasicBlock* body_exit = + JoinContinue(stmt, current_block(), break_info.continue_block()); - if (cond_graph != NULL) { - AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph); - } else { - current_subgraph_->AppendEndless(body_graph, stmt); + if (stmt->next() != NULL && body_exit != NULL) { + set_current_block(body_exit); + Visit(stmt->next()); + CHECK_BAILOUT; + body_exit = current_block(); } + + HBasicBlock* loop_exit = CreateLoop(stmt, + loop_entry, + body_exit, + loop_successor, + break_info.break_block()); + set_current_block(loop_exit); } @@ -2867,7 +2826,7 @@ void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { Handle<SharedFunctionInfo> shared_info = - Compiler::BuildFunctionInfo(expr, graph_->info()->script()); + Compiler::BuildFunctionInfo(expr, info()->script()); CHECK_BAILOUT; HFunctionLiteral* instr = new HFunctionLiteral(shared_info, expr->pretenure()); @@ -2882,19 +2841,23 @@ void HGraphBuilder::VisitSharedFunctionInfoLiteral( void HGraphBuilder::VisitConditional(Conditional* expr) { - HSubgraph* then_graph = CreateEmptySubgraph(); - HSubgraph* else_graph = CreateEmptySubgraph(); - VISIT_FOR_CONTROL(expr->condition(), - then_graph->entry_block(), - else_graph->entry_block()); - - then_graph->entry_block()->SetJoinId(expr->ThenId()); - ADD_TO_SUBGRAPH(then_graph, expr->then_expression()); - - else_graph->entry_block()->SetJoinId(expr->ElseId()); - ADD_TO_SUBGRAPH(else_graph, expr->else_expression()); - - current_subgraph_->AppendJoin(then_graph, else_graph, expr); + HBasicBlock* cond_true = graph()->CreateBasicBlock(); + HBasicBlock* cond_false = graph()->CreateBasicBlock(); + 
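With peeling removed, the for statement above gets the textbook lowering: init runs once before the header, the condition branches between body and exit, and next runs only on the path that reaches the back edge. Restated as plain control flow, with labels standing in for the basic blocks:

    #include <cstdio>

    int main() {
      int sum = 0;
      int i = 0;                       // stmt->init(): runs once, before the header
    loop_entry:                        // the loop header block
      if (!(i < 5)) goto loop_exit;    // cond: branch to body_entry or loop_successor
      sum += i;                        // stmt->body()
      i += 1;                          // stmt->next(): only on the surviving body exit
      goto loop_entry;                 // the back edge
    loop_exit:
      std::printf("sum = %d\n", sum);  // prints sum = 10
      return 0;
    }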
VISIT_FOR_CONTROL(expr->condition(), cond_true, cond_false);
+ cond_true->SetJoinId(expr->ThenId());
+ cond_false->SetJoinId(expr->ElseId());
+
+ // TODO(kmillikin): Visit the subexpressions in the same AST context as
+ // the whole expression.
+ set_current_block(cond_true);
+ VISIT_FOR_VALUE(expr->then_expression());
+ HBasicBlock* other = current_block();
+
+ set_current_block(cond_false);
+ VISIT_FOR_VALUE(expr->else_expression());
+
+ HBasicBlock* join = CreateJoin(other, current_block(), expr->id());
+ set_current_block(join);
ast_context()->ReturnValue(Pop());
}
@@ -2905,10 +2868,10 @@ void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
if (var->is_this()) {
BAILOUT("global this reference");
}
- if (!graph()->info()->has_global_object()) {
+ if (!info()->has_global_object()) {
BAILOUT("no global object to optimize VariableProxy");
}
- Handle<GlobalObject> global(graph()->info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsProperty()) {
BAILOUT("global variable cell not yet introduced");
}
@@ -2929,7 +2892,7 @@ HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HInstruction* context = new HContext;
AddInstruction(context);
- int length = graph()->info()->scope()->ContextChainLength(var->scope());
+ int length = info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
context = new HOuterContext(context);
AddInstruction(context);
}
@@ -2960,7 +2923,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
LookupGlobalPropertyCell(variable, &lookup, false);
CHECK_BAILOUT;
- Handle<GlobalObject> global(graph()->info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
// TODO(3039103): Handle global property load through an IC call when access
// checks are enabled.
if (global->IsAccessCheckNeeded()) {
@@ -3086,53 +3049,47 @@ void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
}
-HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
- ZoneList<HSubgraph*>* subgraphs,
- HValue* receiver,
+HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
+ ZoneMapList* maps,
+ ZoneList<HSubgraph*>* body_graphs,
+ HSubgraph* default_graph,
int join_id) {
- ASSERT(subgraphs->length() == (maps->length() + 1));
-
- // Build map compare subgraphs for all but the first map.
- ZoneList<HSubgraph*> map_compare_subgraphs(maps->length() - 1);
- for (int i = maps->length() - 1; i > 0; --i) {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
- HSubgraph* else_subgraph =
- (i == (maps->length() - 1))
- ? subgraphs->last()
- : map_compare_subgraphs.last();
- HCompareMap* compare = new HCompareMap(receiver,
- maps->at(i),
- subgraphs->at(i)->entry_block(),
- else_subgraph->entry_block());
- current_subgraph_->exit_block()->Finish(compare);
- map_compare_subgraphs.Add(subgraph);
- }
-
- // Generate first map check to end the current block.
+ ASSERT(maps->length() == body_graphs->length());
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
AddInstruction(new HCheckNonSmi(receiver));
- HSubgraph* else_subgraph =
- (maps->length() == 1) ? 
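BuildContextChainWalk, shown earlier in this hunk, reaches a context-allocated variable by emitting one HOuterContext hop per lexical level before the slot access. A self-contained model of that walk (illustrative types, not V8's):

    #include <cstdio>

    struct Context {
      Context* outer;  // enclosing function's context
      int slots[4];    // heap-allocated variables of this scope
    };

    // One HOuterContext hop per level, then the slot access itself.
    int LoadContextSlot(Context* current, int chain_length, int index) {
      while (chain_length-- > 0) current = current->outer;
      return current->slots[index];
    }

    int main() {
      Context global = {nullptr, {10, 20, 30, 40}};
      Context fn     = {&global, {1, 2, 3, 4}};
      Context inner  = {&fn,     {0, 0, 0, 0}};
      // Two hops out of `inner` reach `global`; slot 2 holds 30.
      std::printf("%d\n", LoadContextSlot(&inner, 2, 2));
      return 0;
    }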
subgraphs->at(1) : map_compare_subgraphs.last(); - HCompareMap* compare = new HCompareMap(receiver, - Handle<Map>(maps->first()), - subgraphs->first()->entry_block(), - else_subgraph->entry_block()); - current_subgraph_->exit_block()->Finish(compare); - - // Join all the call subgraphs in a new basic block and make - // this basic block the current basic block. - HBasicBlock* join_block = graph_->CreateBasicBlock(); - for (int i = 0; i < subgraphs->length(); ++i) { - HSubgraph* subgraph = subgraphs->at(i); - if (subgraph->HasExit()) { + + for (int i = 0; i < maps->length(); ++i) { + // Build the branches, connect all the target subgraphs to the join + // block. Use the default as a target of the last branch. + HSubgraph* if_true = body_graphs->at(i); + HSubgraph* if_false = (i == maps->length() - 1) + ? default_graph + : CreateBranchSubgraph(environment()); + HCompareMap* compare = + new HCompareMap(receiver, + maps->at(i), + if_true->entry_block(), + if_false->entry_block()); + current_block()->Finish(compare); + + if (if_true->exit_block() != NULL) { // In an effect context the value of the type switch is not needed. // There is no need to merge it at the join block only to discard it. - HBasicBlock* subgraph_exit = subgraph->exit_block(); if (ast_context()->IsEffect()) { - subgraph_exit->last_environment()->Drop(1); + if_true->exit_block()->last_environment()->Drop(1); } - subgraph_exit->Goto(join_block); + if_true->exit_block()->Goto(join_block); + } + + set_current_block(if_false->exit_block()); + } + + // Connect the default if necessary. + if (current_block() != NULL) { + if (ast_context()->IsEffect()) { + environment()->Drop(1); } + current_block()->Goto(join_block); } if (join_block->predecessors()->is_empty()) return NULL; @@ -3236,68 +3193,73 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr, HValue* value, ZoneMapList* types, Handle<String> name) { - int number_of_types = Min(types->length(), kMaxStorePolymorphism); - ZoneMapList maps(number_of_types); - ZoneList<HSubgraph*> subgraphs(number_of_types + 1); - bool needs_generic = (types->length() > kMaxStorePolymorphism); - - // Build subgraphs for each of the specific maps. - // - // TODO(ager): We should recognize when the prototype chains for - // different maps are identical. In that case we can avoid - // repeatedly generating the same prototype map checks. - for (int i = 0; i < number_of_types; ++i) { + // TODO(ager): We should recognize when the prototype chains for different + // maps are identical. In that case we can avoid repeatedly generating the + // same prototype map checks. + int count = 0; + HBasicBlock* join = NULL; + for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) { Handle<Map> map = types->at(i); LookupResult lookup; if (ComputeStoredField(map, name, &lookup)) { - maps.Add(map); - HSubgraph* subgraph = CreateBranchSubgraph(environment()); - SubgraphScope scope(this, subgraph); + if (count == 0) { + AddInstruction(new HCheckNonSmi(object)); // Only needed once. + join = graph()->CreateBasicBlock(); + } + ++count; + HBasicBlock* if_true = graph()->CreateBasicBlock(); + HBasicBlock* if_false = graph()->CreateBasicBlock(); + HCompareMap* compare = new HCompareMap(object, map, if_true, if_false); + current_block()->Finish(compare); + + set_current_block(if_true); HInstruction* instr = BuildStoreNamedField(object, name, value, map, &lookup, false); - Push(value); instr->set_position(expr->position()); + // Goto will add the HSimulate for the store. 
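BuildTypeSwitch, completed above, is the template for the polymorphic load, store, and call cases below: one map compare per receiver map seen by type feedback, a specialized body on each true edge, a shared join block, and a default path on the last false edge. The dispatch shape, modeled as ordinary C++ (a sketch, not V8 code):

    #include <cstdio>

    struct Map { int id; };
    struct Object { const Map* map; int field; };

    // One compare per known map; each hit runs its specialized body and
    // "jumps to the join" (here: returns). The last false edge is the default.
    int TypeSwitch(const Object* receiver, const Map* m0, const Map* m1) {
      if (receiver->map == m0) return receiver->field * 2;  // body for map 0
      if (receiver->map == m1) return receiver->field + 7;  // body for map 1
      std::puts("default: generic path (or deoptimize)");
      return -1;
    }

    int main() {
      Map a = {0}, b = {1}, c = {2};
      Object known = {&b, 5};
      Object unknown = {&c, 5};
      std::printf("%d\n", TypeSwitch(&known, &a, &b));    // 12
      std::printf("%d\n", TypeSwitch(&unknown, &a, &b));  // default, -1
      return 0;
    }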
AddInstruction(instr); - subgraphs.Add(subgraph); - } else { - needs_generic = true; + if (!ast_context()->IsEffect()) Push(value); + current_block()->Goto(join); + + set_current_block(if_false); } } - // If none of the properties were named fields we generate a - // generic store. - if (maps.length() == 0) { + // Finish up. Unconditionally deoptimize if we've handled all the maps we + // know about and do not want to handle ones we've never seen. Otherwise + // use a generic IC. + if (count == types->length() && FLAG_deoptimize_uncommon_cases) { + current_block()->FinishExit(new HDeoptimize); + } else { HInstruction* instr = BuildStoreNamedGeneric(object, name, value); - Push(value); instr->set_position(expr->position()); AddInstruction(instr); - if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId()); - ast_context()->ReturnValue(Pop()); - } else { - // Build subgraph for generic store through IC. - { - HSubgraph* subgraph = CreateBranchSubgraph(environment()); - SubgraphScope scope(this, subgraph); - if (!needs_generic && FLAG_deoptimize_uncommon_cases) { - subgraph->FinishExit(new HDeoptimize()); - } else { - HInstruction* instr = BuildStoreNamedGeneric(object, name, value); - Push(value); - instr->set_position(expr->position()); - AddInstruction(instr); - } - subgraphs.Add(subgraph); - } - HBasicBlock* new_exit_block = - BuildTypeSwitch(&maps, &subgraphs, object, expr->id()); - subgraph()->set_exit_block(new_exit_block); - // In an effect context, we did not materialized the value in the - // predecessor environments so there's no need to handle it here. - if (subgraph()->HasExit() && !ast_context()->IsEffect()) { - ast_context()->ReturnValue(Pop()); + if (join != NULL) { + if (!ast_context()->IsEffect()) Push(value); + current_block()->Goto(join); + } else { + // The HSimulate for the store should not see the stored value in + // effect contexts (it is not materialized at expr->id() in the + // unoptimized code). + if (instr->HasSideEffects()) { + if (ast_context()->IsEffect()) { + AddSimulate(expr->id()); + } else { + Push(value); + AddSimulate(expr->id()); + Drop(1); + } + } + ast_context()->ReturnValue(value); + return; } } + + ASSERT(join != NULL); + join->SetJoinId(expr->id()); + set_current_block(join); + if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop()); } @@ -3342,12 +3304,20 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { HValue* key = Pop(); HValue* object = Pop(); - bool is_fast_elements = expr->IsMonomorphic() && - expr->GetMonomorphicReceiverType()->has_fast_elements(); - - instr = is_fast_elements - ? BuildStoreKeyedFastElement(object, key, value, expr) - : BuildStoreKeyedGeneric(object, key, value); + if (expr->IsMonomorphic()) { + Handle<Map> receiver_type(expr->GetMonomorphicReceiverType()); + // An object has either fast elements or pixel array elements, but never + // both. Pixel array maps that are assigned to pixel array elements are + // always created with the fast elements flag cleared. 
+ if (receiver_type->has_pixel_array_elements()) { + instr = BuildStoreKeyedPixelArrayElement(object, key, value, expr); + } else if (receiver_type->has_fast_elements()) { + instr = BuildStoreKeyedFastElement(object, key, value, expr); + } + } + if (instr == NULL) { + instr = BuildStoreKeyedGeneric(object, key, value); + } } Push(value); @@ -3370,7 +3340,7 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var, CHECK_BAILOUT; bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly(); - Handle<GlobalObject> global(graph()->info()->global_object()); + Handle<GlobalObject> global(info()->global_object()); Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup)); HInstruction* instr = new HStoreGlobal(value, cell, check_hole); instr->set_position(position); @@ -3391,10 +3361,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { BinaryOperation* operation = expr->binary_operation(); if (var != NULL) { - if (!var->is_global() && !var->IsStackAllocated()) { - BAILOUT("non-stack/non-global in compound assignment"); - } - VISIT_FOR_VALUE(operation); if (var->is_global()) { @@ -3402,8 +3368,16 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { Top(), expr->position(), expr->AssignmentId()); - } else { + } else if (var->IsStackAllocated()) { Bind(var, Top()); + } else if (var->IsContextSlot()) { + HValue* context = BuildContextChainWalk(var); + int index = var->AsSlot()->index(); + HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top()); + AddInstruction(instr); + if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId()); + } else { + BAILOUT("compound assignment to lookup slot"); } ast_context()->ReturnValue(Pop()); @@ -3555,7 +3529,8 @@ void HGraphBuilder::VisitThrow(Throw* expr) { instr->set_position(expr->position()); AddInstruction(instr); AddSimulate(expr->id()); - current_subgraph_->FinishExit(new HAbnormalExit); + current_block()->FinishExit(new HAbnormalExit); + set_current_block(NULL); } @@ -3563,65 +3538,62 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr, HValue* object, ZoneMapList* types, Handle<String> name) { - int number_of_types = Min(types->length(), kMaxLoadPolymorphism); - ZoneMapList maps(number_of_types); - ZoneList<HSubgraph*> subgraphs(number_of_types + 1); - bool needs_generic = (types->length() > kMaxLoadPolymorphism); - - // Build subgraphs for each of the specific maps. - // - // TODO(ager): We should recognize when the prototype chains for - // different maps are identical. In that case we can avoid - // repeatedly generating the same prototype map checks. - for (int i = 0; i < number_of_types; ++i) { + // TODO(ager): We should recognize when the prototype chains for different + // maps are identical. In that case we can avoid repeatedly generating the + // same prototype map checks. + int count = 0; + HBasicBlock* join = NULL; + for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) { Handle<Map> map = types->at(i); LookupResult lookup; map->LookupInDescriptors(NULL, *name, &lookup); if (lookup.IsProperty() && lookup.type() == FIELD) { - maps.Add(map); - HSubgraph* subgraph = CreateBranchSubgraph(environment()); - SubgraphScope scope(this, subgraph); + if (count == 0) { + AddInstruction(new HCheckNonSmi(object)); // Only needed once. 
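The keyed-store dispatch near the top of this chunk relies on a monomorphic receiver map implying exactly one elements kind: pixel-array maps are created with the fast-elements flag cleared, so the two specialized stores are mutually exclusive and the generic store covers everything else. A condensed sketch of the decision (enum and names invented for illustration):

    #include <cstdio>

    enum class ElementsKind { kFast, kPixelArray, kOther };

    // A monomorphic receiver map implies exactly one elements kind;
    // the generic store is the `instr == NULL` fallback.
    const char* ChooseKeyedStore(bool monomorphic, ElementsKind kind) {
      if (monomorphic) {
        if (kind == ElementsKind::kPixelArray) return "BuildStoreKeyedPixelArrayElement";
        if (kind == ElementsKind::kFast)       return "BuildStoreKeyedFastElement";
      }
      return "BuildStoreKeyedGeneric";
    }

    int main() {
      std::puts(ChooseKeyedStore(true,  ElementsKind::kPixelArray));
      std::puts(ChooseKeyedStore(true,  ElementsKind::kFast));
      std::puts(ChooseKeyedStore(true,  ElementsKind::kOther));  // generic
      std::puts(ChooseKeyedStore(false, ElementsKind::kFast));   // generic
      return 0;
    }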
+ join = graph()->CreateBasicBlock(); + } + ++count; + HBasicBlock* if_true = graph()->CreateBasicBlock(); + HBasicBlock* if_false = graph()->CreateBasicBlock(); + HCompareMap* compare = new HCompareMap(object, map, if_true, if_false); + current_block()->Finish(compare); + + set_current_block(if_true); HLoadNamedField* instr = BuildLoadNamedField(object, expr, map, &lookup, false); instr->set_position(expr->position()); - instr->ClearFlag(HValue::kUseGVN); // Don't do GVN on polymorphic loads. - PushAndAdd(instr); - subgraphs.Add(subgraph); - } else { - needs_generic = true; + instr->ClearFlag(HValue::kUseGVN); + AddInstruction(instr); + if (!ast_context()->IsEffect()) Push(instr); + current_block()->Goto(join); + + set_current_block(if_false); } } - // If none of the properties were named fields we generate a - // generic load. - if (maps.length() == 0) { + // Finish up. Unconditionally deoptimize if we've handled all the maps we + // know about and do not want to handle ones we've never seen. Otherwise + // use a generic IC. + if (count == types->length() && FLAG_deoptimize_uncommon_cases) { + current_block()->FinishExit(new HDeoptimize); + } else { HInstruction* instr = BuildLoadNamedGeneric(object, expr); instr->set_position(expr->position()); - ast_context()->ReturnInstruction(instr, expr->id()); - } else { - // Build subgraph for generic load through IC. - { - HSubgraph* subgraph = CreateBranchSubgraph(environment()); - SubgraphScope scope(this, subgraph); - if (!needs_generic && FLAG_deoptimize_uncommon_cases) { - subgraph->FinishExit(new HDeoptimize()); - } else { - HInstruction* instr = BuildLoadNamedGeneric(object, expr); - instr->set_position(expr->position()); - PushAndAdd(instr); - } - subgraphs.Add(subgraph); - } - HBasicBlock* new_exit_block = - BuildTypeSwitch(&maps, &subgraphs, object, expr->id()); - subgraph()->set_exit_block(new_exit_block); - // In an effect context, we did not materialized the value in the - // predecessor environments so there's no need to handle it here. 
- if (subgraph()->HasExit() && !ast_context()->IsEffect()) { - ast_context()->ReturnValue(Pop()); + if (join != NULL) { + AddInstruction(instr); + if (!ast_context()->IsEffect()) Push(instr); + current_block()->Goto(join); + } else { + ast_context()->ReturnInstruction(instr, expr->id()); + return; } } + + ASSERT(join != NULL); + join->SetJoinId(expr->id()); + set_current_block(join); + if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop()); } @@ -3725,7 +3697,8 @@ HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object, AddInstruction(new HCheckMap(object, map)); HLoadElements* elements = new HLoadElements(object); AddInstruction(elements); - HInstruction* length = AddInstruction(new HPixelArrayLength(elements)); + HInstruction* length = new HPixelArrayLength(elements); + AddInstruction(length); AddInstruction(new HBoundsCheck(key, length)); HLoadPixelArrayExternalPointer* external_elements = new HLoadPixelArrayExternalPointer(elements); @@ -3768,6 +3741,28 @@ HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object, } +HInstruction* HGraphBuilder::BuildStoreKeyedPixelArrayElement( + HValue* object, + HValue* key, + HValue* val, + Expression* expr) { + ASSERT(expr->IsMonomorphic()); + AddInstruction(new HCheckNonSmi(object)); + Handle<Map> map = expr->GetMonomorphicReceiverType(); + ASSERT(!map->has_fast_elements()); + ASSERT(map->has_pixel_array_elements()); + AddInstruction(new HCheckMap(object, map)); + HLoadElements* elements = new HLoadElements(object); + AddInstruction(elements); + HInstruction* length = AddInstruction(new HPixelArrayLength(elements)); + AddInstruction(new HBoundsCheck(key, length)); + HLoadPixelArrayExternalPointer* external_elements = + new HLoadPixelArrayExternalPointer(elements); + AddInstruction(external_elements); + return new HStorePixelArrayElement(external_elements, key, val); +} + + bool HGraphBuilder::TryArgumentsAccess(Property* expr) { VariableProxy* proxy = expr->obj()->AsVariableProxy(); if (proxy == NULL) return false; @@ -3783,9 +3778,11 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) { HInstruction* elements = AddInstruction(new HArgumentsElements); result = new HArgumentsLength(elements); } else { + Push(graph()->GetArgumentsObject()); VisitForValue(expr->key()); if (HasStackOverflow()) return false; HValue* key = Pop(); + Drop(1); // Arguments object. HInstruction* elements = AddInstruction(new HArgumentsElements); HInstruction* length = AddInstruction(new HArgumentsLength(elements)); AddInstruction(new HBoundsCheck(key, length)); @@ -3891,7 +3888,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, int argument_count = expr->arguments()->length() + 1; // Plus receiver. int number_of_types = Min(types->length(), kMaxCallPolymorphism); ZoneMapList maps(number_of_types); - ZoneList<HSubgraph*> subgraphs(number_of_types + 1); + ZoneList<HSubgraph*> subgraphs(number_of_types); bool needs_generic = (types->length() > kMaxCallPolymorphism); // Build subgraphs for each of the specific maps. 
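The new BuildStoreKeyedPixelArrayElement mirrors the load path: non-smi check, map check, load the elements and their length, bounds-check the key, then write through the external pointer. A plain stand-in for that sequence (assuming canvas pixel semantics, where stores clamp to the 0..255 range):

    #include <cstdint>
    #include <cstdio>

    struct PixelArray {
      int length;
      uint8_t* external_pointer;  // the out-of-heap backing store
    };

    // Bounds-check the key (HBoundsCheck), then store through the external
    // pointer, clamping the value to a byte.
    bool StorePixelElement(PixelArray* arr, int key, int value) {
      if (key < 0 || key >= arr->length) return false;  // deopt in optimized code
      if (value < 0) value = 0;
      if (value > 255) value = 255;
      arr->external_pointer[key] = static_cast<uint8_t>(value);
      return true;
    }

    int main() {
      uint8_t data[4] = {0, 0, 0, 0};
      PixelArray arr = {4, data};
      StorePixelElement(&arr, 2, 300);
      std::printf("data[2] = %d\n", data[2]);  // 255, clamped
      return 0;
    }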
@@ -3902,7 +3899,6 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
for (int i = 0; i < number_of_types; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
- maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
AddCheckConstantFunction(expr, receiver, map, false);
@@ -3914,11 +3910,13 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// Check for bailout, as trying to inline might fail due to bailout
// during hydrogen processing.
CHECK_BAILOUT;
- HCall* call = new HCallConstantFunction(expr->target(), argument_count);
+ HCallConstantFunction* call =
+ new HCallConstantFunction(expr->target(), argument_count);
call->set_position(expr->position());
PreProcessCall(call);
PushAndAdd(call);
}
+ maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@@ -3930,31 +3928,30 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (maps.length() == 0) {
HContext* context = new HContext;
AddInstruction(context);
- HCall* call = new HCallNamed(context, name, argument_count);
+ HCallNamed* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
} else {
// Build subgraph for generic call through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
+ HSubgraph* default_graph = CreateBranchSubgraph(environment());
+ { SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
+ default_graph->exit_block()->FinishExit(new HDeoptimize());
+ default_graph->set_exit_block(NULL);
} else {
HContext* context = new HContext;
AddInstruction(context);
- HCall* call = new HCallNamed(context, name, argument_count);
+ HCallNamed* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
PreProcessCall(call);
PushAndAdd(call);
}
- subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
- subgraph()->set_exit_block(new_exit_block);
+ BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id());
+ set_current_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here. 
if (new_exit_block != NULL && !ast_context()->IsEffect()) { @@ -3964,14 +3961,17 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, } -void HGraphBuilder::TraceInline(Handle<JSFunction> target, bool result) { - SmartPointer<char> callee = target->shared()->DebugName()->ToCString(); - SmartPointer<char> caller = - graph()->info()->function()->debug_name()->ToCString(); - if (result) { - PrintF("Inlined %s called from %s.\n", *callee, *caller); - } else { - PrintF("Do not inline %s called from %s.\n", *callee, *caller); +void HGraphBuilder::TraceInline(Handle<JSFunction> target, const char* reason) { + if (FLAG_trace_inlining) { + SmartPointer<char> callee = target->shared()->DebugName()->ToCString(); + SmartPointer<char> caller = + info()->function()->debug_name()->ToCString(); + if (reason == NULL) { + PrintF("Inlined %s called from %s.\n", *callee, *caller); + } else { + PrintF("Did not inline %s called from %s (%s).\n", + *callee, *caller, reason); + } } } @@ -3986,123 +3986,122 @@ bool HGraphBuilder::TryInline(Call* expr) { // Do a quick check on source code length to avoid parsing large // inlining candidates. if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) { - if (FLAG_trace_inlining) TraceInline(target, false); + TraceInline(target, "target text too big"); return false; } // Target must be inlineable. - if (!target->IsInlineable()) return false; + if (!target->IsInlineable()) { + TraceInline(target, "target not inlineable"); + return false; + } // No context change required. - CompilationInfo* outer_info = graph()->info(); + CompilationInfo* outer_info = info(); if (target->context() != outer_info->closure()->context() || outer_info->scope()->contains_with() || outer_info->scope()->num_heap_slots() > 0) { + TraceInline(target, "target requires context change"); return false; } // Don't inline deeper than two calls. HEnvironment* env = environment(); - if (env->outer() != NULL && env->outer()->outer() != NULL) return false; + if (env->outer() != NULL && env->outer()->outer() != NULL) { + TraceInline(target, "inline depth limit reached"); + return false; + } // Don't inline recursive functions. - if (target->shared() == outer_info->closure()->shared()) return false; + if (target->shared() == outer_info->closure()->shared()) { + TraceInline(target, "target is recursive"); + return false; + } // We don't want to add more than a certain number of nodes from inlining. if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) { - if (FLAG_trace_inlining) TraceInline(target, false); + TraceInline(target, "cumulative AST node limit reached"); return false; } int count_before = AstNode::Count(); // Parse and allocate variables. - CompilationInfo inner_info(target); - if (!ParserApi::Parse(&inner_info) || - !Scope::Analyze(&inner_info)) { + CompilationInfo target_info(target); + if (!ParserApi::Parse(&target_info) || + !Scope::Analyze(&target_info)) { if (Top::has_pending_exception()) { + // Parse or scope error, never optimize this function. SetStackOverflow(); + target->shared()->set_optimization_disabled(true); } + TraceInline(target, "parse failure"); + return false; + } + + if (target_info.scope()->num_heap_slots() > 0) { + TraceInline(target, "target has context-allocated variables"); return false; } - FunctionLiteral* function = inner_info.function(); + FunctionLiteral* function = target_info.function(); // Count the number of AST nodes added by inlining this call. 
int nodes_added = AstNode::Count() - count_before;
if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
- if (FLAG_trace_inlining) TraceInline(target, false);
+ TraceInline(target, "target AST is too large");
return false;
}
// Check if we can handle all declarations in the inlined function.
- VisitDeclarations(inner_info.scope()->declarations());
+ VisitDeclarations(target_info.scope()->declarations());
if (HasStackOverflow()) {
+ TraceInline(target, "target has non-trivial declaration");
ClearStackOverflow();
return false;
}
// Don't inline functions that use the arguments object or that
// have a mismatching number of parameters.
- Handle<SharedFunctionInfo> shared(target->shared());
+ Handle<SharedFunctionInfo> target_shared(target->shared());
int arity = expr->arguments()->length();
if (function->scope()->arguments() != NULL ||
- arity != shared->formal_parameter_count()) {
+ arity != target_shared->formal_parameter_count()) {
+ TraceInline(target, "target requires special argument handling");
return false;
}
// All statements in the body must be inlineable.
for (int i = 0, count = function->body()->length(); i < count; ++i) {
- if (!function->body()->at(i)->IsInlineable()) return false;
+ if (!function->body()->at(i)->IsInlineable()) {
+ TraceInline(target, "target contains unsupported syntax");
+ return false;
+ }
}
// Generate the deoptimization data for the unoptimized version of
// the target function if we don't already have it.
- if (!shared->has_deoptimization_support()) {
+ if (!target_shared->has_deoptimization_support()) {
// Note that we compile here using the same AST that we will use for
// generating the optimized inline code.
- inner_info.EnableDeoptimizationSupport();
- if (!FullCodeGenerator::MakeCode(&inner_info)) return false;
- shared->EnableDeoptimizationSupport(*inner_info.code());
- Compiler::RecordFunctionCompilation(
- Logger::FUNCTION_TAG,
- Handle<String>(shared->DebugName()),
- shared->start_position(),
- &inner_info);
+ target_info.EnableDeoptimizationSupport();
+ if (!FullCodeGenerator::MakeCode(&target_info)) {
+ TraceInline(target, "could not generate deoptimization info");
+ return false;
+ }
+ target_shared->EnableDeoptimizationSupport(*target_info.code());
+ Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
+ &target_info,
+ target_shared);
}
+ // ----------------------------------------------------------------
// Save the pending call context and type feedback oracle. Set up new ones
// for the inlined function.
- ASSERT(shared->has_deoptimization_support());
- AstContext* saved_call_context = call_context();
- HBasicBlock* saved_function_return = function_return();
- TypeFeedbackOracle* saved_oracle = oracle();
- // On-stack replacement cannot target inlined functions. Since we don't
- // use a separate CompilationInfo structure for the inlined function, we
- // save and restore the AST ID in the original compilation info.
- int saved_osr_ast_id = graph()->info()->osr_ast_id();
-
- TestContext* test_context = NULL;
- if (ast_context()->IsTest()) {
- // Inlined body is treated as if it occurs in an 'inlined' call context
- // with true and false blocks that will forward to the real ones.
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget();
- if_false->MarkAsInlineReturnTarget();
- // AstContext constructor pushes on the context stack. 
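TryInline has become a chain of guarded early returns, each reporting its reason through TraceInline. Condensed into a single function (the numeric limits and fields here are placeholders, not V8's actual values):

    #include <cstdio>

    struct Candidate {
      int source_size;   // raw source length of the target
      int ast_nodes;     // nodes added by parsing the target
      int inline_depth;  // current depth of nested inlining
      bool recursive;
      bool uses_arguments;
    };

    static bool Fail(const char* reason) {
      std::printf("Did not inline (%s).\n", reason);
      return false;
    }

    // One early-out per heuristic, mirroring the TraceInline reasons above.
    bool TryInline(const Candidate& c, int* cumulative_nodes) {
      const int kMaxSourceSize = 600;     // placeholder threshold
      const int kMaxInlinedSize = 196;    // placeholder threshold
      const int kMaxInlinedNodes = 196;   // placeholder threshold
      if (c.source_size > kMaxSourceSize) return Fail("target text too big");
      if (c.inline_depth > 2)             return Fail("inline depth limit reached");
      if (c.recursive)                    return Fail("target is recursive");
      if (c.ast_nodes > kMaxInlinedSize)  return Fail("target AST is too large");
      if (*cumulative_nodes + c.ast_nodes > kMaxInlinedNodes)
        return Fail("cumulative AST node limit reached");
      if (c.uses_arguments) return Fail("target requires special argument handling");
      *cumulative_nodes += c.ast_nodes;
      std::puts("Inlined.");
      return true;
    }

    int main() {
      int nodes = 0;
      TryInline({100, 50, 1, false, false}, &nodes);   // Inlined.
      TryInline({100, 180, 1, false, false}, &nodes);  // cumulative limit reached
      return 0;
    }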
- test_context = new TestContext(this, if_true, if_false); - function_return_ = NULL; - } else { - // Inlined body is treated as if it occurs in the original call context. - function_return_ = graph()->CreateBasicBlock(); - function_return_->MarkAsInlineReturnTarget(); - } - call_context_ = ast_context(); - TypeFeedbackOracle new_oracle( - Handle<Code>(shared->code()), + ASSERT(target_shared->has_deoptimization_support()); + TypeFeedbackOracle target_oracle( + Handle<Code>(target_shared->code()), Handle<Context>(target->context()->global_context())); - oracle_ = &new_oracle; - graph()->info()->SetOsrAstId(AstNode::kNoNumber); + FunctionState target_state(this, &target_info, &target_oracle); HSubgraph* body = CreateInlinedSubgraph(env, target, function); body->exit_block()->AddInstruction(new HEnterInlined(target, function)); @@ -4110,26 +4109,22 @@ bool HGraphBuilder::TryInline(Call* expr) { if (HasStackOverflow()) { // Bail out if the inline function did, as we cannot residualize a call // instead. - delete test_context; - call_context_ = saved_call_context; - function_return_ = saved_function_return; - oracle_ = saved_oracle; - graph()->info()->SetOsrAstId(saved_osr_ast_id); + TraceInline(target, "inline graph construction failed"); return false; } // Update inlined nodes count. inlined_count_ += nodes_added; - if (FLAG_trace_inlining) TraceInline(target, true); + TraceInline(target, NULL); - if (body->HasExit()) { + if (body->exit_block() != NULL) { // Add a return of undefined if control can fall off the body. In a // test context, undefined is false. HValue* return_value = graph()->GetConstantUndefined(); - if (test_context == NULL) { - ASSERT(function_return_ != NULL); - body->exit_block()->AddLeaveInlined(return_value, function_return_); + if (inlined_test_context() == NULL) { + ASSERT(function_return() != NULL); + body->exit_block()->AddLeaveInlined(return_value, function_return()); } else { // The graph builder assumes control can reach both branches of a // test, so we materialize the undefined value and test it rather than @@ -4142,8 +4137,10 @@ bool HGraphBuilder::TryInline(Call* expr) { body->exit_block()->Finish(test); HValue* const no_return_value = NULL; - empty_true->AddLeaveInlined(no_return_value, test_context->if_true()); - empty_false->AddLeaveInlined(no_return_value, test_context->if_false()); + empty_true->AddLeaveInlined(no_return_value, + inlined_test_context()->if_true()); + empty_false->AddLeaveInlined(no_return_value, + inlined_test_context()->if_false()); } body->set_exit_block(NULL); } @@ -4152,16 +4149,17 @@ bool HGraphBuilder::TryInline(Call* expr) { AddSimulate(expr->ReturnId()); // Jump to the function entry (without re-recording the environment). - subgraph()->exit_block()->Finish(new HGoto(body->entry_block())); + current_block()->Finish(new HGoto(body->entry_block())); // Fix up the function exits. - if (test_context != NULL) { - HBasicBlock* if_true = test_context->if_true(); - HBasicBlock* if_false = test_context->if_false(); + if (inlined_test_context() != NULL) { + HBasicBlock* if_true = inlined_test_context()->if_true(); + HBasicBlock* if_false = inlined_test_context()->if_false(); if_true->SetJoinId(expr->id()); if_false->SetJoinId(expr->id()); - ASSERT(ast_context() == test_context); - delete test_context; // Destructor pops from expression context stack. + ASSERT(ast_context() == inlined_test_context()); + // Pop the return test context from the expression context stack. + ClearInlinedTestContext(); // Forward to the real test context. 
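The FunctionState object introduced above replaces the manual save/restore of the builder's per-function state (call context, return block, oracle, OSR id), which previously had to be repeated on every early exit from TryInline. The RAII pattern in miniature, with made-up fields:

    #include <cstdio>

    struct Builder {
      const char* oracle;
      int inline_depth;
    };

    // Saves the fields it shadows and restores them in its destructor, so
    // every early return unwinds the builder state automatically.
    class FunctionState {
     public:
      FunctionState(Builder* builder, const char* inner_oracle)
          : builder_(builder), saved_oracle_(builder->oracle) {
        builder->oracle = inner_oracle;
        ++builder->inline_depth;
      }
      ~FunctionState() {
        builder_->oracle = saved_oracle_;
        --builder_->inline_depth;
      }
     private:
      Builder* builder_;
      const char* saved_oracle_;
    };

    int main() {
      Builder b = {"outer oracle", 0};
      {
        FunctionState state(&b, "target oracle");
        std::printf("inside: %s, depth %d\n", b.oracle, b.inline_depth);
      }  // destructor restores here, on any exit path
      std::printf("after:  %s, depth %d\n", b.oracle, b.inline_depth);
      return 0;
    }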
HValue* const no_return_value = NULL; @@ -4182,18 +4180,13 @@ bool HGraphBuilder::TryInline(Call* expr) { // TODO(kmillikin): Come up with a better way to handle this. It is too // subtle. NULL here indicates that the enclosing context has no control // flow to handle. - subgraph()->set_exit_block(NULL); + set_current_block(NULL); } else { - function_return_->SetJoinId(expr->id()); - subgraph()->set_exit_block(function_return_); + function_return()->SetJoinId(expr->id()); + set_current_block(function_return()); } - call_context_ = saved_call_context; - function_return_ = saved_function_return; - oracle_ = saved_oracle; - graph()->info()->SetOsrAstId(saved_osr_ast_id); - return true; } @@ -4301,7 +4294,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) { Property* prop = callee->AsProperty(); ASSERT(prop != NULL); - if (graph()->info()->scope()->arguments() == NULL) return false; + if (info()->scope()->arguments() == NULL) return false; Handle<String> name = prop->key()->AsLiteral()->AsPropertyName(); if (!name->IsEqualTo(CStrVector("apply"))) return false; @@ -4348,14 +4341,13 @@ static bool HasCustomCallGenerator(Handle<JSFunction> function) { void HGraphBuilder::VisitCall(Call* expr) { Expression* callee = expr->expression(); int argument_count = expr->arguments()->length() + 1; // Plus receiver. - HCall* call = NULL; + HInstruction* call = NULL; Property* prop = callee->AsProperty(); if (prop != NULL) { if (!prop->key()->IsPropertyName()) { // Keyed function call. - VisitArgument(prop->obj()); - CHECK_BAILOUT; + VISIT_FOR_VALUE(prop->obj()); VISIT_FOR_VALUE(prop->key()); // Push receiver and key like the non-optimized code generator expects it. @@ -4364,14 +4356,13 @@ void HGraphBuilder::VisitCall(Call* expr) { Push(key); Push(receiver); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - call = new HCallKeyed(context, key, argument_count); + call = PreProcessCall(new HCallKeyed(context, key, argument_count)); call->set_position(expr->position()); - PreProcessCall(call); Drop(1); // Key. ast_context()->ReturnInstruction(call, expr->id()); return; @@ -4383,9 +4374,8 @@ void HGraphBuilder::VisitCall(Call* expr) { if (TryCallApply(expr)) return; CHECK_BAILOUT; - VisitArgument(prop->obj()); - CHECK_BAILOUT; - VisitArgumentList(expr->arguments()); + VISIT_FOR_VALUE(prop->obj()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; Handle<String> name = prop->key()->AsLiteral()->AsPropertyName(); @@ -4412,12 +4402,12 @@ void HGraphBuilder::VisitCall(Call* expr) { // IC when a primitive receiver check is required. HContext* context = new HContext; AddInstruction(context); - call = new HCallNamed(context, name, argument_count); + call = PreProcessCall(new HCallNamed(context, name, argument_count)); } else { AddCheckConstantFunction(expr, receiver, receiver_map, true); if (TryInline(expr)) { - if (subgraph()->HasExit()) { + if (current_block() != NULL) { HValue* return_value = Pop(); // If we inlined a function in a test context then we need to emit // a simulate here to shadow the ones at the end of the @@ -4432,7 +4422,8 @@ void HGraphBuilder::VisitCall(Call* expr) { // Check for bailout, as the TryInline call in the if condition above // might return false due to bailout during hydrogen processing. 
CHECK_BAILOUT; - call = new HCallConstantFunction(expr->target(), argument_count); + call = PreProcessCall(new HCallConstantFunction(expr->target(), + argument_count)); } } } else if (types != NULL && types->length() > 1) { @@ -4443,7 +4434,7 @@ void HGraphBuilder::VisitCall(Call* expr) { } else { HContext* context = new HContext; AddInstruction(context); - call = new HCallNamed(context, name, argument_count); + call = PreProcessCall(new HCallNamed(context, name, argument_count)); } } else { @@ -4452,19 +4443,19 @@ void HGraphBuilder::VisitCall(Call* expr) { if (!global_call) { ++argument_count; - VisitArgument(expr->expression()); - CHECK_BAILOUT; + VISIT_FOR_VALUE(expr->expression()); } if (global_call) { + bool known_global_function = false; // If there is a global property cell for the name at compile time and // access check is not enabled we assume that the function will not change // and generate optimized code for calling the function. - CompilationInfo* info = graph()->info(); - bool known_global_function = info->has_global_object() && - !info->global_object()->IsAccessCheckNeeded() && - expr->ComputeGlobalTarget(Handle<GlobalObject>(info->global_object()), - var->name()); + if (info()->has_global_object() && + !info()->global_object()->IsAccessCheckNeeded()) { + Handle<GlobalObject> global(info()->global_object()); + known_global_function = expr->ComputeGlobalTarget(global, var->name()); + } if (known_global_function) { // Push the global object instead of the global receiver because // code generated by the full code generator expects it. @@ -4472,7 +4463,7 @@ void HGraphBuilder::VisitCall(Call* expr) { HGlobalObject* global_object = new HGlobalObject(context); AddInstruction(context); PushAndAdd(global_object); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; VISIT_FOR_VALUE(expr->expression()); @@ -4489,7 +4480,7 @@ void HGraphBuilder::VisitCall(Call* expr) { environment()->SetExpressionStackAt(receiver_index, global_receiver); if (TryInline(expr)) { - if (subgraph()->HasExit()) { + if (current_block() != NULL) { HValue* return_value = Pop(); // If we inlined a function in a test context then we need to // emit a simulate here to shadow the ones at the end of the @@ -4505,15 +4496,18 @@ void HGraphBuilder::VisitCall(Call* expr) { // during hydrogen processing. 
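The call sites in this chunk converge on the call = PreProcessCall(new H...) idiom: arguments are first evaluated onto the environment's expression stack, and PreProcessCall detaches them onto the call node. A rough model of that hand-off (the pop-and-attach behavior is assumed here for illustration):

    #include <cstdio>
    #include <vector>

    struct Call {
      std::vector<int> arguments;
    };

    struct Builder {
      std::vector<int> expression_stack;

      // Pops argument_count values off the expression stack and attaches
      // them to the call in source order; returning the call enables the
      // `call = PreProcessCall(new ...)` style used above.
      Call* PreProcessCall(Call* call, int argument_count) {
        call->arguments.resize(argument_count);
        for (int i = argument_count - 1; i >= 0; --i) {
          call->arguments[i] = expression_stack.back();
          expression_stack.pop_back();
        }
        return call;
      }
    };

    int main() {
      Builder builder;
      builder.expression_stack = {/*receiver*/ 100, /*arg0*/ 1, /*arg1*/ 2};
      Call call;
      builder.PreProcessCall(&call, 3);
      for (int v : call.arguments) std::printf("%d ", v);  // 100 1 2
      std::printf("\n(stack now holds %zu values)\n",
                  builder.expression_stack.size());
      return 0;
    }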
CHECK_BAILOUT; - call = new HCallKnownGlobal(expr->target(), argument_count); + call = PreProcessCall(new HCallKnownGlobal(expr->target(), + argument_count)); } else { HContext* context = new HContext; AddInstruction(context); PushAndAdd(new HGlobalObject(context)); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; - call = new HCallGlobal(context, var->name(), argument_count); + call = PreProcessCall(new HCallGlobal(context, + var->name(), + argument_count)); } } else { @@ -4522,15 +4516,14 @@ void HGraphBuilder::VisitCall(Call* expr) { AddInstruction(context); AddInstruction(global_object); PushAndAdd(new HGlobalReceiver(global_object)); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; - call = new HCallFunction(context, argument_count); + call = PreProcessCall(new HCallFunction(context, argument_count)); } } call->set_position(expr->position()); - PreProcessCall(call); ast_context()->ReturnInstruction(call, expr->id()); } @@ -4538,9 +4531,8 @@ void HGraphBuilder::VisitCall(Call* expr) { void HGraphBuilder::VisitCallNew(CallNew* expr) { // The constructor function is also used as the receiver argument to the // JS construct call builtin. - VisitArgument(expr->expression()); - CHECK_BAILOUT; - VisitArgumentList(expr->arguments()); + VISIT_FOR_VALUE(expr->expression()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; HContext* context = new HContext; @@ -4550,7 +4542,7 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) { // to the construct call. int arg_count = expr->arguments()->length() + 1; // Plus constructor. HValue* constructor = environment()->ExpressionStackAt(arg_count - 1); - HCall* call = new HCallNew(context, constructor, arg_count); + HCallNew* call = new HCallNew(context, constructor, arg_count); call->set_position(expr->position()); PreProcessCall(call); ast_context()->ReturnInstruction(call, expr->id()); @@ -4573,25 +4565,15 @@ const HGraphBuilder::InlineFunctionGenerator void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) { - Handle<String> name = expr->name(); - if (name->IsEqualTo(CStrVector("_Log"))) { - ast_context()->ReturnValue(graph()->GetConstantUndefined()); - return; - } - - Runtime::Function* function = expr->function(); if (expr->is_jsruntime()) { BAILOUT("call to a JavaScript runtime function"); } - ASSERT(function != NULL); - - VisitArgumentList(expr->arguments()); - CHECK_BAILOUT; - int argument_count = expr->arguments()->length(); + Runtime::Function* function = expr->function(); + ASSERT(function != NULL); if (function->intrinsic_type == Runtime::INLINE) { - ASSERT(name->length() > 0); - ASSERT(name->Get(0) == '_'); + ASSERT(expr->name()->length() > 0); + ASSERT(expr->name()->Get(0) == '_'); // Call to an inline function. int lookup_index = static_cast<int>(function->function_id) - static_cast<int>(Runtime::kFirstInlineFunction); @@ -4601,12 +4583,17 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) { InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index]; // Call the inline code generator using the pointer-to-member. 
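The dispatch at the end of this chunk is a table of pointer-to-member functions: kInlineFunctionGenerators maps a runtime function id to a generator method, invoked as (this->*generator)(expr). A self-contained sketch of the mechanism (illustrative names):

    #include <cstdio>

    struct CallRuntime { int id; };

    class Builder {
     public:
      typedef void (Builder::*InlineFunctionGenerator)(CallRuntime*);

      void GenerateIsSmi(CallRuntime* call)   { std::printf("IsSmi, ast id %d\n", call->id); }
      void GenerateValueOf(CallRuntime* call) { std::printf("ValueOf, ast id %d\n", call->id); }

      void VisitCallRuntime(int lookup_index, CallRuntime* call) {
        static const InlineFunctionGenerator kInlineFunctionGenerators[] = {
          &Builder::GenerateIsSmi,
          &Builder::GenerateValueOf,
        };
        InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
        (this->*generator)(call);  // the pointer-to-member call
      }
    };

    int main() {
      Builder builder;
      CallRuntime call = {42};
      builder.VisitCallRuntime(1, &call);  // dispatches to GenerateValueOf
      return 0;
    }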
- (this->*generator)(argument_count, expr->id()); + (this->*generator)(expr); } else { ASSERT(function->intrinsic_type == Runtime::RUNTIME); - HCall* call = new HCallRuntime(name, expr->function(), argument_count); + VisitArgumentList(expr->arguments()); + CHECK_BAILOUT; + + Handle<String> name = expr->name(); + int argument_count = expr->arguments()->length(); + HCallRuntime* call = new HCallRuntime(name, function, argument_count); call->set_position(RelocInfo::kNoPosition); - PreProcessCall(call); + Drop(argument_count); ast_context()->ReturnInstruction(call, expr->id()); } } @@ -4656,21 +4643,29 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { VisitForControl(expr->expression(), context->if_false(), context->if_true()); - } else { - HSubgraph* true_graph = CreateEmptySubgraph(); - HSubgraph* false_graph = CreateEmptySubgraph(); + } else if (ast_context()->IsValue()) { + HBasicBlock* materialize_false = graph()->CreateBasicBlock(); + HBasicBlock* materialize_true = graph()->CreateBasicBlock(); VISIT_FOR_CONTROL(expr->expression(), - false_graph->entry_block(), - true_graph->entry_block()); - true_graph->entry_block()->SetJoinId(expr->expression()->id()); - true_graph->environment()->Push(graph_->GetConstantTrue()); - - false_graph->entry_block()->SetJoinId(expr->expression()->id()); - false_graph->environment()->Push(graph_->GetConstantFalse()); - - current_subgraph_->AppendJoin(true_graph, false_graph, expr); + materialize_false, + materialize_true); + materialize_false->SetJoinId(expr->expression()->id()); + materialize_true->SetJoinId(expr->expression()->id()); + + set_current_block(materialize_false); + Push(graph()->GetConstantFalse()); + set_current_block(materialize_true); + Push(graph()->GetConstantTrue()); + + HBasicBlock* join = + CreateJoin(materialize_false, materialize_true, expr->id()); + set_current_block(join); ast_context()->ReturnValue(Pop()); + } else { + ASSERT(ast_context()->IsEffect()); + VisitForEffect(expr->expression()); } + } else if (op == Token::BIT_NOT || op == Token::SUB) { VISIT_FOR_VALUE(expr->expression()); HValue* value = Pop(); @@ -4724,10 +4719,6 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { bool inc = expr->op() == Token::INC; if (var != NULL) { - if (!var->is_global() && !var->IsStackAllocated()) { - BAILOUT("non-stack/non-global variable in count operation"); - } - VISIT_FOR_VALUE(target); // Match the full code generator stack by simulating an extra stack @@ -4743,9 +4734,16 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { after, expr->position(), expr->AssignmentId()); - } else { - ASSERT(var->IsStackAllocated()); + } else if (var->IsStackAllocated()) { Bind(var, after); + } else if (var->IsContextSlot()) { + HValue* context = BuildContextChainWalk(var); + int index = var->AsSlot()->index(); + HStoreContextSlot* instr = new HStoreContextSlot(context, index, after); + AddInstruction(instr); + if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId()); + } else { + BAILOUT("lookup variable in count operation"); } Drop(has_extra ? 2 : 1); ast_context()->ReturnValue(expr->is_postfix() ? before : after); @@ -4956,22 +4954,59 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { // Translate right subexpression by visiting it in the same AST // context as the entire expression. 
- subgraph()->set_exit_block(eval_right); + set_current_block(eval_right); Visit(expr->right()); - } else { + } else if (ast_context()->IsValue()) { VISIT_FOR_VALUE(expr->left()); - ASSERT(current_subgraph_->HasExit()); - - HValue* left = Top(); - HEnvironment* environment_copy = environment()->Copy(); - environment_copy->Pop(); - HSubgraph* right_subgraph; - right_subgraph = CreateBranchSubgraph(environment_copy); - ADD_TO_SUBGRAPH(right_subgraph, expr->right()); - current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left); - current_subgraph_->exit_block()->SetJoinId(expr->id()); + ASSERT(current_block() != NULL); + + // We need an extra block to maintain edge-split form. + HBasicBlock* empty_block = graph()->CreateBasicBlock(); + HBasicBlock* eval_right = graph()->CreateBasicBlock(); + HTest* test = is_logical_and + ? new HTest(Top(), eval_right, empty_block) + : new HTest(Top(), empty_block, eval_right); + current_block()->Finish(test); + + set_current_block(eval_right); + Drop(1); // Value of the left subexpression. + VISIT_FOR_VALUE(expr->right()); + + HBasicBlock* join_block = + CreateJoin(empty_block, current_block(), expr->id()); + set_current_block(join_block); ast_context()->ReturnValue(Pop()); + + } else { + ASSERT(ast_context()->IsEffect()); + // In an effect context, we don't need the value of the left + // subexpression, only its control flow and side effects. We need an + // extra block to maintain edge-split form. + HBasicBlock* empty_block = graph()->CreateBasicBlock(); + HBasicBlock* right_block = graph()->CreateBasicBlock(); + HBasicBlock* join_block = graph()->CreateBasicBlock(); + if (is_logical_and) { + VISIT_FOR_CONTROL(expr->left(), right_block, empty_block); + } else { + VISIT_FOR_CONTROL(expr->left(), empty_block, right_block); + } + // TODO(kmillikin): Find a way to fix this. It's ugly that there are + // actually two empty blocks (one here and one inserted by + // TestContext::BuildBranch, and that they both have an HSimulate + // though the second one is not a merge node, and that we really have + // no good AST ID to put on that first HSimulate. + empty_block->SetJoinId(expr->id()); + right_block->SetJoinId(expr->RightId()); + set_current_block(right_block); + VISIT_FOR_EFFECT(expr->right()); + + empty_block->Goto(join_block); + current_block()->Goto(join_block); + join_block->SetJoinId(expr->id()); + set_current_block(join_block); + // We did not materialize any value in the predecessor environments, + // so there is no need to handle it here. 
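The empty blocks added for the short-circuit operators keep the graph in edge-split form: no edge may run directly from a block with several successors into a block with several predecessors, because such a critical edge leaves no place to insert resolution code. A small checker makes the invariant concrete:

    #include <cstdio>
    #include <vector>

    struct Block {
      std::vector<int> succ;
      std::vector<int> pred;
    };

    // Edge-split form: no edge from a block with multiple successors into a
    // block with multiple predecessors (a "critical" edge).
    bool IsEdgeSplit(const std::vector<Block>& g) {
      for (const Block& b : g)
        if (b.succ.size() > 1)
          for (int s : b.succ)
            if (g[s].pred.size() > 1) return false;
      return true;
    }

    int main() {
      // 0: test, 1: eval_right, 2: empty_block, 3: join -- the shape built above.
      std::vector<Block> with_split = {
          {{1, 2}, {}}, {{3}, {0}}, {{3}, {0}}, {{}, {1, 2}}};
      // Same graph with the short-circuit edge running straight to the join.
      std::vector<Block> critical = {
          {{1, 3}, {}}, {{3}, {0}}, {{}, {}}, {{}, {1, 0}}};
      std::printf("%d %d\n", IsEdgeSplit(with_split), IsEdgeSplit(critical));  // 1 0
      return 0;
    }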
} } else { @@ -5049,7 +5084,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { HValue* left = Pop(); Token::Value op = expr->op(); - TypeInfo info = oracle()->CompareType(expr); + TypeInfo type_info = oracle()->CompareType(expr); HInstruction* instr = NULL; if (op == Token::INSTANCEOF) { // Check to see if the rhs of the instanceof is a global function not @@ -5058,12 +5093,11 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { Handle<JSFunction> target = Handle<JSFunction>::null(); Variable* var = expr->right()->AsVariableProxy()->AsVariable(); bool global_function = (var != NULL) && var->is_global() && !var->is_this(); - CompilationInfo* info = graph()->info(); if (global_function && - info->has_global_object() && - !info->global_object()->IsAccessCheckNeeded()) { + info()->has_global_object() && + !info()->global_object()->IsAccessCheckNeeded()) { Handle<String> name = var->name(); - Handle<GlobalObject> global(info->global_object()); + Handle<GlobalObject> global(info()->global_object()); LookupResult lookup; global->Lookup(*name, &lookup); if (lookup.IsProperty() && @@ -5090,7 +5124,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { } } else if (op == Token::IN) { BAILOUT("Unsupported comparison: in"); - } else if (info.IsNonPrimitive()) { + } else if (type_info.IsNonPrimitive()) { switch (op) { case Token::EQ: case Token::EQ_STRICT: { @@ -5107,7 +5141,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { } } else { HCompare* compare = new HCompare(left, right, op); - Representation r = ToRepresentation(info); + Representation r = ToRepresentation(type_info); compare->SetInputRepresentation(r); instr = compare; } @@ -5148,340 +5182,361 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) { // Generators for inline runtime functions. // Support for types. 
-void HGraphBuilder::GenerateIsSmi(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsSmi(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HIsSmi* result = new HIsSmi(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsSpecObject(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsFunction(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsFunction(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count, - int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsArray(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsArray(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsRegExp(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsObject(int argument_count, int ast_id) { - ASSERT(argument_count == 1); - +void HGraphBuilder::GenerateIsObject(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HIsObject* test = new HIsObject(value); - ast_context()->ReturnInstruction(test, ast_id); + ast_context()->ReturnInstruction(test, call->id()); } -void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) { BAILOUT("inlined runtime function: IsNonNegativeSmi"); } -void HGraphBuilder::GenerateIsUndetectableObject(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { BAILOUT("inlined runtime 
function: IsUndetectableObject"); } void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf( - int argument_count, - int ast_id) { + CallRuntime* call) { BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf"); } // Support for construct call checks. -void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) { - ASSERT(argument_count == 0); - ast_context()->ReturnInstruction(new HIsConstructCall, ast_id); +void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { + ASSERT(call->arguments()->length() == 0); + ast_context()->ReturnInstruction(new HIsConstructCall, call->id()); } // Support for arguments.length and arguments[?]. -void HGraphBuilder::GenerateArgumentsLength(int argument_count, int ast_id) { - ASSERT(argument_count == 0); +void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) { + ASSERT(call->arguments()->length() == 0); HInstruction* elements = AddInstruction(new HArgumentsElements); HArgumentsLength* result = new HArgumentsLength(elements); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateArguments(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateArguments(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* index = Pop(); HInstruction* elements = AddInstruction(new HArgumentsElements); HInstruction* length = AddInstruction(new HArgumentsLength(elements)); HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } // Support for accessing the class and value fields of an object. -void HGraphBuilder::GenerateClassOf(int argument_count, int ast_id) { +void HGraphBuilder::GenerateClassOf(CallRuntime* call) { // The special form detected by IsClassOfTest is detected before we get here // and does not cause a bailout. BAILOUT("inlined runtime function: ClassOf"); } -void HGraphBuilder::GenerateValueOf(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateValueOf(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HValueOf* result = new HValueOf(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) { +void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) { BAILOUT("inlined runtime function: SetValueOf"); } // Fast support for charCodeAt(n). -void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) { - ASSERT(argument_count == 2); +void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) { + ASSERT(call->arguments()->length() == 2); + VISIT_FOR_VALUE(call->arguments()->at(0)); + VISIT_FOR_VALUE(call->arguments()->at(1)); HValue* index = Pop(); HValue* string = Pop(); HStringCharCodeAt* result = BuildStringCharCodeAt(string, index); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for string.charAt(n) and string[n]. 
-void HGraphBuilder::GenerateStringCharFromCode(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) { BAILOUT("inlined runtime function: StringCharFromCode"); } // Fast support for string.charAt(n) and string[n]. -void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::StringCharAt, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::StringCharAt, 2); + Drop(2); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for object equality testing. -void HGraphBuilder::GenerateObjectEquals(int argument_count, int ast_id) { - ASSERT(argument_count == 2); +void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) { + ASSERT(call->arguments()->length() == 2); + VISIT_FOR_VALUE(call->arguments()->at(0)); + VISIT_FOR_VALUE(call->arguments()->at(1)); HValue* right = Pop(); HValue* left = Pop(); HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateLog(int argument_count, int ast_id) { - UNREACHABLE(); // We caught this in VisitCallRuntime. +void HGraphBuilder::GenerateLog(CallRuntime* call) { + // %_Log is ignored in optimized code. + ast_context()->ReturnValue(graph()->GetConstantUndefined()); } // Fast support for Math.random(). -void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) { +void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { BAILOUT("inlined runtime function: RandomHeapNumber"); } // Fast support for StringAdd. -void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateStringAdd(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::StringAdd, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::StringAdd, 2); + Drop(2); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for SubString. -void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) { - ASSERT_EQ(3, argument_count); +void HGraphBuilder::GenerateSubString(CallRuntime* call) { + ASSERT_EQ(3, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::SubString, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::SubString, 3); + Drop(3); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for StringCompare. 
-void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateStringCompare(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::StringCompare, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::StringCompare, 2); + Drop(2); + ast_context()->ReturnInstruction(result, call->id()); } // Support for direct calls from JavaScript to native RegExp code. -void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) { - ASSERT_EQ(4, argument_count); +void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) { + ASSERT_EQ(4, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::RegExpExec, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::RegExpExec, 4); + Drop(4); + ast_context()->ReturnInstruction(result, call->id()); } // Construct a RegExp exec result with two in-object properties. -void HGraphBuilder::GenerateRegExpConstructResult(int argument_count, - int ast_id) { - ASSERT_EQ(3, argument_count); +void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { + ASSERT_EQ(3, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); HCallStub* result = - new HCallStub(context, CodeStub::RegExpConstructResult, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + new HCallStub(context, CodeStub::RegExpConstructResult, 3); + Drop(3); + ast_context()->ReturnInstruction(result, call->id()); } // Support for fast native caches. -void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) { +void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) { BAILOUT("inlined runtime function: GetFromCache"); } // Fast support for number to string. -void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateNumberToString(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::NumberToString, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::NumberToString, 1); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } // Fast swapping of elements. Takes three expressions, the object and two // indices. This should only be used if the indices are known to be // non-negative and within bounds of the elements array at the call site. -void HGraphBuilder::GenerateSwapElements(int argument_count, int ast_id) { +void HGraphBuilder::GenerateSwapElements(CallRuntime* call) { BAILOUT("inlined runtime function: SwapElements"); } // Fast call for custom callbacks. 
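The stub-backed generators above (StringCharAt, StringAdd, SubString, StringCompare, RegExpExec, RegExpConstructResult, NumberToString) follow a second shared shape: arguments are pushed as outgoing call parameters, a hard-coded arity replaces the old argument_count parameter, and Drop(n) takes over the environment cleanup that PreProcessCall used to do for these stubs. A sketch under the same assumptions as before; CodeStub::Foo stands in for the real stub:

    // Common shape of the rewritten stub-backed generators (sketch only).
    void HGraphBuilder::GenerateFoo(CallRuntime* call) {
      ASSERT_EQ(2, call->arguments()->length());
      VisitArgumentList(call->arguments());  // push arguments as outgoing params
      CHECK_BAILOUT;                         // stop if argument evaluation bailed out
      HContext* context = new HContext;
      AddInstruction(context);
      HCallStub* result = new HCallStub(context, CodeStub::Foo, 2);
      Drop(2);  // the stub pops its arguments; mirror that in the environment
      ast_context()->ReturnInstruction(result, call->id());
    }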
-void HGraphBuilder::GenerateCallFunction(int argument_count, int ast_id) { +void HGraphBuilder::GenerateCallFunction(CallRuntime* call) { BAILOUT("inlined runtime function: CallFunction"); } // Fast call to math functions. -void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateMathPow(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VISIT_FOR_VALUE(call->arguments()->at(0)); + VISIT_FOR_VALUE(call->arguments()->at(1)); HValue* right = Pop(); HValue* left = Pop(); HPower* result = new HPower(left, right); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateMathSin(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::TranscendentalCache, argument_count); + HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::SIN); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateMathCos(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::TranscendentalCache, argument_count); + HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::COS); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateMathLog(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::TranscendentalCache, argument_count); + HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::LOG); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathSqrt(int argument_count, int ast_id) { +void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) { BAILOUT("inlined runtime function: MathSqrt"); } // Check whether two RegExps are equivalent -void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) { BAILOUT("inlined runtime function: IsRegExpEquivalent"); } -void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count, - int ast_id) { - BAILOUT("inlined runtime function: GetCachedArrayIndex"); +void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + 
VISIT_FOR_VALUE(call->arguments()->at(0)); + HValue* value = Pop(); + HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) { BAILOUT("inlined runtime function: FastAsciiArrayJoin"); } @@ -5882,7 +5937,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) { if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister(); trace_.Add(" %d %d", parent_index, hint_index); UseInterval* cur_interval = range->first_interval(); - while (cur_interval != NULL) { + while (cur_interval != NULL && range->Covers(cur_interval->start())) { trace_.Add(" [%d, %d[", cur_interval->start().Value(), cur_interval->end().Value()); @@ -5891,7 +5946,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) { UsePosition* current_pos = range->first_pos(); while (current_pos != NULL) { - if (current_pos->RegisterIsBeneficial()) { + if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) { trace_.Add(" %d M", current_pos->pos().Value()); } current_pos = current_pos->next(); diff --git a/src/hydrogen.h b/src/hydrogen.h index 6f41ee68..d8b1cfb6 100644 --- a/src/hydrogen.h +++ b/src/hydrogen.h @@ -60,6 +60,8 @@ class HBasicBlock: public ZoneObject { HGraph* graph() const { return graph_; } const ZoneList<HPhi*>* phis() const { return &phis_; } HInstruction* first() const { return first_; } + HInstruction* last() const { return last_; } + void set_last(HInstruction* instr) { last_ = instr; } HInstruction* GetLastInstruction(); HControlInstruction* end() const { return end_; } HLoopInformation* loop_information() const { return loop_information_; } @@ -115,6 +117,7 @@ class HBasicBlock: public ZoneObject { void SetJoinId(int id); void Finish(HControlInstruction* last); + void FinishExit(HControlInstruction* instruction); void Goto(HBasicBlock* block, bool include_stack_check = false); int PredecessorIndexOf(HBasicBlock* predecessor) const; @@ -148,7 +151,7 @@ class HBasicBlock: public ZoneObject { HGraph* graph_; ZoneList<HPhi*> phis_; HInstruction* first_; - HInstruction* last_; // Last non-control instruction of the block. 
+ HInstruction* last_; HControlInstruction* end_; HLoopInformation* loop_information_; ZoneList<HBasicBlock*> predecessors_; @@ -194,94 +197,26 @@ class HSubgraph: public ZoneObject { explicit HSubgraph(HGraph* graph) : graph_(graph), entry_block_(NULL), - exit_block_(NULL), - break_continue_info_(4) { + exit_block_(NULL) { } HGraph* graph() const { return graph_; } - HEnvironment* environment() const { - ASSERT(HasExit()); - return exit_block_->last_environment(); - } - - bool HasExit() const { return exit_block_ != NULL; } - - void PreProcessOsrEntry(IterationStatement* statement); - - void AppendOptional(HSubgraph* graph, - bool on_true_branch, - HValue* boolean_value); - void AppendJoin(HSubgraph* then_graph, HSubgraph* else_graph, AstNode* node); - void AppendWhile(HSubgraph* condition, - HSubgraph* body, - IterationStatement* statement, - HSubgraph* continue_subgraph, - HSubgraph* exit); - void AppendDoWhile(HSubgraph* body, - IterationStatement* statement, - HSubgraph* go_back, - HSubgraph* exit); - void AppendEndless(HSubgraph* body, IterationStatement* statement); - void Append(HSubgraph* next, BreakableStatement* statement); - void ResolveContinue(IterationStatement* statement); - HBasicBlock* BundleBreak(BreakableStatement* statement); - HBasicBlock* BundleContinue(IterationStatement* statement); - HBasicBlock* BundleBreakContinue(BreakableStatement* statement, - bool is_continue, - int join_id); - HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id); - - void FinishExit(HControlInstruction* instruction); - void FinishBreakContinue(BreakableStatement* target, bool is_continue); - void Initialize(HBasicBlock* block) { - ASSERT(entry_block_ == NULL); - entry_block_ = block; - exit_block_ = block; - } HBasicBlock* entry_block() const { return entry_block_; } HBasicBlock* exit_block() const { return exit_block_; } void set_exit_block(HBasicBlock* block) { exit_block_ = block; } - void ConnectExitTo(HBasicBlock* other, bool include_stack_check = false) { - if (HasExit()) { - exit_block()->Goto(other, include_stack_check); - } - } - - void AddBreakContinueInfo(HSubgraph* other) { - break_continue_info_.AddAll(other->break_continue_info_); + void Initialize(HBasicBlock* block) { + ASSERT(entry_block_ == NULL); + entry_block_ = block; + exit_block_ = block; } protected: - class BreakContinueInfo: public ZoneObject { - public: - BreakContinueInfo(BreakableStatement* target, HBasicBlock* block, - bool is_continue) - : target_(target), block_(block), continue_(is_continue) {} - BreakableStatement* target() const { return target_; } - HBasicBlock* block() const { return block_; } - bool is_continue() const { return continue_; } - bool IsResolved() const { return block_ == NULL; } - void Resolve() { block_ = NULL; } - - private: - BreakableStatement* target_; - HBasicBlock* block_; - bool continue_; - }; - - const ZoneList<BreakContinueInfo*>* break_continue_info() const { - return &break_continue_info_; - } - HGraph* graph_; // The graph this is a subgraph of. 
HBasicBlock* entry_block_; HBasicBlock* exit_block_; - - private: - ZoneList<BreakContinueInfo*> break_continue_info_; }; @@ -289,13 +224,8 @@ class HGraph: public HSubgraph { public: explicit HGraph(CompilationInfo* info); - CompilationInfo* info() const { return info_; } - - bool AllowCodeMotion() const; - const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; } const ZoneList<HPhi*>* phi_list() const { return phi_list_; } - Handle<String> debug_name() const { return info_->function()->debug_name(); } HEnvironment* start_environment() const { return start_environment_; } void InitializeInferredTypes(); @@ -312,7 +242,7 @@ class HGraph: public HSubgraph { // which are not supported by the optimizing compiler. bool CollectPhis(); - Handle<Code> Compile(); + Handle<Code> Compile(CompilationInfo* info); void set_undefined_constant(HConstant* constant) { undefined_constant_.set(constant); @@ -333,9 +263,6 @@ class HGraph: public HSubgraph { arguments_object_.set(object); } - // True iff. we are compiling for OSR and the statement is the entry. - bool HasOsrEntryAt(IterationStatement* statement); - int GetMaximumValueID() const { return values_.length(); } int GetNextBlockID() { return next_block_id_++; } int GetNextValueID(HValue* value) { @@ -367,15 +294,13 @@ class HGraph: public HSubgraph { void PropagateMinusZeroChecks(HValue* value, BitVector* visited); void InsertRepresentationChangeForUse(HValue* value, HValue* use, - Representation to, - bool truncating); + Representation to); void InsertRepresentationChanges(HValue* current); void InferTypes(ZoneList<HValue*>* worklist); void InitializeInferredTypes(int from_inclusive, int to_inclusive); void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor); int next_block_id_; - CompilationInfo* info_; HEnvironment* start_environment_; ZoneList<HBasicBlock*> blocks_; ZoneList<HValue*> values_; @@ -526,6 +451,8 @@ class HEnvironment: public ZoneObject { class HGraphBuilder; +// This class is not BASE_EMBEDDED because our inlining implementation uses +// new and delete. class AstContext { public: bool IsEffect() const { return kind_ == Expression::kEffect; } @@ -617,26 +544,125 @@ class TestContext: public AstContext { }; +class FunctionState BASE_EMBEDDED { + public: + FunctionState(HGraphBuilder* owner, + CompilationInfo* info, + TypeFeedbackOracle* oracle); + ~FunctionState(); + + CompilationInfo* compilation_info() { return compilation_info_; } + TypeFeedbackOracle* oracle() { return oracle_; } + AstContext* call_context() { return call_context_; } + HBasicBlock* function_return() { return function_return_; } + TestContext* test_context() { return test_context_; } + void ClearInlinedTestContext() { + delete test_context_; + test_context_ = NULL; + } + + private: + HGraphBuilder* owner_; + + CompilationInfo* compilation_info_; + TypeFeedbackOracle* oracle_; + + // During function inlining, expression context of the call being + inlined. NULL when not inlining. + AstContext* call_context_; + + // When inlining in an effect or value context, this is the return block. + It is NULL otherwise. When inlining in a test context, there are a + pair of return blocks in the context. When not inlining, there is no + local return point. + HBasicBlock* function_return_; + + // When inlining a call in a test context, a context containing a pair of + return blocks. NULL in all other cases. 
+ TestContext* test_context_; + + FunctionState* outer_; +}; + + class HGraphBuilder: public AstVisitor { public: - explicit HGraphBuilder(TypeFeedbackOracle* oracle) - : oracle_(oracle), + enum BreakType { BREAK, CONTINUE }; + + // A class encapsulating (lazily-allocated) break and continue blocks for + // a breakable statement. Separated from BreakAndContinueScope so that it + // can have a separate lifetime. + class BreakAndContinueInfo BASE_EMBEDDED { + public: + explicit BreakAndContinueInfo(BreakableStatement* target) + : target_(target), break_block_(NULL), continue_block_(NULL) { + } + + BreakableStatement* target() { return target_; } + HBasicBlock* break_block() { return break_block_; } + void set_break_block(HBasicBlock* block) { break_block_ = block; } + HBasicBlock* continue_block() { return continue_block_; } + void set_continue_block(HBasicBlock* block) { continue_block_ = block; } + + private: + BreakableStatement* target_; + HBasicBlock* break_block_; + HBasicBlock* continue_block_; + }; + + // A helper class to maintain a stack of current BreakAndContinueInfo + // structures mirroring BreakableStatement nesting. + class BreakAndContinueScope BASE_EMBEDDED { + public: + BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner) + : info_(info), owner_(owner), next_(owner->break_scope()) { + owner->set_break_scope(this); + } + + ~BreakAndContinueScope() { owner_->set_break_scope(next_); } + + BreakAndContinueInfo* info() { return info_; } + HGraphBuilder* owner() { return owner_; } + BreakAndContinueScope* next() { return next_; } + + // Search the break stack for a break or continue target. + HBasicBlock* Get(BreakableStatement* stmt, BreakType type); + + private: + BreakAndContinueInfo* info_; + HGraphBuilder* owner_; + BreakAndContinueScope* next_; + }; + + HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle) + : function_state_(NULL), + initial_function_state_(this, info, oracle), + ast_context_(NULL), + break_scope_(NULL), graph_(NULL), current_subgraph_(NULL), - peeled_statement_(NULL), - ast_context_(NULL), - call_context_(NULL), - function_return_(NULL), - inlined_count_(0) { } + inlined_count_(0) { + // This is not initialized in the initializer list because the + // constructor for the initial state relies on function_state_ == NULL + // to know it's the initial state. + function_state_= &initial_function_state_; + } - HGraph* CreateGraph(CompilationInfo* info); + HGraph* CreateGraph(); // Simple accessors. HGraph* graph() const { return graph_; } HSubgraph* subgraph() const { return current_subgraph_; } + BreakAndContinueScope* break_scope() const { return break_scope_; } + void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; } - HEnvironment* environment() const { return subgraph()->environment(); } - HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); } + HBasicBlock* current_block() const { return subgraph()->exit_block(); } + void set_current_block(HBasicBlock* block) { + subgraph()->set_exit_block(block); + } + HEnvironment* environment() const { + return current_block()->last_environment(); + } // Adding instructions. HInstruction* AddInstruction(HInstruction* instr); @@ -648,8 +674,7 @@ class HGraphBuilder: public AstVisitor { private: // Type of a member function that generates inline code for a native function. 
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count, - int ast_id); + typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call); // Forward declarations for inner scope classes. class SubgraphScope; @@ -665,15 +690,34 @@ class HGraphBuilder: public AstVisitor { static const int kMaxSourceSize = 600; // Simple accessors. - TypeFeedbackOracle* oracle() const { return oracle_; } + FunctionState* function_state() const { return function_state_; } + void set_function_state(FunctionState* state) { function_state_ = state; } + AstContext* ast_context() const { return ast_context_; } void set_ast_context(AstContext* context) { ast_context_ = context; } - AstContext* call_context() const { return call_context_; } - HBasicBlock* function_return() const { return function_return_; } + + // Accessors forwarded to the function state. + CompilationInfo* info() const { + return function_state()->compilation_info(); + } + TypeFeedbackOracle* oracle() const { return function_state()->oracle(); } + + AstContext* call_context() const { + return function_state()->call_context(); + } + HBasicBlock* function_return() const { + return function_state()->function_return(); + } + TestContext* inlined_test_context() const { + return function_state()->test_context(); + } + void ClearInlinedTestContext() { + function_state()->ClearInlinedTestContext(); + } // Generators for inline runtime functions. #define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \ - void Generate##Name(int argument_count, int ast_id); + void Generate##Name(CallRuntime* call); INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION) INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION) @@ -681,10 +725,30 @@ class HGraphBuilder: public AstVisitor { void Bailout(const char* reason); - void AppendPeeledWhile(IterationStatement* stmt, - HSubgraph* cond_graph, - HSubgraph* body_graph, - HSubgraph* exit_graph); + void PreProcessOsrEntry(IterationStatement* statement); + // True iff. we are compiling for OSR and the statement is the entry. + bool HasOsrEntryAt(IterationStatement* statement); + + HBasicBlock* CreateJoin(HBasicBlock* first, + HBasicBlock* second, + int join_id); + + // Create a back edge in the flow graph. body_exit is the predecessor + // block and loop_entry is the successor block. loop_successor is the + // block where control flow exits the loop normally (e.g., via failure of + // the condition) and break_block is the block where control flow breaks + // from the loop. All blocks except loop_entry can be NULL. The return + // value is the new successor block which is the join of loop_successor + // and break_block, or NULL. + HBasicBlock* CreateLoop(IterationStatement* statement, + HBasicBlock* loop_entry, + HBasicBlock* body_exit, + HBasicBlock* loop_successor, + HBasicBlock* break_block); + + HBasicBlock* JoinContinue(IterationStatement* statement, + HBasicBlock* exit_block, + HBasicBlock* continue_block); void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts); void AddToSubgraph(HSubgraph* graph, Statement* stmt); @@ -700,17 +764,21 @@ class HGraphBuilder: public AstVisitor { HBasicBlock* true_block, HBasicBlock* false_block); - // Visit an argument subexpression. + // Visit an argument subexpression and emit a push to the outgoing + // arguments. void VisitArgument(Expression* expr); void VisitArgumentList(ZoneList<Expression*>* arguments); + // Visit a list of expressions from left to right, each in a value context. 
+ void VisitExpressions(ZoneList<Expression*>* exprs); + void AddPhi(HPhi* phi); void PushAndAdd(HInstruction* instr); // Remove the arguments from the bailout environment and emit instructions // to push them as outgoing parameters. - void PreProcessCall(HCall* call); + template <int V> HInstruction* PreProcessCall(HCall<V>* call); void AssumeRepresentation(HValue* value, Representation r); static Representation ToRepresentation(TypeInfo info); @@ -722,13 +790,10 @@ class HGraphBuilder: public AstVisitor { AST_NODE_LIST(DECLARE_VISIT) #undef DECLARE_VISIT - bool ShouldPeel(HSubgraph* cond, HSubgraph* body); - HBasicBlock* CreateBasicBlock(HEnvironment* env); HSubgraph* CreateEmptySubgraph(); - HSubgraph* CreateGotoSubgraph(HEnvironment* env); HSubgraph* CreateBranchSubgraph(HEnvironment* env); - HSubgraph* CreateLoopHeaderSubgraph(HEnvironment* env); + HBasicBlock* CreateLoopHeaderBlock(); HSubgraph* CreateInlinedSubgraph(HEnvironment* outer, Handle<JSFunction> target, FunctionLiteral* function); @@ -745,7 +810,11 @@ class HGraphBuilder: public AstVisitor { HValue* receiver, Handle<Map> receiver_map, CheckType check_type); - void TraceInline(Handle<JSFunction> target, bool result); + + // If --trace-inlining, print a line of the inlining trace. Inlining + // succeeded if the reason string is NULL and failed if there is a + // non-NULL reason string. + void TraceInline(Handle<JSFunction> target, const char* failure_reason); void HandleGlobalVariableAssignment(Variable* var, HValue* value, @@ -814,6 +883,11 @@ class HGraphBuilder: public AstVisitor { HValue* val, Expression* expr); + HInstruction* BuildStoreKeyedPixelArrayElement(HValue* object, + HValue* key, + HValue* val, + Expression* expr); + HCompare* BuildSwitchCompare(HSubgraph* subgraph, HValue* switch_value, CaseClause* clause); @@ -826,30 +900,31 @@ class HGraphBuilder: public AstVisitor { bool smi_and_map_check); - HBasicBlock* BuildTypeSwitch(ZoneMapList* maps, - ZoneList<HSubgraph*>* subgraphs, - HValue* receiver, + HBasicBlock* BuildTypeSwitch(HValue* receiver, + ZoneMapList* maps, + ZoneList<HSubgraph*>* body_graphs, + HSubgraph* default_graph, int join_id); - TypeFeedbackOracle* oracle_; - HGraph* graph_; - HSubgraph* current_subgraph_; - IterationStatement* peeled_statement_; + // The translation state of the currently-being-translated function. + FunctionState* function_state_; + + // The base of the function state stack. + FunctionState initial_function_state_; + // Expression context of the currently visited subexpression. NULL when // visiting statements. AstContext* ast_context_; - // During function inlining, expression context of the call being - // inlined. NULL when not inlining. - AstContext* call_context_; + // A stack of breakable statements entered. + BreakAndContinueScope* break_scope_; - // When inlining a call in an effect or value context, the return - // block. NULL otherwise. When inlining a call in a test context, there - // are a pair of target blocks in the call context. - HBasicBlock* function_return_; + HGraph* graph_; + HSubgraph* current_subgraph_; int inlined_count_; + friend class FunctionState; // Pushes and pops the state stack. friend class AstContext; // Pushes and pops the AST context stack. 
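  // Usage sketch for the break/continue machinery declared above (assumed
  // from these declarations, not quoted from hydrogen.cc): a loop or switch
  // visitor pairs an info object with a scope so that break and continue
  // targets resolve through break_scope():
  //
  //   BreakAndContinueInfo break_info(stmt);
  //   { BreakAndContinueScope push(&break_info, this);
  //     Visit(stmt->body());  // break/continue look up blocks via Get()
  //   }  // scope destructor restores the previous break_scope() head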
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder); diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc index e4d09f2e..6652df27 100644 --- a/src/ia32/assembler-ia32.cc +++ b/src/ia32/assembler-ia32.cc @@ -2607,8 +2607,8 @@ void Assembler::RecordDebugBreakSlot() { } -void Assembler::RecordComment(const char* msg) { - if (FLAG_code_comments) { +void Assembler::RecordComment(const char* msg, bool force) { + if (FLAG_code_comments || force) { EnsureSpace ensure_space(this); RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); } diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h index 11f324ee..b60157c7 100644 --- a/src/ia32/assembler-ia32.h +++ b/src/ia32/assembler-ia32.h @@ -183,13 +183,6 @@ const XMMRegister xmm7 = { 7 }; typedef XMMRegister DoubleRegister; -// Index of register used in pusha/popa. -// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI -inline int EspIndexForPushAll(Register reg) { - return Register::kNumRegisters - 1 - reg.code(); -} - - enum Condition { // any value < 0 is considered no_condition no_condition = -1, @@ -957,8 +950,9 @@ class Assembler : public Malloced { void RecordDebugBreakSlot(); // Record a comment relocation entry that can be used by a disassembler. - // Use --code-comments to enable. - void RecordComment(const char* msg); + // Use --code-comments to enable, or provide "force = true" flag to always + // write a comment. + void RecordComment(const char* msg, bool force = false); // Writes a single byte or word of data in the code stream. Used for // inline tables, e.g., jump-tables. @@ -979,6 +973,10 @@ class Assembler : public Malloced { PositionsRecorder* positions_recorder() { return &positions_recorder_; } + int relocation_writer_size() { + return (buffer_ + buffer_size_) - reloc_info_writer.pos(); + } + // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc index 0a3e0930..c7e55270 100644 --- a/src/ia32/builtins-ia32.cc +++ b/src/ia32/builtins-ia32.cc @@ -589,6 +589,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Change context eagerly in case we need the global receiver. __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + // Do not transform the receiver for strict mode functions. + __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &shift_arguments); + + // Compute the receiver in non-strict mode. __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument. __ test(ebx, Immediate(kSmiTagMask)); __ j(zero, &convert_to_object); @@ -736,6 +743,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Compute the receiver. Label call_to_object, use_global_receiver, push_receiver; __ mov(ebx, Operand(ebp, 3 * kPointerSize)); + + // Do not transform the receiver for strict mode functions. + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &push_receiver); + + // Compute the receiver in non-strict mode. 
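  // (A sketch of what the following sequence implements, using illustrative
  //  names rather than V8 API:
  //    if (IsNullOrUndefined(receiver)) receiver = global_receiver;
  //    else if (!IsJSObject(receiver)) receiver = ToObject(receiver);
  //  Strict-mode callees, filtered out by the test above, receive the
  //  receiver value unchanged.)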
__ test(ebx, Immediate(kSmiTagMask)); __ j(zero, &call_to_object); __ cmp(ebx, Factory::null_value()); @@ -1233,11 +1248,9 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { Label generic_constructor; if (FLAG_debug_code) { - // The array construct code is only set for the builtin Array function which - // does always have a map. - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx); - __ cmp(edi, Operand(ebx)); - __ Assert(equal, "Unexpected Array function"); + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + // Initial map for the builtin Array function should be a map. __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); // Will both indicate a NULL and a Smi. diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index 7d70ac34..7efa9340 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -2385,14 +2385,14 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { - NearLabel call_runtime; + ASSERT(op_ == Token::ADD); + NearLabel left_not_string, call_runtime; // Registers containing left and right operands respectively. Register left = edx; Register right = eax; // Test if left operand is a string. - NearLabel left_not_string; __ test(left, Immediate(kSmiTagMask)); __ j(zero, &left_not_string); __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx); @@ -3399,7 +3399,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ test(edx, Immediate(kSmiTagMask)); __ j(not_zero, &base_nonsmi); - // Optimized version when both exponent and base is a smi. + // Optimized version when both exponent and base are smis. Label powi; __ SmiUntag(edx); __ cvtsi2sd(xmm0, Operand(edx)); @@ -3438,7 +3438,6 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ j(not_carry, &no_multiply); __ mulsd(xmm1, xmm0); __ bind(&no_multiply); - __ test(eax, Operand(eax)); __ mulsd(xmm0, xmm0); __ j(not_zero, &while_true); @@ -3525,7 +3524,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ AllocateHeapNumber(ecx, eax, edx, &call_runtime); __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1); __ mov(eax, ecx); - __ ret(2); + __ ret(2 * kPointerSize); __ bind(&call_runtime); __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); @@ -3887,7 +3886,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(&Counters::regexp_entry_native, 1); static const int kRegExpExecuteArguments = 7; - __ PrepareCallCFunction(kRegExpExecuteArguments, ecx); + __ EnterApiExitFrame(kRegExpExecuteArguments); // Argument 7: Indicate that this is a direct call from JavaScript. __ mov(Operand(esp, 6 * kPointerSize), Immediate(1)); @@ -3932,7 +3931,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Locate the code entry and call it. __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(edx, kRegExpExecuteArguments); + __ call(Operand(edx)); + + // Drop arguments and come back to JS mode. + __ LeaveApiExitFrame(); // Check the result. Label success; @@ -3949,12 +3951,30 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. 
ExternalReference pending_exception(Top::k_pending_exception_address); - __ mov(eax, + __ mov(edx, Operand::StaticVariable(ExternalReference::the_hole_value_location())); - __ cmp(eax, Operand::StaticVariable(pending_exception)); + __ mov(eax, Operand::StaticVariable(pending_exception)); + __ cmp(edx, Operand(eax)); __ j(equal, &runtime); + // For exception, throw the exception again. + + // Clear the pending exception variable. + __ mov(Operand::StaticVariable(pending_exception), edx); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ cmp(eax, Factory::termination_exception()); + Label throw_termination_exception; + __ j(equal, &throw_termination_exception); + + // Handle normal exception by following handler chain. + __ Throw(eax); + + __ bind(&throw_termination_exception); + __ ThrowUncatchable(TERMINATION, eax); + __ bind(&failure); - // For failure and exception return null. + // For failure to match, return null. __ mov(Operand(eax), Factory::null_value()); __ ret(4 * kPointerSize); @@ -4628,34 +4648,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // eax holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - ExternalReference handler_address(Top::k_handler_address); - __ mov(esp, Operand::StaticVariable(handler_address)); - - // Restore next handler and frame pointer, discard handler state. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(Operand::StaticVariable(handler_address)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); - __ pop(ebp); - __ pop(edx); // Remove state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of - // a JS entry frame. - __ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL. - NearLabel skip; - __ cmp(ebp, 0); - __ j(equal, &skip, not_taken); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ bind(&skip); - - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ ret(0); + __ Throw(eax); } @@ -4778,52 +4771,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - ExternalReference handler_address(Top::k_handler_address); - __ mov(esp, Operand::StaticVariable(handler_address)); - - // Unwind the handlers until the ENTRY handler is found. - NearLabel loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY)); - __ j(equal, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ mov(esp, Operand(esp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(Operand::StaticVariable(handler_address)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. 
- ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(eax, false); - __ mov(Operand::StaticVariable(external_caught), eax); - - // Set pending exception and eax to out of memory exception. - ExternalReference pending_exception(Top::k_pending_exception_address); - __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException())); - __ mov(Operand::StaticVariable(pending_exception), eax); - } - - // Clear the context pointer. - __ Set(esi, Immediate(0)); - - // Restore fp from handler and discard handler state. - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); - __ pop(ebp); - __ pop(edx); // State. - - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ ret(0); + __ ThrowUncatchable(type, eax); } @@ -6559,9 +6507,19 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, __ mov(untagged_key, key); __ SmiUntag(untagged_key); - // Verify that the receiver has pixel array elements. __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset)); - __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true); + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. + __ cmp(FieldOperand(elements, HeapObject::kMapOffset), + Immediate(Factory::pixel_array_map())); + __ Assert(equal, "Elements isn't a pixel array"); + } + } // Key must be in range. __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset)); @@ -6575,6 +6533,90 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, } +// Stores an indexed element into a pixel array, clamping the stored value. +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register scratch1, + bool load_elements_from_receiver, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range) { + // Register use: + // receiver - holds the receiver and is unchanged unless the + // store succeeds. + // key - holds the key (must be a smi) and is unchanged. + // value - holds the value (must be a smi) and is unchanged. + // elements - holds the element object of the receiver on entry if + // load_elements_from_receiver is false, otherwise used + // internally to store the pixel arrays elements and + // external array pointer. + // + // receiver, key and value remain unmodified until it's guaranteed that the + // store will succeed. + Register external_pointer = elements; + Register untagged_key = scratch1; + Register untagged_value = receiver; // Only set once success guaranteed. + + // Fetch the receiver's elements if the caller hasn't already done so. + if (load_elements_from_receiver) { + __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset)); + } + + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. 
+ __ cmp(FieldOperand(elements, HeapObject::kMapOffset), + Immediate(Factory::pixel_array_map())); + __ Assert(equal, "Elements isn't a pixel array"); + } + } + + // Some callers already have verified that the key is a smi. key_not_smi is + set to NULL as a sentinel for that case. Otherwise, an explicit check + is added to ensure the key is a smi. + if (key_not_smi != NULL) { + __ JumpIfNotSmi(key, key_not_smi); + } else { + if (FLAG_debug_code) { + __ AbortIfNotSmi(key); + } + } + + // Key must be a smi and it must be in range. + __ mov(untagged_key, key); + __ SmiUntag(untagged_key); + __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset)); + __ j(above_equal, out_of_range); // unsigned check handles negative keys. + + // Value must be a smi. + __ JumpIfNotSmi(value, value_not_smi); + __ mov(untagged_value, value); + __ SmiUntag(untagged_value); + + { // Clamp the value to [0..255]. + NearLabel done; + __ test(untagged_value, Immediate(0xFFFFFF00)); + __ j(zero, &done); + __ setcc(negative, untagged_value); // 1 if negative, 0 if positive. + __ dec_b(untagged_value); // 0 if negative, 255 if positive. + __ bind(&done); + } + + __ mov(external_pointer, + FieldOperand(elements, PixelArray::kExternalPointerOffset)); + __ mov_b(Operand(external_pointer, untagged_key, times_1, 0), untagged_value); + __ ret(0); // Return value in eax. +} + + #undef __ } } // namespace v8::internal diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h index 2064574c..4a1119ab 100644 --- a/src/ia32/code-stubs-ia32.h +++ b/src/ia32/code-stubs-ia32.h @@ -45,8 +45,8 @@ class TranscendentalCacheStub: public CodeStub { UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits }; - explicit TranscendentalCacheStub(TranscendentalCache::Type type, - ArgumentType argument_type) + TranscendentalCacheStub(TranscendentalCache::Type type, + ArgumentType argument_type) : type_(type), argument_type_(argument_type) {} void Generate(MacroAssembler* masm); private: @@ -490,14 +490,14 @@ class NumberToStringStub: public CodeStub { }; -// Generate code the to load an element from a pixel array. The receiver is -// assumed to not be a smi and to have elements, the caller must guarantee this -// precondition. If the receiver does not have elements that are pixel arrays, -// the generated code jumps to not_pixel_array. If key is not a smi, then the -// generated code branches to key_not_smi. Callers can specify NULL for -// key_not_smi to signal that a smi check has already been performed on key so -// that the smi check is not generated . If key is not a valid index within the -// bounds of the pixel array, the generated code jumps to out_of_range. +// Generate code to load an element from a pixel array. The receiver is assumed +// to not be a smi and to have elements; the caller must guarantee this +// precondition. If key is not a smi, then the generated code branches to +// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi +// check has already been performed on key so that the smi check is not +// generated. If key is not a valid index within the bounds of the pixel array, +// the generated code jumps to out_of_range. receiver, key and elements are +// unchanged throughout the generated code sequence. 
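An aside on the clamp block in GenerateFastPixelArrayStore above: the test/setcc/dec_b triple is a branch-minimal saturation to [0..255]. A standalone C++ sketch of the same computation (ClampToByte is an assumed name, not V8 API):

    #include <stdint.h>

    // Saturate an int32 to 0..255 the way the setcc/dec_b sequence does.
    static inline uint8_t ClampToByte(int32_t value) {
      if ((value & 0xFFFFFF00) == 0) {
        return static_cast<uint8_t>(value);   // already in 0..255
      }
      uint8_t byte = (value < 0) ? 1 : 0;     // setcc(negative)
      return static_cast<uint8_t>(byte - 1);  // 1 -> 0x00, 0 -> 0xFF (wraps)
    }

Negative values therefore store 0 and values above 255 store 255.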
void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -508,6 +508,28 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, Label* key_not_smi, Label* out_of_range); +// Generate code to store an element into a pixel array, clamping values between +// [0..255]. The receiver is assumed to not be a smi and to have elements, the +// caller must guarantee this precondition. If key is not a smi, then the +// generated code branches to key_not_smi. Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If the value is not a smi, the generated +// code will branch to value_not_smi. If the receiver doesn't have pixel array +// elements, the generated code will branch to not_pixel_array, unless +// not_pixel_array is NULL, in which case the caller must ensure that the +// receiver has pixel array elements. If key is not a valid index within the +// bounds of the pixel array, the generated code jumps to out_of_range. +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register scratch1, + bool load_elements_from_receiver, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range); } } // namespace v8::internal diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc index b977db87..3a2753d2 100644 --- a/src/ia32/codegen-ia32.cc +++ b/src/ia32/codegen-ia32.cc @@ -3526,7 +3526,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { frame_->EmitPush(esi); // The context is the first argument. frame_->EmitPush(Immediate(pairs)); frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0))); - Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); + frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag()))); + Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4); // Return value is ignored. } @@ -5259,7 +5260,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // by initialization. value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); } else { - value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); + frame_->Push(Smi::FromInt(strict_mode_flag())); + value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4); } // Storing a variable must keep the (new) value on the expression // stack. This is necessary for compiling chained assignment @@ -5360,10 +5362,20 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { void CodeGenerator::VisitLiteral(Literal* node) { Comment cmnt(masm_, "[ Literal"); - if (in_safe_int32_mode()) { - frame_->PushUntaggedElement(node->handle()); + if (frame_->ConstantPoolOverflowed()) { + Result temp = allocator_->Allocate(); + ASSERT(temp.is_valid()); + if (in_safe_int32_mode()) { + temp.set_untagged_int32(true); + } + __ Set(temp.reg(), Immediate(node->handle())); + frame_->Push(&temp); } else { - frame_->Push(node->handle()); + if (in_safe_int32_mode()) { + frame_->PushUntaggedElement(node->handle()); + } else { + frame_->Push(node->handle()); + } } } @@ -5608,8 +5620,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { Load(property->key()); Load(property->value()); if (property->emit_store()) { + frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes // Ignore the result. 
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3); + Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4); } else { frame_->Drop(3); } @@ -8225,21 +8238,25 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { if (property != NULL) { Load(property->obj()); Load(property->key()); - Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2); + frame_->Push(Smi::FromInt(strict_mode_flag())); + Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3); frame_->Push(&answer); return; } Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); if (variable != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); frame_->Push(variable->name()); + frame_->Push(Smi::FromInt(kNonStrictMode)); Result answer = frame_->InvokeBuiltin(Builtins::DELETE, - CALL_FUNCTION, 2); + CALL_FUNCTION, 3); frame_->Push(&answer); - return; } else if (slot != NULL && slot->type() == Slot::LOOKUP) { // Call the runtime to delete from the context holding the named @@ -8250,13 +8267,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { frame_->EmitPush(Immediate(variable->name())); Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); frame_->Push(&answer); - return; + } else { + // Default: Result of deleting non-global, not dynamically + // introduced variables is false. + frame_->Push(Factory::false_value()); } - - // Default: Result of deleting non-global, not dynamically - // introduced variables is false. - frame_->Push(Factory::false_value()); - } else { // Default: Result of deleting expressions is true. Load(node->expression()); // may have side-effects @@ -8298,6 +8313,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { switch (op) { case Token::SUB: { __ neg(value.reg()); + frame_->Push(&value); if (node->no_negative_zero()) { // -MIN_INT is MIN_INT with the overflow flag set. unsafe_bailout_->Branch(overflow); @@ -8310,17 +8326,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { } case Token::BIT_NOT: { __ not_(value.reg()); + frame_->Push(&value); break; } case Token::ADD: { // Unary plus has no effect on int32 values. + frame_->Push(&value); break; } default: UNREACHABLE(); break; } - frame_->Push(&value); } else { Load(node->expression()); bool can_overwrite = node->expression()->ResultOverwriteAllowed(); @@ -9456,11 +9473,13 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { DeferredReferenceSetKeyedValue(Register value, Register key, Register receiver, - Register scratch) + Register scratch, + StrictModeFlag strict_mode) : value_(value), key_(key), receiver_(receiver), - scratch_(scratch) { + scratch_(scratch), + strict_mode_(strict_mode) { set_comment("[ DeferredReferenceSetKeyedValue"); } @@ -9474,6 +9493,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { Register receiver_; Register scratch_; Label patch_site_; + StrictModeFlag strict_mode_; }; @@ -9532,7 +9552,9 @@ void DeferredReferenceSetKeyedValue::Generate() { } // Call the IC stub. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? 
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); __ call(ic, RelocInfo::CODE_TARGET); // The delta from the start of the map-compare instruction to the // test instruction. We use masm_-> directly here instead of the @@ -9894,7 +9916,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { new DeferredReferenceSetKeyedValue(result.reg(), key.reg(), receiver.reg(), - tmp.reg()); + tmp.reg(), + strict_mode_flag()); // Check that the receiver is not a smi. __ test(receiver.reg(), Immediate(kSmiTagMask)); @@ -9949,7 +9972,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { deferred->BindExit(); } else { - result = frame()->CallKeyedStoreIC(); + result = frame()->CallKeyedStoreIC(strict_mode_flag()); // Make sure that we do not have a test instruction after the // call. A test instruction after the call is used to // indicate that we have generated an inline version of the diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc index a646052e..5f4d9444 100644 --- a/src/ia32/deoptimizer-ia32.cc +++ b/src/ia32/deoptimizer-ia32.cc @@ -80,6 +80,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { Address prev_address = code_start_address; for (unsigned i = 0; i < table.length(); ++i) { Address curr_address = code_start_address + table.GetPcOffset(i); + ASSERT_GE(curr_address, prev_address); ZapCodeRange(prev_address, curr_address); SafepointEntry safepoint_entry = table.GetEntry(i); @@ -97,7 +98,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { RelocInfo::RUNTIME_ENTRY, reinterpret_cast<intptr_t>(deopt_entry)); reloc_info_writer.Write(&rinfo); - + ASSERT_GE(reloc_info_writer.pos(), + reloc_info->address() + ByteArray::kHeaderSize); curr_address += patch_size(); } prev_address = curr_address; @@ -137,39 +139,39 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - Address call_target_address = pc_after - kPointerSize; - ASSERT(check_code->entry() == - Assembler::target_address_at(call_target_address)); - // The stack check code matches the pattern: - // - // cmp esp, <limit> - // jae ok - // call <stack guard> - // test eax, <loop nesting depth> - // ok: ... - // - // We will patch away the branch so the code is: - // - // cmp esp, <limit> ;; Not changed - // nop - // nop - // call <on-stack replacment> - // test eax, <loop nesting depth> - // ok: - ASSERT(*(call_target_address - 3) == 0x73 && // jae - *(call_target_address - 2) == 0x07 && // offset - *(call_target_address - 1) == 0xe8); // call - *(call_target_address - 3) = 0x90; // nop - *(call_target_address - 2) = 0x90; // nop - Assembler::set_target_address_at(call_target_address, - replacement_code->entry()); + Address call_target_address = pc_after - kIntSize; + ASSERT(check_code->entry() == + Assembler::target_address_at(call_target_address)); + // The stack check code matches the pattern: + // + // cmp esp, <limit> + // jae ok + // call <stack guard> + // test eax, <loop nesting depth> + // ok: ... 
+ // + // We will patch away the branch so the code is: + // + // cmp esp, <limit> ;; Not changed + // nop + // nop + // call <on-stack replacment> + // test eax, <loop nesting depth> + // ok: + ASSERT(*(call_target_address - 3) == 0x73 && // jae + *(call_target_address - 2) == 0x07 && // offset + *(call_target_address - 1) == 0xe8); // call + *(call_target_address - 3) = 0x90; // nop + *(call_target_address - 2) = 0x90; // nop + Assembler::set_target_address_at(call_target_address, + replacement_code->entry()); } void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - Address call_target_address = pc_after - kPointerSize; + Address call_target_address = pc_after - kIntSize; ASSERT(replacement_code->entry() == Assembler::target_address_at(call_target_address)); // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to @@ -429,14 +431,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. + // For the bottommost output frame the context can be gotten from the input + // frame. For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. - ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast<uint32_t>(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) output_frame->SetRegister(esi.code(), value); if (FLAG_trace_deopt) { diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index 18c9319c..9a7d41ad 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -322,22 +322,6 @@ void FullCodeGenerator::EmitReturnSequence() { } -FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( - Token::Value op, Expression* left, Expression* right) { - ASSERT(ShouldInlineSmiCase(op)); - if (op == Token::DIV || op == Token::MOD || op == Token::MUL) { - // We never generate inlined constant smi operations for these. - return kNoConstants; - } else if (right->IsSmiLiteral()) { - return kRightConstant; - } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) { - return kLeftConstant; - } else { - return kNoConstants; - } -} - - void FullCodeGenerator::EffectContext::Plug(Slot* slot) const { } @@ -547,7 +531,7 @@ void FullCodeGenerator::DoTest(Label* if_true, __ j(equal, if_true); __ cmp(result_register(), Factory::false_value()); __ j(equal, if_false); - ASSERT_EQ(0, kSmiTag); + STATIC_ASSERT(kSmiTag == 0); __ test(result_register(), Operand(result_register())); __ j(zero, if_false); __ test(result_register(), Immediate(kSmiTagMask)); @@ -654,6 +638,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, ASSERT(variable != NULL); // Must have been resolved. 
Slot* slot = variable->AsSlot(); Property* prop = variable->AsProperty(); + if (slot != NULL) { switch (slot->type()) { case Slot::PARAMETER: @@ -739,7 +724,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, prop->key()->AsLiteral()->handle()->IsSmi()); __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle())); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin(is_strict() + ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); } } @@ -756,7 +743,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { __ push(esi); // The context is the first argument. __ push(Immediate(pairs)); __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0))); - __ CallRuntime(Runtime::kDeclareGlobals, 3); + __ push(Immediate(Smi::FromInt(strict_mode_flag()))); + __ CallRuntime(Runtime::kDeclareGlobals, 4); // Return value is ignored. } @@ -813,7 +801,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { SetSourcePosition(clause->position()); Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); EmitCallIC(ic, &patch_site); - __ test(eax, Operand(eax)); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. @@ -894,7 +881,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ cmp(edx, Factory::empty_descriptor_array()); __ j(equal, &call_runtime); - // Check that there in an enum cache in the non-empty instance + // Check that there is an enum cache in the non-empty instance // descriptors (edx). This is the case if the next enumeration // index field does not contain a smi. __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset)); @@ -1379,7 +1366,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForAccumulatorValue(value); __ mov(ecx, Immediate(key->handle())); __ mov(edx, Operand(esp, 0)); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { @@ -1393,7 +1382,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ CallRuntime(Runtime::kSetProperty, 3); + __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes + __ CallRuntime(Runtime::kSetProperty, 4); } else { __ Drop(3); } @@ -1571,14 +1561,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } Token::Value op = expr->binary_op(); - ConstantOperand constant = ShouldInlineSmiCase(op) - ? GetConstantOperand(op, expr->target(), expr->value()) - : kNoConstants; - ASSERT(constant == kRightConstant || constant == kNoConstants); - if (constant == kNoConstants) { - __ push(eax); // Left operand goes on the stack. - VisitForAccumulatorValue(expr->value()); - } + __ push(eax); // Left operand goes on the stack. + VisitForAccumulatorValue(expr->value()); OverwriteMode mode = expr->value()->ResultOverwriteAllowed() ? 
OVERWRITE_RIGHT @@ -1590,8 +1574,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { op, mode, expr->target(), - expr->value(), - constant); + expr->value()); } else { EmitBinaryOp(op, mode); } @@ -1639,214 +1622,11 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { } -void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr, - OverwriteMode mode, - bool left_is_constant_smi, - Smi* value) { - NearLabel call_stub, done; - __ add(Operand(eax), Immediate(value)); - __ j(overflow, &call_stub); - JumpPatchSite patch_site(masm_); - patch_site.EmitJumpIfSmi(eax, &done); - - // Undo the optimistic add operation and call the shared stub. - __ bind(&call_stub); - __ sub(Operand(eax), Immediate(value)); - Token::Value op = Token::ADD; - TypeRecordingBinaryOpStub stub(op, mode); - if (left_is_constant_smi) { - __ mov(edx, Immediate(value)); - } else { - __ mov(edx, eax); - __ mov(eax, Immediate(value)); - } - EmitCallIC(stub.GetCode(), &patch_site); - - __ bind(&done); - context()->Plug(eax); -} - - -void FullCodeGenerator::EmitConstantSmiSub(Expression* expr, - OverwriteMode mode, - bool left_is_constant_smi, - Smi* value) { - NearLabel call_stub, done; - if (left_is_constant_smi) { - __ mov(ecx, eax); - __ mov(eax, Immediate(value)); - __ sub(Operand(eax), ecx); - } else { - __ sub(Operand(eax), Immediate(value)); - } - __ j(overflow, &call_stub); - JumpPatchSite patch_site(masm_); - patch_site.EmitJumpIfSmi(eax, &done); - - __ bind(&call_stub); - if (left_is_constant_smi) { - __ mov(edx, Immediate(value)); - __ mov(eax, ecx); - } else { - __ add(Operand(eax), Immediate(value)); // Undo the subtraction. - __ mov(edx, eax); - __ mov(eax, Immediate(value)); - } - Token::Value op = Token::SUB; - TypeRecordingBinaryOpStub stub(op, mode); - EmitCallIC(stub.GetCode(), &patch_site); - - __ bind(&done); - context()->Plug(eax); -} - - -void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr, - Token::Value op, - OverwriteMode mode, - Smi* value) { - NearLabel call_stub, smi_case, done; - int shift_value = value->value() & 0x1f; - - JumpPatchSite patch_site(masm_); - patch_site.EmitJumpIfSmi(eax, &smi_case); - - // Call stub. - __ bind(&call_stub); - __ mov(edx, eax); - __ mov(eax, Immediate(value)); - TypeRecordingBinaryOpStub stub(op, mode); - EmitCallIC(stub.GetCode(), &patch_site); - __ jmp(&done); - - // Smi case. - __ bind(&smi_case); - switch (op) { - case Token::SHL: - if (shift_value != 0) { - __ mov(edx, eax); - if (shift_value > 1) { - __ shl(edx, shift_value - 1); - } - // Convert int result to smi, checking that it is in int range. - ASSERT(kSmiTagSize == 1); // Adjust code if not the case. - __ add(edx, Operand(edx)); - __ j(overflow, &call_stub); - __ mov(eax, edx); // Put result back into eax. - } - break; - case Token::SAR: - if (shift_value != 0) { - __ sar(eax, shift_value); - __ and_(eax, ~kSmiTagMask); - } - break; - case Token::SHR: - if (shift_value < 2) { - __ mov(edx, eax); - __ SmiUntag(edx); - __ shr(edx, shift_value); - __ test(edx, Immediate(0xc0000000)); - __ j(not_zero, &call_stub); - __ SmiTag(edx); - __ mov(eax, edx); // Put result back into eax. 
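// For reference while reading the shift code being removed above: ia32
// smis are 31-bit integers tagged in the low bit (kSmiTag == 0,
// kSmiTagSize == 1). A minimal model of the tagging, plus the range check
// SHR needs because a logical-shift result may not fit a non-negative smi
// (illustrative sketch only):
#include <cstdint>

inline int32_t SmiTag(int32_t value) { return value << 1; }  // low bit 0
inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }

// Mirrors the `test edx, 0xc0000000` above: the result is only taggable
// as a non-negative smi if its top two bits are clear.
inline bool FitsInSmiAfterShr(uint32_t result) {
  return (result & 0xc0000000u) == 0;
}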
- } else { - __ SmiUntag(eax); - __ shr(eax, shift_value); - __ SmiTag(eax); - } - break; - default: - UNREACHABLE(); - } - - __ bind(&done); - context()->Plug(eax); -} - - -void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr, - Token::Value op, - OverwriteMode mode, - Smi* value) { - NearLabel smi_case, done; - - JumpPatchSite patch_site(masm_); - patch_site.EmitJumpIfSmi(eax, &smi_case); - - // The order of the arguments does not matter for bit-ops with a - // constant operand. - __ mov(edx, Immediate(value)); - TypeRecordingBinaryOpStub stub(op, mode); - EmitCallIC(stub.GetCode(), &patch_site); - __ jmp(&done); - - // Smi case. - __ bind(&smi_case); - switch (op) { - case Token::BIT_OR: - __ or_(Operand(eax), Immediate(value)); - break; - case Token::BIT_XOR: - __ xor_(Operand(eax), Immediate(value)); - break; - case Token::BIT_AND: - __ and_(Operand(eax), Immediate(value)); - break; - default: - UNREACHABLE(); - } - - __ bind(&done); - context()->Plug(eax); -} - - -void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr, - Token::Value op, - OverwriteMode mode, - bool left_is_constant_smi, - Smi* value) { - switch (op) { - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - EmitConstantSmiBitOp(expr, op, mode, value); - break; - case Token::SHL: - case Token::SAR: - case Token::SHR: - ASSERT(!left_is_constant_smi); - EmitConstantSmiShiftOp(expr, op, mode, value); - break; - case Token::ADD: - EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value); - break; - case Token::SUB: - EmitConstantSmiSub(expr, mode, left_is_constant_smi, value); - break; - default: - UNREACHABLE(); - } -} - - void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, Token::Value op, OverwriteMode mode, Expression* left, - Expression* right, - ConstantOperand constant) { - if (constant == kRightConstant) { - Smi* value = Smi::cast(*right->AsLiteral()->handle()); - EmitConstantSmiBinaryOp(expr, op, mode, false, value); - return; - } else if (constant == kLeftConstant) { - Smi* value = Smi::cast(*left->AsLiteral()->handle()); - EmitConstantSmiBinaryOp(expr, op, mode, true, value); - return; - } - + Expression* right) { // Do combined smi check of the operands. Left operand is on the // stack. Right operand is in eax. NearLabel done, smi_case, stub_call; @@ -1978,18 +1758,32 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { __ mov(edx, eax); __ pop(eax); // Restore value. __ mov(ecx, prop->key()->AsLiteral()->handle()); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } case KEYED_PROPERTY: { __ push(eax); // Preserve value. - VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ mov(ecx, eax); - __ pop(edx); + if (prop->is_synthetic()) { + ASSERT(prop->obj()->AsVariableProxy() != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + { AccumulatorValueContext for_object(this); + EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + } + __ mov(edx, eax); + __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ mov(ecx, eax); + __ pop(edx); + } __ pop(eax); // Restore value. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? 
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } @@ -2084,7 +1878,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ push(eax); // Value. __ push(esi); // Context. __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kStoreContextSlot, 3); + __ push(Immediate(Smi::FromInt(strict_mode_flag()))); + __ CallRuntime(Runtime::kStoreContextSlot, 4); break; } } @@ -2115,7 +1910,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { } else { __ pop(edx); } - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -2153,7 +1950,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { } // Record source code position before IC call. SetSourcePosition(expr->position()); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -2266,6 +2065,27 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { } +void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, + int arg_count) { + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ push(Operand(esp, arg_count * kPointerSize)); + } else { + __ push(Immediate(Factory::undefined_value())); + } + + // Push the receiver of the enclosing function. + __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize)); + + // Push the strict mode flag. + __ push(Immediate(Smi::FromInt(strict_mode_flag()))); + + __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP + ? Runtime::kResolvePossiblyDirectEvalNoLookup + : Runtime::kResolvePossiblyDirectEval, 4); +} + + void FullCodeGenerator::VisitCall(Call* expr) { #ifdef DEBUG // We want to verify that RecordJSReturnSite gets called on all paths @@ -2294,21 +2114,30 @@ void FullCodeGenerator::VisitCall(Call* expr) { VisitForStackValue(args->at(i)); } - // Push copy of the function - found below the arguments. - __ push(Operand(esp, (arg_count + 1) * kPointerSize)); - - // Push copy of the first argument or undefined if it doesn't exist. - if (arg_count > 0) { - __ push(Operand(esp, arg_count * kPointerSize)); - } else { - __ push(Immediate(Factory::undefined_value())); + // If we know that eval can only be shadowed by eval-introduced + // variables we attempt to load the global eval function directly + // in generated code. If we succeed, there is no need to perform a + // context lookup in the runtime system. + Label done; + if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { + Label slow; + EmitLoadGlobalSlotCheckExtensions(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow); + // Push the function and resolve eval. + __ push(eax); + EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count); + __ jmp(&done); + __ bind(&slow); } - // Push the receiver of the enclosing function and do runtime call. - __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize)); - // Push the strict mode flag. 
- __ push(Immediate(Smi::FromInt(strict_mode_flag()))); - __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); + // Push copy of the function (found below the arguments) and + // resolve eval. + __ push(Operand(esp, (arg_count + 1) * kPointerSize)); + EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count); + if (done.is_linked()) { + __ bind(&done); + } // The runtime call returns a pair of values in eax (function) and // edx (receiver). Touch up the stack with the right values. @@ -2373,7 +2202,9 @@ void FullCodeGenerator::VisitCall(Call* expr) { Literal* key = prop->key()->AsLiteral(); if (key != NULL && key->handle()->IsSymbol()) { // Call to a named property, use call IC. - VisitForStackValue(prop->obj()); + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(prop->obj()); + } EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET); } else { // Call to a keyed property. @@ -3384,7 +3215,6 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); - VisitForAccumulatorValue(args->at(0)); if (FLAG_debug_code) { @@ -3400,7 +3230,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { Label bailout, done, one_char_separator, long_separator, - non_trivial_array, not_size_one_array, loop, loop_condition, + non_trivial_array, not_size_one_array, loop, loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry; ASSERT(args->length() == 2); @@ -3442,7 +3272,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { // If the array has length zero, return the empty string. __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset)); - __ sar(array_length, 1); + __ SmiUntag(array_length); __ j(not_zero, &non_trivial_array); __ mov(result_operand, Factory::empty_string()); __ jmp(&done); @@ -3465,14 +3295,15 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { // Loop condition: while (index < length). // Live loop registers: index, array_length, string, // scratch, string_length, elements. 
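// (Shape of the rewrite below: the old code jumped into a loop that tested
// index against array_length both at the top and at the bottom. Since the
// empty-array case has already returned, the entry jump and the top test
// are redundant; the new code keeps only the bottom test, roughly
//   do { body; ++index; } while (index < array_length);
// with a debug-only assert that index < array_length on entry.)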
- __ jmp(&loop_condition); + if (FLAG_debug_code) { + __ cmp(index, Operand(array_length)); + __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin"); + } __ bind(&loop); - __ cmp(index, Operand(array_length)); - __ j(greater_equal, &done); - - __ mov(string, FieldOperand(elements, index, - times_pointer_size, - FixedArray::kHeaderSize)); + __ mov(string, FieldOperand(elements, + index, + times_pointer_size, + FixedArray::kHeaderSize)); __ test(string, Immediate(kSmiTagMask)); __ j(zero, &bailout); __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset)); @@ -3485,7 +3316,6 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { FieldOperand(string, SeqAsciiString::kLengthOffset)); __ j(overflow, &bailout); __ add(Operand(index), Immediate(1)); - __ bind(&loop_condition); __ cmp(index, Operand(array_length)); __ j(less, &loop); @@ -3514,7 +3344,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); __ and_(scratch, Immediate( kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask)); - __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag); + __ cmp(scratch, ASCII_STRING_TYPE); __ j(not_equal, &bailout); // Add (separator length times array_length) - separator length @@ -3711,19 +3541,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); Property* prop = expr->expression()->AsProperty(); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - if (prop == NULL && var == NULL) { - // Result of deleting non-property, non-variable reference is true. - // The subexpression may have side effects. - VisitForEffect(expr->expression()); - context()->Plug(true); - } else if (var != NULL && - !var->is_global() && - var->AsSlot() != NULL && - var->AsSlot()->type() != Slot::LOOKUP) { - // Result of deleting non-global, non-dynamic variables is false. - // The subexpression does not have side effects. - context()->Plug(false); - } else if (prop != NULL) { + + if (prop != NULL) { if (prop->is_synthetic()) { // Result of deleting parameters is false, even when they rewrite // to accesses on the arguments object. @@ -3731,21 +3550,38 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } else { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); + __ push(Immediate(Smi::FromInt(strict_mode_flag()))); __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); context()->Plug(eax); } - } else if (var->is_global()) { - __ push(GlobalObjectOperand()); - __ push(Immediate(var->name())); - __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); - context()->Plug(eax); + } else if (var != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); + if (var->is_global()) { + __ push(GlobalObjectOperand()); + __ push(Immediate(var->name())); + __ push(Immediate(Smi::FromInt(kNonStrictMode))); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + context()->Plug(eax); + } else if (var->AsSlot() != NULL && + var->AsSlot()->type() != Slot::LOOKUP) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + context()->Plug(false); + } else { + // Non-global variable. Call the runtime to try to delete from the + // context where the variable was introduced. 
+ __ push(context_register()); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kDeleteContextSlot, 2); + context()->Plug(eax); + } } else { - // Non-global variable. Call the runtime to try to delete from the - // context where the variable was introduced. - __ push(context_register()); - __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kDeleteContextSlot, 2); - context()->Plug(eax); + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + context()->Plug(true); } break; } @@ -3759,17 +3595,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - // Notice that the labels are swapped. - context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + + // Notice that the labels are swapped. + context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } @@ -3995,7 +3837,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case NAMED_PROPERTY: { __ mov(ecx, prop->key()->AsLiteral()->handle()); __ pop(edx); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4010,7 +3854,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case KEYED_PROPERTY: { __ pop(ecx); __ pop(edx); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4090,21 +3936,18 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); if (check->Equals(Heap::number_symbol())) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, if_true); + __ JumpIfSmi(eax, if_true); __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map()); Split(equal, if_true, if_false, fall_through); } else if (check->Equals(Heap::string_symbol())) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, if_false); + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx); + __ j(above_equal, if_false); // Check for undetectable objects => false. 
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); - __ test(ecx, Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, if_false); - __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); - Split(below, if_true, if_false, fall_through); + __ test_b(FieldOperand(edx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + Split(zero, if_true, if_false, fall_through); } else if (check->Equals(Heap::boolean_symbol())) { __ cmp(eax, Factory::true_value()); __ j(equal, if_true); @@ -4113,39 +3956,28 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, } else if (check->Equals(Heap::undefined_symbol())) { __ cmp(eax, Factory::undefined_value()); __ j(equal, if_true); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, if_false); + __ JumpIfSmi(eax, if_false); // Check for undetectable objects => true. __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); __ test(ecx, Immediate(1 << Map::kIsUndetectable)); Split(not_zero, if_true, if_false, fall_through); } else if (check->Equals(Heap::function_symbol())) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, if_false); - __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); - __ j(equal, if_true); - // Regular expressions => 'function' (they are callable). - __ CmpInstanceType(edx, JS_REGEXP_TYPE); - Split(equal, if_true, if_false, fall_through); + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, FIRST_FUNCTION_CLASS_TYPE, edx); + Split(above_equal, if_true, if_false, fall_through); } else if (check->Equals(Heap::object_symbol())) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, if_false); + __ JumpIfSmi(eax, if_false); __ cmp(eax, Factory::null_value()); __ j(equal, if_true); - // Regular expressions => 'function', not 'object'. - __ CmpObjectType(eax, JS_REGEXP_TYPE, edx); - __ j(equal, if_false); + __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edx); + __ j(below, if_false); + __ CmpInstanceType(edx, FIRST_FUNCTION_CLASS_TYPE); + __ j(above_equal, if_false); // Check for undetectable objects => false. - __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); - __ test(ecx, Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, if_false); - // Check for JS objects => true. 
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); - __ cmp(ecx, FIRST_JS_OBJECT_TYPE); - __ j(less, if_false); - __ cmp(ecx, LAST_JS_OBJECT_TYPE); - Split(less_equal, if_true, if_false, fall_through); + __ test_b(FieldOperand(edx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + Split(zero, if_true, if_false, fall_through); } else { if (if_false != fall_through) __ jmp(if_false); } @@ -4358,6 +4190,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) { void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) { + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1); + break; + default: + break; + } + __ call(ic, RelocInfo::CODE_TARGET); if (patch_site != NULL && patch_site->is_bound()) { patch_site->EmitPatchInfo(); diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc index 76681ce1..6b9e7496 100644 --- a/src/ia32/ic-ia32.cc +++ b/src/ia32/ic-ia32.cc @@ -108,6 +108,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm, Register name, Register r0, Register r1) { + // Assert that name contains a string. + if (FLAG_debug_code) __ AbortIfNotString(name); + // Compute the capacity mask. const int kCapacityOffset = StringDictionary::kHeaderSize + @@ -758,7 +761,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { +void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -798,7 +802,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // Slow case: call runtime. __ bind(&slow); - GenerateRuntimeSetProperty(masm); + GenerateRuntimeSetProperty(masm, strict_mode); // Check whether the elements is a pixel array. __ bind(&check_pixel_array); @@ -806,28 +810,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // ecx: key (a smi) // edx: receiver // edi: elements array - __ CheckMap(edi, Factory::pixel_array_map(), &slow, true); - // Check that the value is a smi. If a conversion is needed call into the - // runtime to convert and clamp. - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &slow); - __ mov(ebx, ecx); - __ SmiUntag(ebx); - __ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset)); - __ j(above_equal, &slow); - __ mov(ecx, eax); // Save the value. Key is not longer needed. - __ SmiUntag(ecx); - { // Clamp the value to [0..255]. - Label done; - __ test(ecx, Immediate(0xFFFFFF00)); - __ j(zero, &done); - __ setcc(negative, ecx); // 1 if negative, 0 if positive. - __ dec_b(ecx); // 0 if negative, 255 if positive. - __ bind(&done); - } - __ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset)); - __ mov_b(Operand(edi, ebx, times_1, 0), ecx); - __ ret(0); // Return value in eax. + GenerateFastPixelArrayStore(masm, + edx, + ecx, + eax, + edi, + ebx, + false, + NULL, + &slow, + &slow, + &slow); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length.
Used for adding one @@ -1208,7 +1201,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // -- esp[(argc + 1) * 4] : receiver // ----------------------------------- + // Check if the name is a string. + Label miss; + __ test(ecx, Immediate(kSmiTagMask)); + __ j(zero, &miss); + Condition cond = masm->IsObjectStringType(ecx, eax, eax); + __ j(NegateCondition(cond), &miss); GenerateCallNormal(masm, argc); + __ bind(&miss); GenerateMiss(masm, argc); } @@ -1489,7 +1489,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { void StoreIC::GenerateMegamorphic(MacroAssembler* masm, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : name @@ -1500,7 +1500,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, NOT_IN_LOOP, MONOMORPHIC, - extra_ic_state); + strict_mode); StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg); // Cache miss: Jump to runtime. @@ -1618,7 +1618,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { } -void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { +void StoreIC::GenerateGlobalProxy(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : name @@ -1629,14 +1630,17 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { __ push(edx); __ push(ecx); __ push(eax); - __ push(ebx); + __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes + __ push(Immediate(Smi::FromInt(strict_mode))); + __ push(ebx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } -void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { +void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -1648,10 +1652,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { __ push(edx); __ push(ecx); __ push(eax); - __ push(ebx); + __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes + __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode. + __ push(ebx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 7724f1b8..9dcca9ee 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -43,13 +43,20 @@ class SafepointGenerator : public PostCallGenerator { public: SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, - int deoptimization_index) + int deoptimization_index, + bool ensure_reloc_space = false) : codegen_(codegen), pointers_(pointers), - deoptimization_index_(deoptimization_index) { } + deoptimization_index_(deoptimization_index), + ensure_reloc_space_(ensure_reloc_space) { } virtual ~SafepointGenerator() { } virtual void Generate() { + // Ensure that we have enough space in the reloc info to patch + // this with calls when doing deoptimization. 
+ if (ensure_reloc_space_) { + codegen_->EnsureRelocSpaceForDeoptimization(); + } codegen_->RecordSafepoint(pointers_, deoptimization_index_); } @@ -57,6 +64,7 @@ class SafepointGenerator : public PostCallGenerator { LCodeGen* codegen_; LPointerMap* pointers_; int deoptimization_index_; + bool ensure_reloc_space_; }; @@ -70,6 +78,7 @@ bool LCodeGen::GenerateCode() { return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && + GenerateRelocPadding() && GenerateSafepointTable(); } @@ -84,8 +93,8 @@ void LCodeGen::FinishCode(Handle<Code> code) { void LCodeGen::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); + SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LCodeGen in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -114,6 +123,16 @@ void LCodeGen::Comment(const char* format, ...) { } +bool LCodeGen::GenerateRelocPadding() { + int reloc_size = masm()->relocation_writer_size(); + while (reloc_size < deoptimization_reloc_size.min_size) { + __ RecordComment(RelocInfo::kFillerCommentString, true); + reloc_size += RelocInfo::kMinRelocCommentSize; + } + return !is_aborted(); +} + + bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); @@ -155,6 +174,45 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is still in edi. + __ push(edi); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both eax and esi. It replaces the context + // passed to us. It's saved in the stack and kept live in esi. + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); + + // Copy parameters into context if necessary. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ mov(eax, Operand(ebp, parameter_offset)); + // Store it in the context. + int context_offset = Context::SlotOffset(slot->index()); + __ mov(Operand(esi, context_offset), eax); + // Update the write barrier. This clobbers all involved + // registers, so we have to use a third register to avoid + // clobbering esi. + __ mov(ecx, esi); + __ RecordWrite(ecx, context_offset, eax, ebx); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. if (FLAG_trace) { // We have not executed any compiled code yet, so esi still holds the @@ -327,6 +385,22 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, } +void LCodeGen::EnsureRelocSpaceForDeoptimization() { + // Since we patch the reloc info with RUNTIME_ENTRY calls, every patch + // site will take up 2 bytes + any pc-jumps. + // We are conservative and always reserve 6 bytes in the case where a + // simple pc-jump is not enough.
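// (Rule encoded in the body that follows: a patch site costs 2 bytes of
// reloc info when its pc-delta from the previous site fits the short
// 6-bit encoding, and a conservative 6 bytes otherwise, i.e. roughly
//   min_size += is_uintn(pc_delta, 6) ? 2 : 6;
// GenerateRelocPadding above then pads the reloc writer up to the
// accumulated minimum so the deoptimizer can patch in RUNTIME_ENTRY
// calls without running out of space.)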
+ uint32_t pc_delta = + masm()->pc_offset() - deoptimization_reloc_size.last_pc_offset; + if (is_uintn(pc_delta, 6)) { + deoptimization_reloc_size.min_size += 2; + } else { + deoptimization_reloc_size.min_size += 6; + } + deoptimization_reloc_size.last_pc_offset = masm()->pc_offset(); +} + + void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged) { @@ -374,10 +448,13 @@ void LCodeGen::CallCode(Handle<Code> code, ASSERT(instr != NULL); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); + if (!adjusted) { __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); } __ call(code, mode); + + EnsureRelocSpaceForDeoptimization(); RegisterLazyDeoptimization(instr); // Signal that we don't inline smi code before these stubs in the @@ -511,7 +588,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<DeoptimizationInputData> data = Factory::NewDeoptimizationInputData(length, TENURED); - data->SetTranslationByteArray(*translations_.CreateByteArray()); + Handle<ByteArray> translations = translations_.CreateByteArray(); + data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); Handle<FixedArray> literals = @@ -587,6 +665,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -1064,35 +1148,36 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - LOperand* left = instr->InputAt(0); - LOperand* right = instr->InputAt(1); + XMMRegister left = ToDoubleRegister(instr->InputAt(0)); + XMMRegister right = ToDoubleRegister(instr->InputAt(1)); + XMMRegister result = ToDoubleRegister(instr->result()); // Modulo uses a fixed result register. - ASSERT(instr->op() == Token::MOD || left->Equals(instr->result())); + ASSERT(instr->op() == Token::MOD || left.is(result)); switch (instr->op()) { case Token::ADD: - __ addsd(ToDoubleRegister(left), ToDoubleRegister(right)); + __ addsd(left, right); break; case Token::SUB: - __ subsd(ToDoubleRegister(left), ToDoubleRegister(right)); + __ subsd(left, right); break; case Token::MUL: - __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right)); + __ mulsd(left, right); break; case Token::DIV: - __ divsd(ToDoubleRegister(left), ToDoubleRegister(right)); + __ divsd(left, right); break; case Token::MOD: { // Pass two doubles as arguments on the stack. __ PrepareCallCFunction(4, eax); - __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); - __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right)); + __ movdbl(Operand(esp, 0 * kDoubleSize), left); + __ movdbl(Operand(esp, 1 * kDoubleSize), right); __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); // Return value is in st(0) on ia32. // Store it into the (fixed) result register. 
__ sub(Operand(esp), Immediate(kDoubleSize)); __ fstp_d(Operand(esp, 0)); - __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0)); + __ movdbl(result, Operand(esp, 0)); __ add(Operand(esp), Immediate(kDoubleSize)); break; } @@ -1569,6 +1654,19 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { } +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + if (FLAG_debug_code) { + __ AbortIfNotString(input); + } + + __ mov(result, FieldOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(result, result); +} + + void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); @@ -1578,7 +1676,7 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { __ test(FieldOperand(input, String::kHashFieldOffset), Immediate(String::kContainsCachedArrayIndexMask)); NearLabel done; - __ j(not_zero, &done); + __ j(zero, &done); __ mov(result, Factory::false_value()); __ bind(&done); } @@ -1593,7 +1691,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch( __ test(FieldOperand(input, String::kHashFieldOffset), Immediate(String::kContainsCachedArrayIndexMask)); - EmitBranch(true_block, false_block, not_equal); + EmitBranch(true_block, false_block, equal); } @@ -1764,11 +1862,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { Register object = ToRegister(instr->InputAt(0)); Register temp = ToRegister(instr->TempAt(0)); - // A Smi is not instance of anything. + // A Smi is not an instance of anything. __ test(object, Immediate(kSmiTagMask)); __ j(zero, &false_result, not_taken); - // This is the inlined call site instanceof cache. The two occourences of the + // This is the inlined call site instanceof cache. The two occurrences of the // hole value will be patched to the last map/result pair generated by the // instanceof stub. NearLabel cache_miss; @@ -1780,10 +1878,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { __ mov(eax, Factory::the_hole_value()); // Patched to either true or false. __ jmp(&done); - // The inlined call site cache did not match. Check null and string before - // calling the deferred code. + // The inlined call site cache did not match. Check for null and string + // before calling the deferred code. __ bind(&cache_miss); - // Null is not instance of anything. + // Null is not an instance of anything.
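// (How the inline cache above works, sketched with stand-in names: the
// call site embeds two patchable hole constants -- the last map compared
// and the last boolean result -- so the fast path is roughly
//   if (object->map() == cached_map) return cached_result;
//   result = InstanceofStub(object);  // slow path re-patches both values
// and only cache misses fall through to the checks below.)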
__ cmp(object, Factory::null_value()); __ j(equal, &false_result); @@ -1825,19 +1923,11 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ mov(InstanceofStub::right(), Immediate(instr->function())); static const int kAdditionalDelta = 16; int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; - Label before_push_delta; - __ bind(&before_push_delta); __ mov(temp, Immediate(delta)); - __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ call(stub.GetCode(), RelocInfo::CODE_TARGET); - ASSERT_EQ(kAdditionalDelta, - masm_->SizeOfCodeGeneratedSince(&before_push_delta)); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + __ StoreToSafepointRegisterSlot(temp, temp); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false); // Put the result value into the eax slot and restore all registers. - __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax); - + __ StoreToSafepointRegisterSlot(eax, eax); __ PopSafepointRegisters(); } @@ -2092,13 +2182,13 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) { - Register external_elements = ToRegister(instr->external_pointer()); + Register external_pointer = ToRegister(instr->external_pointer()); Register key = ToRegister(instr->key()); Register result = ToRegister(instr->result()); - ASSERT(result.is(external_elements)); + ASSERT(result.is(external_pointer)); // Load the result. - __ movzx_b(result, Operand(external_elements, key, times_1, 0)); + __ movzx_b(result, Operand(external_pointer, key, times_1, 0)); } @@ -2221,7 +2311,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { RegisterEnvironmentForDeoptimization(env); SafepointGenerator safepoint_generator(this, pointers, - env->deoptimization_index()); + env->deoptimization_index(), + true); v8::internal::ParameterCount actual(eax); __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); } @@ -2270,7 +2361,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, LInstruction* instr) { // Change context if needed. bool change_context = - (graph()->info()->closure()->context() != function->context()) || + (info()->closure()->context() != function->context()) || scope()->contains_with() || (scope()->num_heap_slots() > 0); if (change_context) { @@ -2289,10 +2380,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, RecordPosition(pointers->position()); // Invoke function. - if (*function == *graph()->info()->closure()) { + if (*function == *info()->closure()) { __ CallSelf(); } else { __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); + EnsureRelocSpaceForDeoptimization(); } // Setup deoptimization. @@ -2347,7 +2439,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { if (!tmp.is(eax)) __ mov(tmp, eax); // Restore input_reg after call to runtime. 
- __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize)); + __ LoadFromSafepointRegisterSlot(input_reg, input_reg); __ bind(&allocated); __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); @@ -2355,7 +2447,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); - __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp); + __ StoreToSafepointRegisterSlot(input_reg, tmp); __ bind(&done); __ PopSafepointRegisters(); @@ -2480,11 +2572,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { XMMRegister xmm_scratch = xmm0; XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); - ExternalReference negative_infinity = - ExternalReference::address_of_negative_infinity(); - __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity)); - __ ucomisd(xmm_scratch, input_reg); - DeoptimizeIf(equal, instr->environment()); __ xorpd(xmm_scratch, xmm_scratch); __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. __ sqrtsd(input_reg, input_reg); @@ -2707,7 +2794,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { ASSERT(ToRegister(instr->value()).is(eax)); __ mov(ecx, instr->name()); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2718,6 +2807,25 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } +void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) { + Register external_pointer = ToRegister(instr->external_pointer()); + Register key = ToRegister(instr->key()); + Register value = ToRegister(instr->value()); + ASSERT(ToRegister(instr->TempAt(0)).is(eax)); + + __ mov(eax, value); + { // Clamp the value to [0..255]. + NearLabel done; + __ test(eax, Immediate(0xFFFFFF00)); + __ j(zero, &done); + __ setcc(negative, eax); // 1 if negative, 0 if positive. + __ dec_b(eax); // 0 if negative, 255 if positive. + __ bind(&done); + } + __ mov_b(Operand(external_pointer, key, times_1, 0), eax); +} + + void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register value = ToRegister(instr->value()); Register elements = ToRegister(instr->object()); @@ -2756,7 +2864,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->key()).is(ecx)); ASSERT(ToRegister(instr->value()).is(eax)); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2827,19 +2937,20 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { __ test(result, Immediate(kStringRepresentationMask)); __ j(not_zero, deferred->entry()); - // Check for 1-byte or 2-byte string. + // Check for ASCII or two-byte string. __ bind(&flat_string); STATIC_ASSERT(kAsciiStringTag != 0); __ test(result, Immediate(kStringEncodingMask)); __ j(not_zero, &ascii_string); - // 2-byte string. - // Load the 2-byte character code into the result register. + // Two-byte string. 
+ // Load the two-byte character code into the result register. STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); if (instr->index()->IsConstantOperand()) { __ movzx_w(result, FieldOperand(string, - SeqTwoByteString::kHeaderSize + 2 * const_index)); + SeqTwoByteString::kHeaderSize + + (kUC16Size * const_index))); } else { __ movzx_w(result, FieldOperand(string, index, @@ -2895,7 +3006,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ AbortIfNotSmi(eax); } __ SmiUntag(eax); - __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax); + __ StoreToSafepointRegisterSlot(result, eax); __ PopSafepointRegisters(); } @@ -2963,7 +3074,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // TODO(3095996): Put a valid pointer value in the stack slot where the result // register is stored, as this register is in the pointer map, but contains an // integer value. - __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0)); + __ StoreToSafepointRegisterSlot(reg, Immediate(0)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); @@ -2975,7 +3086,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // number. __ bind(&done); __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); - __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg); + __ StoreToSafepointRegisterSlot(reg, reg); __ PopSafepointRegisters(); } @@ -3017,7 +3128,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); - __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax); + __ StoreToSafepointRegisterSlot(reg, eax); __ PopSafepointRegisters(); } @@ -3589,21 +3700,18 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Handle<String> type_name) { Condition final_branch_condition = no_condition; if (type_name->Equals(Heap::number_symbol())) { - __ test(input, Immediate(kSmiTagMask)); - __ j(zero, true_label); + __ JumpIfSmi(input, true_label); __ cmp(FieldOperand(input, HeapObject::kMapOffset), Factory::heap_number_map()); final_branch_condition = equal; } else if (type_name->Equals(Heap::string_symbol())) { - __ test(input, Immediate(kSmiTagMask)); - __ j(zero, false_label); - __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); + __ JumpIfSmi(input, false_label); + __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); + __ j(above_equal, false_label); __ test_b(FieldOperand(input, Map::kBitFieldOffset), 1 << Map::kIsUndetectable); - __ j(not_zero, false_label); - __ CmpInstanceType(input, FIRST_NONSTRING_TYPE); - final_branch_condition = below; + final_branch_condition = zero; } else if (type_name->Equals(Heap::boolean_symbol())) { __ cmp(input, Factory::true_value()); @@ -3614,8 +3722,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, } else if (type_name->Equals(Heap::undefined_symbol())) { __ cmp(input, Factory::undefined_value()); __ j(equal, true_label); - __ test(input, Immediate(kSmiTagMask)); - __ j(zero, false_label); + __ JumpIfSmi(input, false_label); // Check for undetectable objects => true. 
__ mov(input, FieldOperand(input, HeapObject::kMapOffset)); __ test_b(FieldOperand(input, Map::kBitFieldOffset), @@ -3623,8 +3730,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = not_zero; } else if (type_name->Equals(Heap::function_symbol())) { - __ test(input, Immediate(kSmiTagMask)); - __ j(zero, false_label); + __ JumpIfSmi(input, false_label); __ CmpObjectType(input, JS_FUNCTION_TYPE, input); __ j(equal, true_label); // Regular expressions => 'function' (they are callable). @@ -3632,22 +3738,18 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = equal; } else if (type_name->Equals(Heap::object_symbol())) { - __ test(input, Immediate(kSmiTagMask)); - __ j(zero, false_label); + __ JumpIfSmi(input, false_label); __ cmp(input, Factory::null_value()); __ j(equal, true_label); // Regular expressions => 'function', not 'object'. - __ CmpObjectType(input, JS_REGEXP_TYPE, input); - __ j(equal, false_label); + __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input); + __ j(below, false_label); + __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE); + __ j(above_equal, false_label); // Check for undetectable objects => false. __ test_b(FieldOperand(input, Map::kBitFieldOffset), 1 << Map::kIsUndetectable); - __ j(not_zero, false_label); - // Check for JS objects => true. - __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE); - __ j(below, false_label); - __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE); - final_branch_condition = below_equal; + final_branch_condition = zero; } else { final_branch_condition = not_equal; @@ -3731,10 +3833,15 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { LEnvironment* env = instr->deoptimization_environment(); RecordPosition(pointers->position()); RegisterEnvironmentForDeoptimization(env); + // Create safepoint generator that will also ensure enough space in the + // reloc info for patching in deoptimization (since this is invoking a + // builtin) SafepointGenerator safepoint_generator(this, pointers, - env->deoptimization_index()); + env->deoptimization_index(), + true); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ push(Immediate(Smi::FromInt(strict_mode_flag()))); __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator); } diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h index 3ac3a416..ecd6caa3 100644 --- a/src/ia32/lithium-codegen-ia32.h +++ b/src/ia32/lithium-codegen-ia32.h @@ -56,16 +56,18 @@ class LCodeGen BASE_EMBEDDED { deoptimizations_(4), deoptimization_literals_(8), inlined_function_count_(0), - scope_(chunk->graph()->info()->scope()), + scope_(info->scope()), status_(UNUSED), deferred_(8), osr_pc_offset_(-1), + deoptimization_reloc_size(), resolver_(this) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } // Simple accessors. MacroAssembler* masm() const { return masm_; } + CompilationInfo* info() const { return info_; } // Support for converting LOperands to assembler types. Operand ToOperand(LOperand* op) const; @@ -102,6 +104,8 @@ class LCodeGen BASE_EMBEDDED { // Emit frame translation commands for an environment. void WriteTranslation(LEnvironment* environment, Translation* translation); + void EnsureRelocSpaceForDeoptimization(); + // Declare methods that deal with the individual node types. 
#define DECLARE_DO(type) void Do##type(L##type* node); LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -120,6 +124,10 @@ class LCodeGen BASE_EMBEDDED { bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + int strict_mode_flag() const { + return info()->is_strict() ? kStrictMode : kNonStrictMode; + } + LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } @@ -147,6 +155,9 @@ class LCodeGen BASE_EMBEDDED { bool GeneratePrologue(); bool GenerateBody(); bool GenerateDeferredCode(); + // Pad the reloc info to ensure that we have enough space to patch during + // deoptimization. + bool GenerateRelocPadding(); bool GenerateSafepointTable(); void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr, @@ -200,6 +211,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); @@ -247,6 +259,13 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; + struct DeoptimizationRelocSize { + int min_size; + int last_pc_offset; + }; + + DeoptimizationRelocSize deoptimization_reloc_size; + // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. SafepointTableBuilder safepoints_; diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc index 0ad38192..4440cdfa 100644 --- a/src/ia32/lithium-ia32.cc +++ b/src/ia32/lithium-ia32.cc @@ -404,7 +404,7 @@ void LChunk::MarkEmptyBlocks() { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -413,7 +413,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ -451,7 +469,7 @@ int LChunk::GetParameterStackSlot(int index) const { // shift all parameter indexes down by the number of parameters, and // make sure they end up negative so they are distinguishable from // spill slots. - int result = index - graph()->info()->scope()->num_parameters() - 1; + int result = index - info()->scope()->num_parameters() - 1; ASSERT(result < 0); return result; } @@ -459,7 +477,7 @@ int LChunk::GetParameterStackSlot(int index) const { // A parameter relative to ebp in the arguments stub. int LChunk::ParameterAt(int index) { ASSERT(-1 <= index); // -1 is the receiver. 
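// (Worked example for ParameterAt below, assuming the standard ia32 frame
// layout with the saved ebp at [ebp] and the return address at [ebp + 4]:
// with 2 parameters, offset = (1 + 2 - index) * kPointerSize, so the
// receiver (index -1) is at ebp + 16, parameter 0 at ebp + 12, and
// parameter 1 at ebp + 8.)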
- return (1 + graph()->info()->scope()->num_parameters() - index) * + return (1 + info()->scope()->num_parameters() - index) * kPointerSize; } @@ -498,7 +516,7 @@ Representation LChunk::LookupLiteralRepresentation( LChunk* LChunkBuilder::Build() { ASSERT(is_unused()); - chunk_ = new LChunk(graph()); + chunk_ = new LChunk(info(), graph()); HPhase phase("Building chunk", chunk_); status_ = BUILDING; const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); @@ -515,8 +533,8 @@ LChunk* LChunkBuilder::Build() { void LChunkBuilder::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LChunk building in @\"%s\": ", *debug_name); + SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LChunk building in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -852,6 +870,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); + ASSERT(op != Token::MOD); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); LArithmeticD* result = new LArithmeticD(op, left, right); @@ -1147,8 +1166,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( new LInstanceOfKnownGlobal( UseFixed(instr->value(), InstanceofStub::left()), FixedTemp(edi)); - MarkAsSaveDoubles(result); - return AssignEnvironment(AssignPointerMap(DefineFixed(result, eax))); + return MarkAsCall(DefineFixed(result, eax), instr); } @@ -1223,7 +1241,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { case kMathSqrt: return DefineSameAsFirst(result); case kMathPowHalf: - return AssignEnvironment(DefineSameAsFirst(result)); + return DefineSameAsFirst(result); default: UNREACHABLE(); return NULL; @@ -1366,8 +1384,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) { // We call a C function for double modulo. It can't trigger a GC. // We need to use fixed result register for the call. // TODO(fschneider): Allow any register as input registers. 
- LOperand* left = UseFixedDouble(instr->left(), xmm1); - LOperand* right = UseFixedDouble(instr->right(), xmm2); + LOperand* left = UseFixedDouble(instr->left(), xmm2); + LOperand* right = UseFixedDouble(instr->right(), xmm1); LArithmeticD* result = new LArithmeticD(Token::MOD, left, right); return MarkAsCall(DefineFixedDouble(result, xmm1), instr); } @@ -1521,6 +1539,15 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { } +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LGetCachedArrayIndex(value)); +} + + LInstruction* LChunkBuilder::DoHasCachedArrayIndex( HHasCachedArrayIndex* instr) { ASSERT(instr->value()->representation().IsTagged()); @@ -1833,6 +1860,23 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + ASSERT(instr->value()->representation().IsInteger32()); + ASSERT(instr->external_pointer()->representation().IsExternal()); + ASSERT(instr->key()->representation().IsInteger32()); + + LOperand* external_pointer = UseRegister(instr->external_pointer()); + LOperand* val = UseRegister(instr->value()); + LOperand* key = UseRegister(instr->key()); + // The generated code requires that the clamped value is in a byte + // register. eax is an arbitrary choice to satisfy this requirement. + LOperand* clamped = FixedTemp(eax); + + return new LStorePixelArrayElement(external_pointer, key, val, clamped); +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* object = UseFixed(instr->object(), edx); @@ -1916,8 +1960,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { - LDeleteProperty* result = new LDeleteProperty(Use(instr->object()), - UseOrConstant(instr->key())); + LDeleteProperty* result = + new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key())); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -1950,8 +1994,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. 
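+ // (Sketch, for illustration: in + //   function f(a) { return arguments.length + arguments[0]; } + // both uses are lowered to direct accesses to the caller's frame, while + //   function g() { var args = arguments; return args; } + // needs the real arguments object and therefore causes a bailout.)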
return NULL; } diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h index f1b9ffc9..f8cb8710 100644 --- a/src/ia32/lithium-ia32.h +++ b/src/ia32/lithium-ia32.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -94,6 +92,7 @@ class LCodeGen; V(FixedArrayLength) \ V(FunctionLiteral) \ V(Gap) \ + V(GetCachedArrayIndex) \ V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ @@ -150,6 +149,7 @@ class LCodeGen; V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ + V(StorePixelArrayElement) \ V(StringCharCodeAt) \ V(StringLength) \ V(SubI) \ @@ -744,6 +744,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> { }; +class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LHasCachedArrayIndex(LOperand* value) { @@ -1580,34 +1591,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 1> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamed(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) { inputs_[0] = obj; inputs_[1] = val; + temps_[0] = temp; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle<Object> name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) - : LStoreNamed(obj, val) { - temps_[0] = temp; - } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle<Object> name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1626,6 +1626,8 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> { DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + virtual void PrintDataTo(StringStream* stream); + LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } @@ -1633,15 +1635,17 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1651,14 +1655,25 @@ class 
LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 1> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} + LStorePixelArrayElement(LOperand* external_pointer, + LOperand* key, + LOperand* val, + LOperand* clamped) { + inputs_[0] = external_pointer; + inputs_[1] = key; + inputs_[2] = val; + temps_[0] = clamped; + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) + DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement, + "store-pixel-array-element") + DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement) + + LOperand* external_pointer() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; @@ -1676,6 +1691,8 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> { DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + virtual void PrintDataTo(StringStream* stream); + LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } @@ -1900,8 +1917,9 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> { class LChunkBuilder; class LChunk: public ZoneObject { public: - explicit LChunk(HGraph* graph) + explicit LChunk(CompilationInfo* info, HGraph* graph) : spill_slot_count_(0), + info_(info), graph_(graph), instructions_(32), pointer_maps_(8), @@ -1918,6 +1936,7 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } void AddGapMove(int index, LOperand* from, LOperand* to); @@ -1954,6 +1973,7 @@ class LChunk: public ZoneObject { private: int spill_slot_count_; + CompilationInfo* info_; HGraph* const graph_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; @@ -1963,8 +1983,9 @@ class LChunk: public ZoneObject { class LChunkBuilder BASE_EMBEDDED { public: - LChunkBuilder(HGraph* graph, LAllocator* allocator) + LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) : chunk_(NULL), + info_(info), graph_(graph), status_(UNUSED), current_instruction_(NULL), @@ -1993,6 +2014,7 @@ class LChunkBuilder BASE_EMBEDDED { }; LChunk* chunk() const { return chunk_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } bool is_unused() const { return status_ == UNUSED; } @@ -2099,6 +2121,7 @@ class LChunkBuilder BASE_EMBEDDED { HArithmeticBinaryOperation* instr); LChunk* chunk_; + CompilationInfo* info_; HGraph* const graph_; Status status_; HInstruction* current_instruction_; diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc index cd612b52..91b6651f 100644 --- a/src/ia32/macro-assembler-ia32.cc +++ b/src/ia32/macro-assembler-ia32.cc @@ -448,6 +448,97 @@ void MacroAssembler::PopTryHandler() { } +void MacroAssembler::Throw(Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // eax must hold the exception. + if (!value.is(eax)) { + mov(eax, value); + } + + // Drop the sp to the top of the handler. 
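+ // (Handler layout, per the StackHandlerConstants asserted here: esp[0] + // holds the next handler (kNextOffset == 0), esp[4] the frame pointer + // (kFPOffset), esp[8] the state and esp[12] the return pc (kPCOffset), + // four words in total, matching kSize == 4 * kPointerSize.)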
+ ExternalReference handler_address(Top::k_handler_address); + mov(esp, Operand::StaticVariable(handler_address)); + + // Restore next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(Operand::StaticVariable(handler_address)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); + pop(ebp); + pop(edx); // Remove state. + + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of + // a JS entry frame. + Set(esi, Immediate(0)); // Tentatively set context pointer to NULL. + NearLabel skip; + cmp(ebp, 0); + j(equal, &skip, not_taken); + mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + bind(&skip); + + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + ret(0); +} + + +void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, + Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // eax must hold the exception. + if (!value.is(eax)) { + mov(eax, value); + } + + // Drop sp to the top stack handler. + ExternalReference handler_address(Top::k_handler_address); + mov(esp, Operand::StaticVariable(handler_address)); + + // Unwind the handlers until the ENTRY handler is found. + NearLabel loop, done; + bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY)); + j(equal, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + mov(esp, Operand(esp, kNextOffset)); + jmp(&loop); + bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(Operand::StaticVariable(handler_address)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + mov(eax, false); + mov(Operand::StaticVariable(external_caught), eax); + + // Set pending exception and eax to out of memory exception. + ExternalReference pending_exception(Top::k_pending_exception_address); + mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException())); + mov(Operand::StaticVariable(pending_exception), eax); + } + + // Clear the context pointer. + Set(esi, Immediate(0)); + + // Restore fp from handler and discard handler state. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); + pop(ebp); + pop(edx); // State. + + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + ret(0); +} + + void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { @@ -1192,7 +1283,7 @@ MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid, // If false, it is returned as a pointer to a preallocated by caller memory // region. Pointer to this region should be passed to a function as an // implicit first argument. 
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) +#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__) static const bool kReturnHandlesDirectly = true; #else static const bool kReturnHandlesDirectly = false; @@ -1563,6 +1654,28 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, } +// Store the value in register src in the safepoint register stack +// slot for register dst. +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) { + mov(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) { + mov(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + mov(dst, SafepointRegisterSlot(src)); +} + + +Operand MacroAssembler::SafepointRegisterSlot(Register reg) { + return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { // The registers are pushed starting with the lowest encoding, // which means that lowest encodings are furthest away from diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h index 09584f7a..62bb0f36 100644 --- a/src/ia32/macro-assembler-ia32.h +++ b/src/ia32/macro-assembler-ia32.h @@ -143,7 +143,11 @@ class MacroAssembler: public Assembler { // Push and pop the registers that can hold pointers. void PushSafepointRegisters() { pushad(); } void PopSafepointRegisters() { popad(); } - static int SafepointRegisterStackIndex(int reg_code); + // Store the value in register/immediate src in the safepoint + // register stack slot for register dst. + void StoreToSafepointRegisterSlot(Register dst, Register src); + void StoreToSafepointRegisterSlot(Register dst, Immediate src); + void LoadFromSafepointRegisterSlot(Register dst, Register src); // --------------------------------------------------------------------------- // JavaScript invokes @@ -304,6 +308,11 @@ class MacroAssembler: public Assembler { // Unlink the stack handler on top of the stack from the try handler chain. void PopTryHandler(); + // Activate the top handler in the try handler chain. + void Throw(Register value); + + void ThrowUncatchable(UncatchableExceptionType type, Register value); + // --------------------------------------------------------------------------- // Inline caching support @@ -662,6 +671,15 @@ class MacroAssembler: public Assembler { MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved, Register scratch, bool gc_allowed); + + + // Compute memory operands for safepoint stack slots. + Operand SafepointRegisterSlot(Register reg); + static int SafepointRegisterStackIndex(int reg_code); + + // Needs access to SafepointRegisterStackIndex for optimized frame + // traversal. + friend class OptimizedFrame; }; diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h index 88d0b618..43b7ea3b 100644 --- a/src/ia32/simulator-ia32.h +++ b/src/ia32/simulator-ia32.h @@ -38,10 +38,15 @@ namespace internal { #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ (entry(p0, p1, p2, p3, p4)) -// Call the generated regexp code directly. The entry function pointer should + +typedef int (*regexp_matcher)(String*, int, const byte*, + const byte*, int*, Address, int); + +// Call the generated regexp code directly. The code at the entry address should // expect seven int/pointer sized arguments and return an int.
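// (The entry parameter is a code Address rather than a function pointer, // so the macro below first reinterprets it via // FUNCTION_CAST<regexp_matcher>(entry) using the typedef above.)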
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ - (entry(p0, p1, p2, p3, p4, p5, p6)) + (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6)) + #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ (reinterpret_cast<TryCatch*>(try_catch_address)) diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc index f96ef5ce..633097af 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ia32/stub-cache-ia32.cc @@ -2204,8 +2204,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { // Check that the object is a string or a symbol. @@ -2220,8 +2221,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2241,8 +2243,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2549,12 +2552,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, __ push(edx); // receiver __ push(ecx); // name __ push(eax); // value + __ push(Immediate(Smi::FromInt(strict_mode_))); __ push(ebx); // restore return address // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallExternalReference(store_ic_property, 3, 1); + __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. __ bind(&miss); @@ -2586,8 +2590,8 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Compute the cell operand to use. Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell)); if (Serializer::enabled()) { - __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell))); - cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset); + __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); + cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); } // Check that the value in the cell is not the hole. If it is, this @@ -2709,6 +2713,42 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized( } +MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray( + JSObject* receiver) { + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label miss; + + // Check that the map matches. + __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false); + + // Do the store. + GenerateFastPixelArrayStore(masm(), + edx, + ecx, + eax, + edi, + ebx, + true, + &miss, + &miss, + NULL, + &miss); + + // Handle store cache miss.
+ __ bind(&miss); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ jmp(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, NULL); +} + + MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name, JSObject* object, JSObject* last) { @@ -3673,10 +3713,13 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( __ push(edx); __ push(ecx); __ push(eax); - __ push(ebx); + __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes + __ push(Immediate(Smi::FromInt( + Code::ExtractExtraICStateFromFlags(flags) & kStrictMode))); + __ push(ebx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); return GetCode(flags); } diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc index 1cc91a9f..93d711e9 100644 --- a/src/ia32/virtual-frame-ia32.cc +++ b/src/ia32/virtual-frame-ia32.cc @@ -1038,9 +1038,9 @@ Result VirtualFrame::CallStoreIC(Handle<String> name, StrictModeFlag strict_mode) { // Value and (if not contextual) receiver are on top of the frame. // The IC expects name in ecx, value in eax, and receiver in edx. - Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); Result value = Pop(); RelocInfo::Mode mode; @@ -1061,7 +1061,7 @@ Result VirtualFrame::CallStoreIC(Handle<String> name, } -Result VirtualFrame::CallKeyedStoreIC() { +Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) { // Value, key, and receiver are on the top of the frame. The IC // expects value in eax, key in ecx, and receiver in edx. Result value = Pop(); @@ -1105,7 +1105,9 @@ Result VirtualFrame::CallKeyedStoreIC() { receiver.Unuse(); } - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); return RawCallCodeObject(ic, RelocInfo::CODE_TARGET); } @@ -1306,6 +1308,7 @@ void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) { void VirtualFrame::PushUntaggedElement(Handle<Object> value) { + ASSERT(!ConstantPoolOverflowed()); elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED)); elements_[element_count() - 1].set_untagged_int32(true); } @@ -1336,6 +1339,20 @@ void VirtualFrame::Push(Expression* expr) { } +void VirtualFrame::Push(Handle<Object> value) { + if (ConstantPoolOverflowed()) { + Result temp = cgen()->allocator()->Allocate(); + ASSERT(temp.is_valid()); + __ Set(temp.reg(), Immediate(value)); + Push(&temp); + } else { + FrameElement element = + FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED); + elements_.Add(element); + } +} + + #undef __ } } // namespace v8::internal diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h index 729469fd..51874309 100644 --- a/src/ia32/virtual-frame-ia32.h +++ b/src/ia32/virtual-frame-ia32.h @@ -370,7 +370,7 @@ class VirtualFrame: public ZoneObject { // Call keyed store IC. Value, key, and receiver are found on top // of the frame. All three are dropped. - Result CallKeyedStoreIC(); + Result CallKeyedStoreIC(StrictModeFlag strict_mode); // Call call IC. 
Function name, arguments, and receiver are found on top // of the frame and dropped by the call. The argument count does not @@ -419,9 +419,11 @@ class VirtualFrame: public ZoneObject { void EmitPush(Immediate immediate, TypeInfo info = TypeInfo::Unknown()); + inline bool ConstantPoolOverflowed(); + // Push an element on the virtual frame. + void Push(Handle<Object> value); inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown()); - inline void Push(Handle<Object> value); inline void Push(Smi* value); void PushUntaggedElement(Handle<Object> value); diff --git a/src/ic-inl.h b/src/ic-inl.h index 8fbc1843..9d358edd 100644 --- a/src/ic-inl.h +++ b/src/ic-inl.h @@ -76,6 +76,15 @@ Code* IC::GetTargetAtAddress(Address address) { void IC::SetTargetAtAddress(Address address, Code* target) { ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub()); +#ifdef DEBUG + // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark + // ICs as strict mode. The strict-ness of the IC must be preserved. + Code* old_target = GetTargetAtAddress(address); + if (old_target->kind() == Code::STORE_IC || + old_target->kind() == Code::KEYED_STORE_IC) { + ASSERT(old_target->extra_ic_state() == target->extra_ic_state()); + } +#endif Assembler::set_target_address_at(address, target->instruction_start()); } @@ -343,7 +343,7 @@ void StoreIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; ClearInlinedVersion(address); SetTargetAtAddress(address, - target->extra_ic_state() == kStoreICStrict + (target->extra_ic_state() == kStrictMode) ? initialize_stub_strict() : initialize_stub()); } @@ -366,7 +366,10 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) { void KeyedStoreIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; - SetTargetAtAddress(address, initialize_stub()); + SetTargetAtAddress(address, + (target->extra_ic_state() == kStrictMode) + ? initialize_stub_strict() + : initialize_stub()); } @@ -435,16 +438,25 @@ Object* CallICBase::TryCallAsFunction(Object* object) { } -void CallICBase::ReceiverToObject(Handle<Object> object) { - HandleScope scope; - Handle<Object> receiver(object); +void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee, + Handle<Object> object) { + if (callee->IsJSFunction()) { + Handle<JSFunction> function = Handle<JSFunction>::cast(callee); + if (function->shared()->strict_mode() || function->IsBuiltin()) { + // Do not wrap receiver for strict mode functions or for builtins. + return; + } + } - // Change the receiver to the result of calling ToObject on it. - const int argc = this->target()->arguments_count(); - StackFrameLocator locator; - JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); - int index = frame->ComputeExpressionsCount() - (argc + 1); - frame->SetExpression(index, *Factory::ToObject(object)); + // And only wrap string, number or boolean. + if (object->IsString() || object->IsNumber() || object->IsBoolean()) { + // Change the receiver to the result of calling ToObject on it. 
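+ // (Example, using a purely illustrative user-defined method: given + //   String.prototype.self = function () { return this; }; + //   "abc".self(); + // a non-strict callee receives the String wrapper Object("abc") as its + // receiver, while a strict mode or builtin callee receives the + // primitive "abc" unchanged.)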
+ const int argc = this->target()->arguments_count(); + StackFrameLocator locator; + JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); + int index = frame->ComputeExpressionsCount() - (argc + 1); + frame->SetExpression(index, *Factory::ToObject(object)); + } } @@ -458,10 +470,6 @@ MaybeObject* CallICBase::LoadFunction(State state, return TypeError("non_object_property_call", object, name); } - if (object->IsString() || object->IsNumber() || object->IsBoolean()) { - ReceiverToObject(object); - } - // Check if the name is trivially convertible to an index and get // the element if so. uint32_t index; @@ -505,6 +513,7 @@ MaybeObject* CallICBase::LoadFunction(State state, object->GetProperty(*object, &lookup, *name, &attr); if (!maybe_result->ToObject(&result)) return maybe_result; } + if (lookup.type() == INTERCEPTOR) { // If the object does not have the requested property, check which // exception we need to throw. @@ -516,31 +525,37 @@ MaybeObject* CallICBase::LoadFunction(State state, } } - ASSERT(result != Heap::the_hole_value()); + ASSERT(!result->IsTheHole()); - if (result->IsJSFunction()) { + HandleScope scope; + // Wrap result in a handle because ReceiverToObjectIfRequired may allocate + // new object and cause GC. + Handle<Object> result_handle(result); + // Make receiver an object if the callee requires it. Strict mode or builtin + // functions do not wrap the receiver, non-strict functions and objects + // called as functions do. + ReceiverToObjectIfRequired(result_handle, object); + + if (result_handle->IsJSFunction()) { #ifdef ENABLE_DEBUGGER_SUPPORT // Handle stepping into a function if step into is active. if (Debug::StepInActive()) { // Protect the result in a handle as the debugger can allocate and might // cause GC. - HandleScope scope; - Handle<JSFunction> function(JSFunction::cast(result)); + Handle<JSFunction> function(JSFunction::cast(*result_handle)); Debug::HandleStepIn(function, object, fp(), false); return *function; } #endif - return result; + return *result_handle; } // Try to find a suitable function delegate for the object at hand. - result = TryCallAsFunction(result); - MaybeObject* answer = result; - if (!result->IsJSFunction()) { - answer = TypeError("property_not_function", object, name); - } - return answer; + result_handle = Handle<Object>(TryCallAsFunction(*result_handle)); + if (result_handle->IsJSFunction()) return *result_handle; + + return TypeError("property_not_function", object, name); } @@ -565,8 +580,8 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup, case kStringCharAt: if (object->IsString()) { String* string = String::cast(*object); - // Check that there's the right wrapper in the receiver slot. - ASSERT(string == JSValue::cast(args[0])->value()); + // Check there's the right string value or wrapper in the receiver slot. + ASSERT(string == args[0] || string == JSValue::cast(args[0])->value()); // If we're in the default (fastest) state and the index is // out of bounds, update the state to record this fact. 
if (*extra_ic_state == DEFAULT_STRING_STUB && @@ -775,10 +790,6 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, return TypeError("non_object_property_call", object, key); } - if (object->IsString() || object->IsNumber() || object->IsBoolean()) { - ReceiverToObject(object); - } - if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) { int argc = target()->arguments_count(); InLoopFlag in_loop = target()->ic_in_loop(); @@ -793,17 +804,21 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, #endif } } - Object* result; - { MaybeObject* maybe_result = Runtime::GetObjectProperty(object, key); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - if (result->IsJSFunction()) return result; - result = TryCallAsFunction(result); - MaybeObject* answer = result; - if (!result->IsJSFunction()) { - answer = TypeError("property_not_function", object, key); - } - return answer; + + HandleScope scope; + Handle<Object> result = GetProperty(object, key); + RETURN_IF_EMPTY_HANDLE(result); + + // Make receiver an object if the callee requires it. Strict mode or builtin + // functions do not wrap the receiver, non-strict functions and objects + // called as functions do. + ReceiverToObjectIfRequired(result, object); + + if (result->IsJSFunction()) return *result; + result = Handle<Object>(TryCallAsFunction(*result)); + if (result->IsJSFunction()) return *result; + + return TypeError("property_not_function", object, key); } @@ -1215,7 +1230,8 @@ MaybeObject* KeyedLoadIC::Load(State state, if (receiver->HasExternalArrayElements()) { MaybeObject* probe = StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, - false); + false, + kNonStrictMode); stub = probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked()); } else if (receiver->HasIndexedInterceptor()) { @@ -1371,7 +1387,7 @@ static bool LookupForWrite(JSObject* object, MaybeObject* StoreIC::Store(State state, - Code::ExtraICState extra_ic_state, + StrictModeFlag strict_mode, Handle<Object> object, Handle<String> name, Handle<Object> value) { @@ -1381,15 +1397,23 @@ MaybeObject* StoreIC::Store(State state, return TypeError("non_object_property_store", object, name); } - // Ignore stores where the receiver is not a JSObject. - if (!object->IsJSObject()) return *value; + if (!object->IsJSObject()) { + // The length property of string values is read-only. Throw in strict mode. + if (strict_mode == kStrictMode && object->IsString() && + name->Equals(Heap::length_symbol())) { + return TypeError("strict_read_only_property", object, name); + } + // Ignore stores where the receiver is not a JSObject. + return *value; + } + Handle<JSObject> receiver = Handle<JSObject>::cast(object); // Check if the given name is an array index. uint32_t index; if (name->AsArrayIndex(&index)) { HandleScope scope; - Handle<Object> result = SetElement(receiver, index, value); + Handle<Object> result = SetElement(receiver, index, value, strict_mode); if (result.is_null()) return Failure::Exception(); return *value; } @@ -1401,11 +1425,11 @@ MaybeObject* StoreIC::Store(State state, #ifdef DEBUG if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n"); #endif - Builtins::Name target = (extra_ic_state == kStoreICStrict) + Builtins::Name target = (strict_mode == kStrictMode) ? 
Builtins::StoreIC_ArrayLength_Strict : Builtins::StoreIC_ArrayLength; set_target(Builtins::builtin(target)); - return receiver->SetProperty(*name, *value, NONE); + return receiver->SetProperty(*name, *value, NONE, strict_mode); } // Lookup the property locally in the receiver. @@ -1429,13 +1453,15 @@ MaybeObject* StoreIC::Store(State state, // Index is an offset from the end of the object. int offset = map->instance_size() + (index * kPointerSize); if (PatchInlinedStore(address(), map, offset)) { - set_target(megamorphic_stub()); + set_target((strict_mode == kStrictMode) + ? megamorphic_stub_strict() + : megamorphic_stub()); #ifdef DEBUG if (FLAG_trace_ic) { PrintF("[StoreIC : inline patch %s]\n", *name->ToCString()); } #endif - return receiver->SetProperty(*name, *value, NONE); + return receiver->SetProperty(*name, *value, NONE, strict_mode); #ifdef DEBUG } else { @@ -1462,11 +1488,16 @@ MaybeObject* StoreIC::Store(State state, // If no inlined store ic was patched, generate a stub for this // store. - UpdateCaches(&lookup, state, extra_ic_state, receiver, name, value); + UpdateCaches(&lookup, state, strict_mode, receiver, name, value); } else { - // Strict mode doesn't allow setting non-existent global property. - if (extra_ic_state == kStoreICStrict && IsContextual(object)) { - return ReferenceError("not_defined", name); + // Strict mode doesn't allow setting non-existent global property + // or an assignment to a read only property. + if (strict_mode == kStrictMode) { + if (lookup.IsFound() && lookup.IsReadOnly()) { + return TypeError("strict_read_only_property", object, name); + } else if (IsContextual(object)) { + return ReferenceError("not_defined", name); + } } } } @@ -1474,7 +1505,7 @@ MaybeObject* StoreIC::Store(State state, if (receiver->IsJSGlobalProxy()) { // Generate a generic stub that goes to the runtime when we see a global // proxy as receiver. - Code* stub = (extra_ic_state == kStoreICStrict) + Code* stub = (strict_mode == kStrictMode) ? global_proxy_stub_strict() : global_proxy_stub(); if (target() != stub) { @@ -1486,13 +1517,13 @@ MaybeObject* StoreIC::Store(State state, } // Set the property. 
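// (With strict_mode threaded through, a failed store, e.g. an assignment // to a read-only property, can throw a TypeError under strict mode, as in // the strict_read_only_property checks above, instead of silently // preserving the old value.)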
- return receiver->SetProperty(*name, *value, NONE); + return receiver->SetProperty(*name, *value, NONE, strict_mode); } void StoreIC::UpdateCaches(LookupResult* lookup, State state, - Code::ExtraICState extra_ic_state, + StrictModeFlag strict_mode, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value) { @@ -1514,7 +1545,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup, switch (type) { case FIELD: { maybe_code = StubCache::ComputeStoreField( - *name, *receiver, lookup->GetFieldIndex(), NULL, extra_ic_state); + *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode); break; } case MAP_TRANSITION: { @@ -1524,7 +1555,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup, Handle<Map> transition(lookup->GetTransitionMap()); int index = transition->PropertyIndexFor(*name); maybe_code = StubCache::ComputeStoreField( - *name, *receiver, index, *transition, extra_ic_state); + *name, *receiver, index, *transition, strict_mode); break; } case NORMAL: { @@ -1536,10 +1567,10 @@ void StoreIC::UpdateCaches(LookupResult* lookup, JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup)); maybe_code = StubCache::ComputeStoreGlobal( - *name, *global, cell, extra_ic_state); + *name, *global, cell, strict_mode); } else { if (lookup->holder() != *receiver) return; - maybe_code = StubCache::ComputeStoreNormal(extra_ic_state); + maybe_code = StubCache::ComputeStoreNormal(strict_mode); } break; } @@ -1548,13 +1579,13 @@ void StoreIC::UpdateCaches(LookupResult* lookup, AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); if (v8::ToCData<Address>(callback->setter()) == 0) return; maybe_code = StubCache::ComputeStoreCallback( - *name, *receiver, callback, extra_ic_state); + *name, *receiver, callback, strict_mode); break; } case INTERCEPTOR: { ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined()); maybe_code = StubCache::ComputeStoreInterceptor( - *name, *receiver, extra_ic_state); + *name, *receiver, strict_mode); break; } default: @@ -1571,7 +1602,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup, } else if (state == MONOMORPHIC) { // Only move to megamorphic if the target changes. if (target() != Code::cast(code)) { - set_target(extra_ic_state == kStoreICStrict + set_target((strict_mode == kStrictMode) ? megamorphic_stub_strict() : megamorphic_stub()); } @@ -1587,6 +1618,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup, MaybeObject* KeyedStoreIC::Store(State state, + StrictModeFlag strict_mode, Handle<Object> object, Handle<Object> key, Handle<Object> value) { @@ -1607,7 +1639,7 @@ MaybeObject* KeyedStoreIC::Store(State state, uint32_t index; if (name->AsArrayIndex(&index)) { HandleScope scope; - Handle<Object> result = SetElement(receiver, index, value); + Handle<Object> result = SetElement(receiver, index, value, strict_mode); if (result.is_null()) return Failure::Exception(); return *value; } @@ -1618,11 +1650,11 @@ MaybeObject* KeyedStoreIC::Store(State state, // Update inline cache and stub cache. if (FLAG_use_ic) { - UpdateCaches(&lookup, state, receiver, name, value); + UpdateCaches(&lookup, state, strict_mode, receiver, name, value); } // Set the property. 
- return receiver->SetProperty(*name, *value, NONE); + return receiver->SetProperty(*name, *value, NONE, strict_mode); } // Do not use ICs for objects that require access checks (including @@ -1631,32 +1663,41 @@ MaybeObject* KeyedStoreIC::Store(State state, ASSERT(!(use_ic && object->IsJSGlobalProxy())); if (use_ic) { - Code* stub = generic_stub(); - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->HasExternalArrayElements()) { - MaybeObject* probe = - StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true); - stub = - probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked()); - } else if (state == UNINITIALIZED && - key->IsSmi() && - receiver->map()->has_fast_elements()) { - MaybeObject* probe = StubCache::ComputeKeyedStoreSpecialized(*receiver); - stub = - probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked()); + Code* stub = + (strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub(); + if (state == UNINITIALIZED) { + if (object->IsJSObject()) { + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + if (receiver->HasExternalArrayElements()) { + MaybeObject* probe = + StubCache::ComputeKeyedLoadOrStoreExternalArray( + *receiver, true, strict_mode); + stub = probe->IsFailure() ? + NULL : Code::cast(probe->ToObjectUnchecked()); + } else if (receiver->HasPixelElements()) { + MaybeObject* probe = + StubCache::ComputeKeyedStorePixelArray(*receiver, strict_mode); + stub = probe->IsFailure() ? + NULL : Code::cast(probe->ToObjectUnchecked()); + } else if (key->IsSmi() && receiver->map()->has_fast_elements()) { + MaybeObject* probe = + StubCache::ComputeKeyedStoreSpecialized(*receiver, strict_mode); + stub = probe->IsFailure() ? + NULL : Code::cast(probe->ToObjectUnchecked()); + } } } if (stub != NULL) set_target(stub); } // Set the property. - return Runtime::SetObjectProperty(object, key, value, NONE); + return Runtime::SetObjectProperty(object, key, value, NONE, strict_mode); } void KeyedStoreIC::UpdateCaches(LookupResult* lookup, State state, + StrictModeFlag strict_mode, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value) { @@ -1683,8 +1724,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, switch (type) { case FIELD: { - maybe_code = StubCache::ComputeKeyedStoreField(*name, *receiver, - lookup->GetFieldIndex()); + maybe_code = StubCache::ComputeKeyedStoreField( + *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode); break; } case MAP_TRANSITION: { @@ -1693,8 +1734,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, ASSERT(type == MAP_TRANSITION); Handle<Map> transition(lookup->GetTransitionMap()); int index = transition->PropertyIndexFor(*name); - maybe_code = StubCache::ComputeKeyedStoreField(*name, *receiver, - index, *transition); + maybe_code = StubCache::ComputeKeyedStoreField( + *name, *receiver, index, *transition, strict_mode); break; } // fall through. @@ -1702,7 +1743,9 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, default: { // Always rewrite to the generic case so that we do not // repeatedly try to rewrite. - maybe_code = generic_stub(); + maybe_code = (strict_mode == kStrictMode) + ? generic_stub_strict() + : generic_stub(); break; } } @@ -1717,7 +1760,9 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, if (state == UNINITIALIZED || state == PREMONOMORPHIC) { set_target(Code::cast(code)); } else if (state == MONOMORPHIC) { - set_target(megamorphic_stub()); + set_target((strict_mode == kStrictMode) + ? 
megamorphic_stub_strict() + : megamorphic_stub()); } #ifdef DEBUG @@ -1818,8 +1863,11 @@ MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) { StoreIC ic; IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - return ic.Store(state, extra_ic_state, args.at<Object>(0), - args.at<String>(1), args.at<Object>(2)); + return ic.Store(state, + static_cast<StrictModeFlag>(extra_ic_state & kStrictMode), + args.at<Object>(0), + args.at<String>(1), + args.at<Object>(2)); } @@ -1883,7 +1931,11 @@ MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(Arguments args) { ASSERT(args.length() == 3); KeyedStoreIC ic; IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Store(state, args.at<Object>(0), args.at<Object>(1), + Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); + return ic.Store(state, + static_cast<StrictModeFlag>(extra_ic_state & kStrictMode), + args.at<Object>(0), + args.at<Object>(1), args.at<Object>(2)); } @@ -224,7 +224,7 @@ class CallICBase: public IC { // Otherwise, it returns the undefined value. Object* TryCallAsFunction(Object* object); - void ReceiverToObject(Handle<Object> object); + void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object); static void Clear(Address address, Code* target); friend class IC; @@ -398,16 +398,10 @@ class KeyedLoadIC: public IC { class StoreIC: public IC { public: - - enum StoreICStrictMode { - kStoreICNonStrict = kNonStrictMode, - kStoreICStrict = kStrictMode - }; - StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); } MUST_USE_RESULT MaybeObject* Store(State state, - Code::ExtraICState extra_ic_state, + StrictModeFlag strict_mode, Handle<Object> object, Handle<String> name, Handle<Object> value); @@ -416,10 +410,11 @@ class StoreIC: public IC { static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); } static void GenerateMiss(MacroAssembler* masm); static void GenerateMegamorphic(MacroAssembler* masm, - Code::ExtraICState extra_ic_state); + StrictModeFlag strict_mode); static void GenerateArrayLength(MacroAssembler* masm); static void GenerateNormal(MacroAssembler* masm); - static void GenerateGlobalProxy(MacroAssembler* masm); + static void GenerateGlobalProxy(MacroAssembler* masm, + StrictModeFlag strict_mode); // Clear the use of an inlined version. static void ClearInlinedVersion(Address address); @@ -433,11 +428,18 @@ class StoreIC: public IC { // lookup result. void UpdateCaches(LookupResult* lookup, State state, - Code::ExtraICState extra_ic_state, + StrictModeFlag strict_mode, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value); + void set_target(Code* code) { + // Strict mode must be preserved across IC patching. + ASSERT((code->extra_ic_state() & kStrictMode) == + (target()->extra_ic_state() & kStrictMode)); + IC::set_target(code); + } + // Stub accessors. static Code* megamorphic_stub() { return Builtins::builtin(Builtins::StoreIC_Megamorphic); @@ -473,6 +475,7 @@ class KeyedStoreIC: public IC { KeyedStoreIC() : IC(NO_EXTRA_FRAME) { } MUST_USE_RESULT MaybeObject* Store(State state, + StrictModeFlag strict_mode, Handle<Object> object, Handle<Object> name, Handle<Object> value); @@ -480,8 +483,9 @@ class KeyedStoreIC: public IC { // Code generators for stub routines. Only called once at startup. 
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); } static void GenerateMiss(MacroAssembler* masm); - static void GenerateRuntimeSetProperty(MacroAssembler* masm); - static void GenerateGeneric(MacroAssembler* masm); + static void GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictModeFlag strict_mode); + static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode); // Clear the inlined version so the IC is always hit. static void ClearInlinedVersion(Address address); @@ -493,20 +497,37 @@ class KeyedStoreIC: public IC { // Update the inline cache. void UpdateCaches(LookupResult* lookup, State state, + StrictModeFlag strict_mode, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value); + void set_target(Code* code) { + // Strict mode must be preserved across IC patching. + ASSERT((code->extra_ic_state() & kStrictMode) == + (target()->extra_ic_state() & kStrictMode)); + IC::set_target(code); + } + // Stub accessors. static Code* initialize_stub() { return Builtins::builtin(Builtins::KeyedStoreIC_Initialize); } + static Code* initialize_stub_strict() { + return Builtins::builtin(Builtins::KeyedStoreIC_Initialize_Strict); + } static Code* megamorphic_stub() { return Builtins::builtin(Builtins::KeyedStoreIC_Generic); } + static Code* megamorphic_stub_strict() { + return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict); + } static Code* generic_stub() { return Builtins::builtin(Builtins::KeyedStoreIC_Generic); } + static Code* generic_stub_strict() { + return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict); + } static void Clear(Address address, Code* target); diff --git a/src/json.js b/src/json.js index e6ada51b..7a6189cd 100644 --- a/src/json.js +++ b/src/json.js @@ -49,7 +49,7 @@ function Revive(holder, name, reviver) { } } } - return reviver.call(holder, name, val); + return %_CallFunction(holder, name, val, reviver); } function JSONParse(text, reviver) { @@ -63,11 +63,11 @@ function JSONParse(text, reviver) { function SerializeArray(value, replacer, stack, indent, gap) { if (!%PushIfAbsent(stack, value)) { - throw MakeTypeError('circular_structure', []); + throw MakeTypeError('circular_structure', $Array()); } var stepback = indent; indent += gap; - var partial = []; + var partial = new InternalArray(); var len = value.length; for (var i = 0; i < len; i++) { var strP = JSONSerialize($String(i), value, replacer, stack, @@ -93,11 +93,11 @@ function SerializeArray(value, replacer, stack, indent, gap) { function SerializeObject(value, replacer, stack, indent, gap) { if (!%PushIfAbsent(stack, value)) { - throw MakeTypeError('circular_structure', []); + throw MakeTypeError('circular_structure', $Array()); } var stepback = indent; indent += gap; - var partial = []; + var partial = new InternalArray(); if (IS_ARRAY(replacer)) { var length = replacer.length; for (var i = 0; i < length; i++) { @@ -185,7 +185,7 @@ function BasicSerializeArray(value, stack, builder) { return; } if (!%PushIfAbsent(stack, value)) { - throw MakeTypeError('circular_structure', []); + throw MakeTypeError('circular_structure', $Array()); } builder.push("["); var val = value[0]; @@ -238,7 +238,7 @@ function BasicSerializeArray(value, stack, builder) { function BasicSerializeObject(value, stack, builder) { if (!%PushIfAbsent(stack, value)) { - throw MakeTypeError('circular_structure', []); + throw MakeTypeError('circular_structure', $Array()); } builder.push("{"); var first = true; @@ -301,8 +301,8 @@ function BasicJSONSerialize(key, value, stack, 
builder) { function JSONStringify(value, replacer, space) { if (%_ArgumentsLength() == 1) { - var builder = []; - BasicJSONSerialize('', value, [], builder); + var builder = new InternalArray(); + BasicJSONSerialize('', value, new InternalArray(), builder); if (builder.length == 0) return; var result = %_FastAsciiArrayJoin(builder, ""); if (!IS_UNDEFINED(result)) return result; @@ -329,7 +329,7 @@ function JSONStringify(value, replacer, space) { } else { gap = ""; } - return JSONSerialize('', {'': value}, replacer, [], "", gap); + return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap); } function SetupJSON() { diff --git a/src/jsregexp.cc b/src/jsregexp.cc index 8e7c35f5..b271b027 100644 --- a/src/jsregexp.cc +++ b/src/jsregexp.cc @@ -97,9 +97,10 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text, const char* message) { - Handle<JSArray> array = Factory::NewJSArray(2); - SetElement(array, 0, pattern); - SetElement(array, 1, error_text); + Handle<FixedArray> elements = Factory::NewFixedArray(2); + elements->set(0, *pattern); + elements->set(1, *error_text); + Handle<JSArray> array = Factory::NewJSArrayWithElements(elements); Handle<Object> regexp_err = Factory::NewSyntaxError(message, array); Top::Throw(*regexp_err); } @@ -325,11 +326,12 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) { is_ascii); if (result.error_message != NULL) { // Unable to compile regexp. - Handle<JSArray> array = Factory::NewJSArray(2); - SetElement(array, 0, pattern); - SetElement(array, - 1, - Factory::NewStringFromUtf8(CStrVector(result.error_message))); + Handle<FixedArray> elements = Factory::NewFixedArray(2); + elements->set(0, *pattern); + Handle<String> error_message = + Factory::NewStringFromUtf8(CStrVector(result.error_message)); + elements->set(1, *error_message); + Handle<JSArray> array = Factory::NewJSArrayWithElements(elements); Handle<Object> regexp_err = Factory::NewSyntaxError("malformed_regexp", array); Top::Throw(*regexp_err); diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc index 9f5f1b97..5755bb2d 100644 --- a/src/lithium-allocator.cc +++ b/src/lithium-allocator.cc @@ -478,11 +478,6 @@ void LiveRange::ConvertOperands() { } -UsePosition* LiveRange::AddUsePosition(LifetimePosition pos) { - return AddUsePosition(pos, CreateAssignedOperand()); -} - - bool LiveRange::CanCover(LifetimePosition position) const { if (IsEmpty()) return false; return Start().Value() <= position.Value() && @@ -1098,6 +1093,21 @@ void LAllocator::ResolveControlFlow(LiveRange* range, } else { ASSERT(pred->end()->SecondSuccessor() == NULL); gap = GetLastGap(pred); + + // We are going to insert a move before the branch instruction. + // Some branch instructions (e.g. loops' back edges) + // can potentially cause a GC so they have a pointer map. + // By inserting a move we essentially create a copy of a + // value which is invisible to PopulatePointerMaps(), because we store + // it into a location different from the operand of a live range + // covering a branch instruction. + // Thus we need to manually record a pointer.
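+ // (Recording cur_op in the branch's pointer map below keeps that extra + // copy of the tagged value visible to the GC across the branch.)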
+ if (HasTaggedValue(range->id())) { + LInstruction* branch = InstructionAt(pred->last_instruction_index()); + if (branch->HasPointerMap()) { + branch->pointer_map()->RecordPointer(cur_op); + } + } } gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op); } @@ -1264,7 +1274,7 @@ void LAllocator::BuildLiveRanges() { found = true; int operand_index = iterator.Current(); PrintF("Function: %s\n", - *graph_->info()->function()->debug_name()->ToCString()); + *chunk_->info()->function()->debug_name()->ToCString()); PrintF("Value %d used before first definition!\n", operand_index); LiveRange* range = LiveRangeFor(operand_index); PrintF("First use is at %d\n", range->first_pos()->pos().Value()); diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h index 914a5b68..d53ea787 100644 --- a/src/lithium-allocator.h +++ b/src/lithium-allocator.h @@ -286,7 +286,6 @@ class LiveRange: public ZoneObject { LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; } LiveRange* next() const { return next_; } bool IsChild() const { return parent() != NULL; } - bool IsParent() const { return parent() == NULL; } int id() const { return id_; } bool IsFixed() const { return id_ < 0; } bool IsEmpty() const { return first_interval() == NULL; } @@ -360,7 +359,6 @@ class LiveRange: public ZoneObject { void EnsureInterval(LifetimePosition start, LifetimePosition end); void AddUseInterval(LifetimePosition start, LifetimePosition end); UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand); - UsePosition* AddUsePosition(LifetimePosition pos); // Shorten the most recently added interval by setting a new start. void ShortenTo(LifetimePosition start); diff --git a/src/lithium.h b/src/lithium.h index a2f9df0f..d85a87c1 100644 --- a/src/lithium.h +++ b/src/lithium.h @@ -536,10 +536,12 @@ class ShallowIterator BASE_EMBEDDED { inline LEnvironment* env() { return env_; } private: + inline bool ShouldSkip(LOperand* op) { + return op == NULL || op->IsConstantOperand() || op->IsArgument(); + } + inline int AdvanceToNext(int start) { - while (start < limit_ && - (env_->values()->at(start) == NULL || - env_->values()->at(start)->IsConstantOperand())) { + while (start < limit_ && ShouldSkip(env_->values()->at(start))) { start++; } return start; diff --git a/src/liveedit.cc b/src/liveedit.cc index a395c511..744ed49d 100644 --- a/src/liveedit.cc +++ b/src/liveedit.cc @@ -286,11 +286,18 @@ class CompareOutputArrayWriter { } void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) { - SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1))); - SetElement(array_, current_size_ + 1, - Handle<Object>(Smi::FromInt(char_pos1 + char_len1))); - SetElement(array_, current_size_ + 2, - Handle<Object>(Smi::FromInt(char_pos2 + char_len2))); + SetElement(array_, + current_size_, + Handle<Object>(Smi::FromInt(char_pos1)), + kNonStrictMode); + SetElement(array_, + current_size_ + 1, + Handle<Object>(Smi::FromInt(char_pos1 + char_len1)), + kNonStrictMode); + SetElement(array_, + current_size_ + 2, + Handle<Object>(Smi::FromInt(char_pos2 + char_len2)), + kNonStrictMode); current_size_ += 3; } @@ -545,10 +552,13 @@ class JSArrayBasedStruct { protected: void SetField(int field_position, Handle<Object> value) { - SetElement(array_, field_position, value); + SetElement(array_, field_position, value, kNonStrictMode); } void SetSmiValueField(int field_position, int value) { - SetElement(array_, field_position, Handle<Smi>(Smi::FromInt(value))); + SetElement(array_, + field_position, 
+ Handle<Smi>(Smi::FromInt(value)), + kNonStrictMode); } Object* GetField(int field_position) { return array_->GetElementNoExceptionThrown(field_position); @@ -687,7 +697,7 @@ class FunctionInfoListener { fun->end_position(), fun->num_parameters(), current_parent_index_); current_parent_index_ = len_; - SetElement(result_, len_, info.GetJSArray()); + SetElement(result_, len_, info.GetJSArray(), kNonStrictMode); len_++; } @@ -767,14 +777,16 @@ class FunctionInfoListener { list[k] = list[l]; } for (int i = 0; i < j; i++) { - SetElement(scope_info_list, scope_info_length, list[i]->name()); + SetElement(scope_info_list, scope_info_length, + list[i]->name(), kNonStrictMode); scope_info_length++; SetElement(scope_info_list, scope_info_length, - Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index()))); + Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())), + kNonStrictMode); scope_info_length++; } SetElement(scope_info_list, scope_info_length, - Handle<Object>(Heap::null_value())); + Handle<Object>(Heap::null_value()), kNonStrictMode); scope_info_length++; outer_scope = outer_scope->outer_scope(); @@ -817,7 +829,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) { Handle<String> name_handle(String::cast(info->name())); info_wrapper.SetProperties(name_handle, info->start_position(), info->end_position(), info); - SetElement(array, i, info_wrapper.GetJSArray()); + SetElement(array, i, info_wrapper.GetJSArray(), kNonStrictMode); } } @@ -1315,7 +1327,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array, SharedFunctionInfo::cast(wrapper->value())); if (function->shared() == *shared || IsInlined(*function, *shared)) { - SetElement(result, i, Handle<Smi>(Smi::FromInt(status))); + SetElement(result, i, Handle<Smi>(Smi::FromInt(status)), kNonStrictMode); return true; } } @@ -1520,7 +1532,7 @@ static const char* DropActivationsInActiveThread( Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) { Handle<Object> replaced( Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK)); - SetElement(result, i, replaced); + SetElement(result, i, replaced, kNonStrictMode); } } return NULL; @@ -1561,7 +1573,8 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations( // Fill the default values. for (int i = 0; i < len; i++) { SetElement(result, i, - Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH))); + Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)), + kNonStrictMode); } @@ -1580,7 +1593,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations( // Add error message as an array extra element. 
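All of the liveedit.cc hunks in this patch are the same mechanical change: SetElement() now takes an explicit strict-mode flag, and LiveEdit passes kNonStrictMode for its internal bookkeeping arrays, preserving the old non-throwing store semantics. A minimal sketch of the new call shape (array, index, and value are hypothetical stand-ins for whatever the caller holds):

  SetElement(array, index,
             Handle<Object>(Smi::FromInt(value)),
             kNonStrictMode);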
Vector<const char> vector_message(error_message, StrLength(error_message)); Handle<String> str = Factory::NewStringFromAscii(vector_message); - SetElement(result, len, str); + SetElement(result, len, str, kNonStrictMode); } return result; } diff --git a/src/liveobjectlist-inl.h b/src/liveobjectlist-inl.h index 997da4ee..f742de3a 100644 --- a/src/liveobjectlist-inl.h +++ b/src/liveobjectlist-inl.h @@ -32,5 +32,95 @@ #include "liveobjectlist.h" +namespace v8 { +namespace internal { + +#ifdef LIVE_OBJECT_LIST + +void LiveObjectList::GCEpilogue() { + if (!NeedLOLProcessing()) return; + GCEpiloguePrivate(); +} + + +void LiveObjectList::GCPrologue() { + if (!NeedLOLProcessing()) return; +#ifdef VERIFY_LOL + if (FLAG_verify_lol) { + Verify(); + } +#endif +} + + +void LiveObjectList::IterateElements(ObjectVisitor* v) { + if (!NeedLOLProcessing()) return; + IterateElementsPrivate(v); +} + + +void LiveObjectList::ProcessNonLive(HeapObject *obj) { + // Only do work if we have at least one list to process. + if (last()) DoProcessNonLive(obj); +} + + +void LiveObjectList::UpdateReferencesForScavengeGC() { + if (LiveObjectList::NeedLOLProcessing()) { + UpdateLiveObjectListVisitor update_visitor; + LiveObjectList::IterateElements(&update_visitor); + } +} + + +LiveObjectList* LiveObjectList::FindLolForId(int id, + LiveObjectList* start_lol) { + if (id != 0) { + LiveObjectList* lol = start_lol; + while (lol != NULL) { + if (lol->id() == id) { + return lol; + } + lol = lol->prev_; + } + } + return NULL; +} + + +// Iterates the elements in every lol and returns the one that matches the +// specified key. If no matching element is found, then it returns NULL. +template <typename T> +inline LiveObjectList::Element* +LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) { + LiveObjectList *lol = last(); + while (lol != NULL) { + Element* elements = lol->elements_; + for (int i = 0; i < lol->obj_count_; i++) { + Element* element = &elements[i]; + if (GetValue(element) == key) { + return element; + } + } + lol = lol->prev_; + } + return NULL; +} + + +inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) { + return element->id_; +} + + +inline HeapObject* +LiveObjectList::GetElementObj(LiveObjectList::Element* element) { + return element->obj_; +} + +#endif // LIVE_OBJECT_LIST + +} } // namespace v8::internal + #endif // V8_LIVEOBJECTLIST_INL_H_ diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc index 28a3d6d6..5795a6b0 100644 --- a/src/liveobjectlist.cc +++ b/src/liveobjectlist.cc @@ -37,7 +37,7 @@ #include "heap.h" #include "inspector.h" #include "list-inl.h" -#include "liveobjectlist.h" +#include "liveobjectlist-inl.h" #include "string-stream.h" #include "top.h" #include "v8utils.h" @@ -46,6 +46,2542 @@ namespace v8 { namespace internal { +typedef int (*RawComparer)(const void*, const void*); + + +#ifdef CHECK_ALL_OBJECT_TYPES + +#define DEBUG_LIVE_OBJECT_TYPES(v) \ + v(Smi, "unexpected: Smi") \ + \ + v(CodeCache, "unexpected: CodeCache") \ + v(BreakPointInfo, "unexpected: BreakPointInfo") \ + v(DebugInfo, "unexpected: DebugInfo") \ + v(TypeSwitchInfo, "unexpected: TypeSwitchInfo") \ + v(SignatureInfo, "unexpected: SignatureInfo") \ + v(Script, "unexpected: Script") \ + v(ObjectTemplateInfo, "unexpected: ObjectTemplateInfo") \ + v(FunctionTemplateInfo, "unexpected: FunctionTemplateInfo") \ + v(CallHandlerInfo, "unexpected: CallHandlerInfo") \ + v(InterceptorInfo, "unexpected: InterceptorInfo") \ + v(AccessCheckInfo, "unexpected: AccessCheckInfo") \ + 
v(AccessorInfo, "unexpected: AccessorInfo") \ + v(ExternalTwoByteString, "unexpected: ExternalTwoByteString") \ + v(ExternalAsciiString, "unexpected: ExternalAsciiString") \ + v(ExternalString, "unexpected: ExternalString") \ + v(SeqTwoByteString, "unexpected: SeqTwoByteString") \ + v(SeqAsciiString, "unexpected: SeqAsciiString") \ + v(SeqString, "unexpected: SeqString") \ + v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \ + v(GlobalContext, "unexpected: GlobalContext") \ + v(MapCache, "unexpected: MapCache") \ + v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \ + v(CompilationCacheTable, "unexpected: CompilationCacheTable") \ + v(SymbolTable, "unexpected: SymbolTable") \ + v(Dictionary, "unexpected: Dictionary") \ + v(HashTable, "unexpected: HashTable") \ + v(DescriptorArray, "unexpected: DescriptorArray") \ + v(ExternalFloatArray, "unexpected: ExternalFloatArray") \ + v(ExternalUnsignedIntArray, "unexpected: ExternalUnsignedIntArray") \ + v(ExternalIntArray, "unexpected: ExternalIntArray") \ + v(ExternalUnsignedShortArray, "unexpected: ExternalUnsignedShortArray") \ + v(ExternalShortArray, "unexpected: ExternalShortArray") \ + v(ExternalUnsignedByteArray, "unexpected: ExternalUnsignedByteArray") \ + v(ExternalByteArray, "unexpected: ExternalByteArray") \ + v(JSValue, "unexpected: JSValue") + +#else +#define DEBUG_LIVE_OBJECT_TYPES(v) +#endif + + +#define FOR_EACH_LIVE_OBJECT_TYPE(v) \ + DEBUG_LIVE_OBJECT_TYPES(v) \ + \ + v(JSArray, "JSArray") \ + v(JSRegExp, "JSRegExp") \ + v(JSFunction, "JSFunction") \ + v(JSGlobalObject, "JSGlobal") \ + v(JSBuiltinsObject, "JSBuiltins") \ + v(GlobalObject, "Global") \ + v(JSGlobalProxy, "JSGlobalProxy") \ + v(JSObject, "JSObject") \ + \ + v(Context, "meta: Context") \ + v(ByteArray, "meta: ByteArray") \ + v(PixelArray, "meta: PixelArray") \ + v(ExternalArray, "meta: ExternalArray") \ + v(FixedArray, "meta: FixedArray") \ + v(String, "String") \ + v(HeapNumber, "HeapNumber") \ + \ + v(Code, "meta: Code") \ + v(Map, "meta: Map") \ + v(Oddball, "Oddball") \ + v(Proxy, "meta: Proxy") \ + v(SharedFunctionInfo, "meta: SharedFunctionInfo") \ + v(Struct, "meta: Struct") \ + \ + v(HeapObject, "HeapObject") + + +enum /* LiveObjectType */ { +#define DECLARE_OBJECT_TYPE_ENUM(type, name) kType##type, + FOR_EACH_LIVE_OBJECT_TYPE(DECLARE_OBJECT_TYPE_ENUM) + kInvalidLiveObjType, + kNumberOfTypes +#undef DECLARE_OBJECT_TYPE_ENUM +}; + + +LiveObjectType GetObjectType(HeapObject* heap_obj) { + // TODO(mlam): investigate usint Map::instance_type() instead. +#define CHECK_FOR_OBJECT_TYPE(type, name) \ + if (heap_obj->Is##type()) return kType##type; + FOR_EACH_LIVE_OBJECT_TYPE(CHECK_FOR_OBJECT_TYPE) +#undef CHECK_FOR_OBJECT_TYPE + + UNREACHABLE(); + return kInvalidLiveObjType; +} + + +inline const char* GetObjectTypeDesc(LiveObjectType type) { + static const char* const name[kNumberOfTypes] = { + #define DEFINE_OBJECT_TYPE_NAME(type, name) name, + FOR_EACH_LIVE_OBJECT_TYPE(DEFINE_OBJECT_TYPE_NAME) + "invalid" + #undef DEFINE_OBJECT_TYPE_NAME + }; + ASSERT(type < kNumberOfTypes); + return name[type]; +} + + +const char* GetObjectTypeDesc(HeapObject* heap_obj) { + LiveObjectType type = GetObjectType(heap_obj); + return GetObjectTypeDesc(type); +} + + +bool IsOfType(LiveObjectType type, HeapObject *obj) { + // Note: there are types that are more general (e.g. JSObject) that would + // have passed the Is##type_() test for more specialized types (e.g. + // JSFunction). 
If we find a more specialized match but we're looking for
+ // the general type, then we should reject the ones that match the
+ // specialized type.
+#define CHECK_OBJECT_TYPE(type_, name) \
+ if (obj->Is##type_()) return (type == kType##type_);
+
+ FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
+#undef CHECK_OBJECT_TYPE
+
+ return false;
+}
+
+
+const AllocationSpace kInvalidSpace = static_cast<AllocationSpace>(-1);
+
+static AllocationSpace FindSpaceFor(String* space_str) {
+ SmartPointer<char> s =
+ space_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+
+ const char* key_str = *s;
+ switch (key_str[0]) {
+ case 'c':
+ if (strcmp(key_str, "cell") == 0) return CELL_SPACE;
+ if (strcmp(key_str, "code") == 0) return CODE_SPACE;
+ break;
+ case 'l':
+ if (strcmp(key_str, "lo") == 0) return LO_SPACE;
+ break;
+ case 'm':
+ if (strcmp(key_str, "map") == 0) return MAP_SPACE;
+ break;
+ case 'n':
+ if (strcmp(key_str, "new") == 0) return NEW_SPACE;
+ break;
+ case 'o':
+ if (strcmp(key_str, "old-pointer") == 0) return OLD_POINTER_SPACE;
+ if (strcmp(key_str, "old-data") == 0) return OLD_DATA_SPACE;
+ break;
+ }
+ return kInvalidSpace;
+}
+
+
+static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
+ if (space != LO_SPACE) {
+ return Heap::InSpace(heap_obj, space);
+ }
+
+ // This is an optimization to speed up the check for an object in the LO
+ // space by exclusion because we know that all object pointers passed in
+ // here are guaranteed to be in the heap. Hence, it is safe to infer
+ // using an exclusion test.
+ // Note: calling Heap::InSpace(heap_obj, LO_SPACE) is too slow for our
+ // filters.
+ int first_space = static_cast<int>(FIRST_SPACE);
+ int last_space = static_cast<int>(LO_SPACE);
+ for (int sp = first_space; sp < last_space; sp++) {
+ if (Heap::InSpace(heap_obj, static_cast<AllocationSpace>(sp))) {
+ return false;
+ }
+ }
+ SLOW_ASSERT(Heap::InSpace(heap_obj, LO_SPACE));
+ return true;
+}
+
+
+static LiveObjectType FindTypeFor(String* type_str) {
+ SmartPointer<char> s =
+ type_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+
+#define CHECK_OBJECT_TYPE(type_, name) { \
+ const char* type_desc = GetObjectTypeDesc(kType##type_); \
+ const char* key_str = *s; \
+ if (strstr(type_desc, key_str) != NULL) return kType##type_; \
+ }
+ FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
+#undef CHECK_OBJECT_TYPE
+
+ return kInvalidLiveObjType;
+}
+
+
+class LolFilter {
+ public:
+ explicit LolFilter(Handle<JSObject> filter_obj);
+
+ inline bool is_active() const { return is_active_; }
+ inline bool Matches(HeapObject* obj) {
+ return !is_active() || MatchesSlow(obj);
+ }
+
+ private:
+ void InitTypeFilter(Handle<JSObject> filter_obj);
+ void InitSpaceFilter(Handle<JSObject> filter_obj);
+ void InitPropertyFilter(Handle<JSObject> filter_obj);
+ bool MatchesSlow(HeapObject* obj);
+
+ bool is_active_;
+ LiveObjectType type_;
+ AllocationSpace space_;
+ Handle<String> prop_;
+};
+
+
+LolFilter::LolFilter(Handle<JSObject> filter_obj)
+ : is_active_(false),
+ type_(kInvalidLiveObjType),
+ space_(kInvalidSpace),
+ prop_() {
+ if (filter_obj.is_null()) return;
+
+ InitTypeFilter(filter_obj);
+ InitSpaceFilter(filter_obj);
+ InitPropertyFilter(filter_obj);
+}
+
+
+void LolFilter::InitTypeFilter(Handle<JSObject> filter_obj) {
+ Handle<String> type_sym = Factory::LookupAsciiSymbol("type");
+ MaybeObject* maybe_result = filter_obj->GetProperty(*type_sym);
+ Object* type_obj;
+ if (maybe_result->ToObject(&type_obj)) {
+ if (type_obj->IsString()) {
+ String* type_str =
String::cast(type_obj); + type_ = FindTypeFor(type_str); + if (type_ != kInvalidLiveObjType) { + is_active_ = true; + } + } + } +} + + +void LolFilter::InitSpaceFilter(Handle<JSObject> filter_obj) { + Handle<String> space_sym = Factory::LookupAsciiSymbol("space"); + MaybeObject* maybe_result = filter_obj->GetProperty(*space_sym); + Object* space_obj; + if (maybe_result->ToObject(&space_obj)) { + if (space_obj->IsString()) { + String* space_str = String::cast(space_obj); + space_ = FindSpaceFor(space_str); + if (space_ != kInvalidSpace) { + is_active_ = true; + } + } + } +} + + +void LolFilter::InitPropertyFilter(Handle<JSObject> filter_obj) { + Handle<String> prop_sym = Factory::LookupAsciiSymbol("prop"); + MaybeObject* maybe_result = filter_obj->GetProperty(*prop_sym); + Object* prop_obj; + if (maybe_result->ToObject(&prop_obj)) { + if (prop_obj->IsString()) { + prop_ = Handle<String>(String::cast(prop_obj)); + is_active_ = true; + } + } +} + + +bool LolFilter::MatchesSlow(HeapObject* obj) { + if ((type_ != kInvalidLiveObjType) && !IsOfType(type_, obj)) { + return false; // Fail because obj is not of the type of interest. + } + if ((space_ != kInvalidSpace) && !InSpace(space_, obj)) { + return false; // Fail because obj is not in the space of interest. + } + if (!prop_.is_null() && obj->IsJSObject()) { + LookupResult result; + obj->Lookup(*prop_, &result); + if (!result.IsProperty()) { + return false; // Fail because obj does not have the property of interest. + } + } + return true; +} + + +class LolIterator { + public: + LolIterator(LiveObjectList* older, LiveObjectList* newer) + : older_(older), + newer_(newer), + curr_(0), + elements_(0), + count_(0), + index_(0) { } + + inline void Init() { + SetCurrent(newer_); + // If the elements_ list is empty, then move on to the next list as long + // as we're not at the last list (indicated by done()). + while ((elements_ == NULL) && !Done()) { + SetCurrent(curr_->prev_); + } + } + + inline bool Done() const { + return (curr_ == older_); + } + + // Object level iteration. + inline void Next() { + index_++; + if (index_ >= count_) { + // Iterate backwards until we get to the oldest list. + while (!Done()) { + SetCurrent(curr_->prev_); + // If we have elements to process, we're good to go. + if (elements_ != NULL) break; + + // Else, we should advance to the next older list. + } + } + } + + inline int Id() const { + return elements_[index_].id_; + } + inline HeapObject* Obj() const { + return elements_[index_].obj_; + } + + inline int LolObjCount() const { + if (curr_ != NULL) return curr_->obj_count_; + return 0; + } + + protected: + inline void SetCurrent(LiveObjectList* new_curr) { + curr_ = new_curr; + if (curr_ != NULL) { + elements_ = curr_->elements_; + count_ = curr_->obj_count_; + index_ = 0; + } + } + + LiveObjectList* older_; + LiveObjectList* newer_; + LiveObjectList* curr_; + LiveObjectList::Element* elements_; + int count_; + int index_; +}; + + +class LolForwardIterator : public LolIterator { + public: + LolForwardIterator(LiveObjectList* first, LiveObjectList* last) + : LolIterator(first, last) { + } + + inline void Init() { + SetCurrent(older_); + // If the elements_ list is empty, then move on to the next list as long + // as we're not at the last list (indicated by Done()). + while ((elements_ == NULL) && !Done()) { + SetCurrent(curr_->next_); + } + } + + inline bool Done() const { + return (curr_ == newer_); + } + + // Object level iteration. 
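Both iterator classes are driven the same way, and Next() (below) transparently hops across list boundaries while advancing object by object. A minimal consumer loop, in the shape the writer classes later in this file use (older_lol and newer_lol stand for whatever bounds the caller holds):

  LolIterator it(older_lol, newer_lol);
  for (it.Init(); !it.Done(); it.Next()) {
    HeapObject* obj = it.Obj();  // The tracked object.
    int id = it.Id();            // Its stable lol element id.
    // ... filter / count / dump obj ...
  }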
+ inline void Next() { + index_++; + if (index_ >= count_) { + // Done with current list. Move on to the next. + while (!Done()) { // If not at the last list already, ... + SetCurrent(curr_->next_); + // If we have elements to process, we're good to go. + if (elements_ != NULL) break; + + // Else, we should advance to the next list. + } + } + } +}; + + +// Minimizes the white space in a string. Tabs and newlines are replaced +// with a space where appropriate. +static int CompactString(char* str) { + char* src = str; + char* dst = str; + char prev_ch = 0; + while (*dst != '\0') { + char ch = *src++; + // We will treat non-ascii chars as '?'. + if ((ch & 0x80) != 0) { + ch = '?'; + } + // Compact contiguous whitespace chars into a single ' '. + if (isspace(ch)) { + if (prev_ch != ' ') *dst++ = ' '; + prev_ch = ' '; + continue; + } + *dst++ = ch; + prev_ch = ch; + } + return (dst - str); +} + + +// Generates a custom description based on the specific type of +// object we're looking at. We only generate specialized +// descriptions where we can. In all other cases, we emit the +// generic info. +static void GenerateObjectDesc(HeapObject* obj, + char* buffer, + int buffer_size) { + Vector<char> buffer_v(buffer, buffer_size); + ASSERT(obj != NULL); + if (obj->IsJSArray()) { + JSArray* jsarray = JSArray::cast(obj); + double length = jsarray->length()->Number(); + OS::SNPrintF(buffer_v, + "%p <%s> len %g", + reinterpret_cast<void*>(obj), + GetObjectTypeDesc(obj), + length); + + } else if (obj->IsString()) { + String *str = String::cast(obj); + // Only grab up to 160 chars in case they are double byte. + // We'll only dump 80 of them after we compact them. + const int kMaxCharToDump = 80; + const int kMaxBufferSize = kMaxCharToDump * 2; + SmartPointer<char> str_sp = str->ToCString(DISALLOW_NULLS, + ROBUST_STRING_TRAVERSAL, + 0, + kMaxBufferSize); + char* str_cstr = *str_sp; + int length = CompactString(str_cstr); + OS::SNPrintF(buffer_v, + "%p <%s> '%.80s%s'", + reinterpret_cast<void*>(obj), + GetObjectTypeDesc(obj), + str_cstr, + (length > kMaxCharToDump) ? "..." : ""); + + } else if (obj->IsJSFunction() || obj->IsSharedFunctionInfo()) { + SharedFunctionInfo* sinfo; + if (obj->IsJSFunction()) { + JSFunction* func = JSFunction::cast(obj); + sinfo = func->shared(); + } else { + sinfo = SharedFunctionInfo::cast(obj); + } + + String* name = sinfo->DebugName(); + SmartPointer<char> name_sp = + name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + char* name_cstr = *name_sp; + + HeapStringAllocator string_allocator; + StringStream stream(&string_allocator); + sinfo->SourceCodePrint(&stream, 50); + SmartPointer<const char> source_sp = stream.ToCString(); + const char* source_cstr = *source_sp; + + OS::SNPrintF(buffer_v, + "%p <%s> '%s' %s", + reinterpret_cast<void*>(obj), + GetObjectTypeDesc(obj), + name_cstr, + source_cstr); + + } else if (obj->IsFixedArray()) { + FixedArray* fixed = FixedArray::cast(obj); + + OS::SNPrintF(buffer_v, + "%p <%s> len %d", + reinterpret_cast<void*>(obj), + GetObjectTypeDesc(obj), + fixed->length()); + + } else { + OS::SNPrintF(buffer_v, + "%p <%s>", + reinterpret_cast<void*>(obj), + GetObjectTypeDesc(obj)); + } +} + + +// Utility function for filling in a line of detail in a verbose dump. 
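The desc column of each detail line comes from GenerateObjectDesc() above. Illustrative outputs (the addresses and contents are of course arbitrary):

  char buffer[512];
  GenerateObjectDesc(heap_obj, buffer, sizeof(buffer));
  // "0xb8a4c810 <JSArray> len 3"
  // "0xb8a4d020 <String> 'hello world'"
  // "0xb8a4e040 <JSFunction> 'foo' function foo() { return 42; }"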
+static bool AddObjDetail(Handle<FixedArray> arr, + int index, + int obj_id, + Handle<HeapObject> target, + const char* desc_str, + Handle<String> id_sym, + Handle<String> desc_sym, + Handle<String> size_sym, + Handle<JSObject> detail, + Handle<String> desc, + Handle<Object> error) { + detail = Factory::NewJSObject(Top::object_function()); + if (detail->IsFailure()) { + error = detail; + return false; + } + + int size = 0; + char buffer[512]; + if (desc_str == NULL) { + ASSERT(!target.is_null()); + HeapObject* obj = *target; + GenerateObjectDesc(obj, buffer, sizeof(buffer)); + desc_str = buffer; + size = obj->Size(); + } + desc = Factory::NewStringFromAscii(CStrVector(desc_str)); + if (desc->IsFailure()) { + error = desc; + return false; + } + + { MaybeObject* maybe_result = detail->SetProperty(*id_sym, + Smi::FromInt(obj_id), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return false; + } + { MaybeObject* maybe_result = detail->SetProperty(*desc_sym, + *desc, + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return false; + } + { MaybeObject* maybe_result = detail->SetProperty(*size_sym, + Smi::FromInt(size), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return false; + } + + arr->set(index, *detail); + return true; +} + + +class DumpWriter { + public: + virtual ~DumpWriter() {} + + virtual void ComputeTotalCountAndSize(LolFilter* filter, + int* count, + int* size) = 0; + virtual bool Write(Handle<FixedArray> elements_arr, + int start, + int dump_limit, + LolFilter* filter, + Handle<Object> error) = 0; +}; + + +class LolDumpWriter: public DumpWriter { + public: + LolDumpWriter(LiveObjectList* older, LiveObjectList* newer) + : older_(older), newer_(newer) { + } + + void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) { + *count = 0; + *size = 0; + + LolIterator it(older_, newer_); + for (it.Init(); !it.Done(); it.Next()) { + HeapObject* heap_obj = it.Obj(); + if (!filter->Matches(heap_obj)) { + continue; + } + + *size += heap_obj->Size(); + (*count)++; + } + } + + bool Write(Handle<FixedArray> elements_arr, + int start, + int dump_limit, + LolFilter* filter, + Handle<Object> error) { + // The lols are listed in latest to earliest. We want to dump from + // earliest to latest. So, compute the last element to start with. + int index = 0; + int count = 0; + + // Prefetch some needed symbols. + Handle<String> id_sym = Factory::LookupAsciiSymbol("id"); + Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc"); + Handle<String> size_sym = Factory::LookupAsciiSymbol("size"); + + // Fill the array with the lol object details. + Handle<JSObject> detail; + Handle<String> desc; + Handle<HeapObject> target; + + LiveObjectList* first_lol = (older_ != NULL) ? + older_->next_ : LiveObjectList::first_; + LiveObjectList* last_lol = (newer_ != NULL) ? newer_->next_ : NULL; + + LolForwardIterator it(first_lol, last_lol); + for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) { + HeapObject* heap_obj = it.Obj(); + + // Skip objects that have been filtered out. + if (!filter->Matches(heap_obj)) { + continue; + } + + // Only report objects that are in the section of interest. 
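For example, with 100 objects surviving the filter, start == 20, and dump_limit == 10, the loop counts past the first 20 matches and fills elements_arr with matches 20 through 29.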
+ if (count >= start) { + target = Handle<HeapObject>(heap_obj); + bool success = AddObjDetail(elements_arr, + index++, + it.Id(), + target, + NULL, + id_sym, + desc_sym, + size_sym, + detail, + desc, + error); + if (!success) return false; + } + count++; + } + return true; + } + + private: + LiveObjectList* older_; + LiveObjectList* newer_; +}; + + +class RetainersDumpWriter: public DumpWriter { + public: + RetainersDumpWriter(Handle<HeapObject> target, + Handle<JSObject> instance_filter, + Handle<JSFunction> args_function) + : target_(target), + instance_filter_(instance_filter), + args_function_(args_function) { + } + + void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) { + Handle<FixedArray> retainers_arr; + Handle<Object> error; + + *size = -1; + LiveObjectList::GetRetainers(target_, + instance_filter_, + retainers_arr, + 0, + Smi::kMaxValue, + count, + filter, + NULL, + *args_function_, + error); + } + + bool Write(Handle<FixedArray> elements_arr, + int start, + int dump_limit, + LolFilter* filter, + Handle<Object> error) { + int dummy; + int count; + + // Fill the retainer objects. + count = LiveObjectList::GetRetainers(target_, + instance_filter_, + elements_arr, + start, + dump_limit, + &dummy, + filter, + NULL, + *args_function_, + error); + if (count < 0) { + return false; + } + return true; + } + + private: + Handle<HeapObject> target_; + Handle<JSObject> instance_filter_; + Handle<JSFunction> args_function_; +}; + + +class LiveObjectSummary { + public: + explicit LiveObjectSummary(LolFilter* filter) + : total_count_(0), + total_size_(0), + found_root_(false), + found_weak_root_(false), + filter_(filter) { + memset(counts_, 0, sizeof(counts_[0]) * kNumberOfEntries); + memset(sizes_, 0, sizeof(sizes_[0]) * kNumberOfEntries); + } + + void Add(HeapObject* heap_obj) { + int size = heap_obj->Size(); + LiveObjectType type = GetObjectType(heap_obj); + ASSERT(type != kInvalidLiveObjType); + counts_[type]++; + sizes_[type] += size; + total_count_++; + total_size_ += size; + } + + void set_found_root() { found_root_ = true; } + void set_found_weak_root() { found_weak_root_ = true; } + + inline int Count(LiveObjectType type) { + return counts_[type]; + } + inline int Size(LiveObjectType type) { + return sizes_[type]; + } + inline int total_count() { + return total_count_; + } + inline int total_size() { + return total_size_; + } + inline bool found_root() { + return found_root_; + } + inline bool found_weak_root() { + return found_weak_root_; + } + int GetNumberOfEntries() { + int entries = 0; + for (int i = 0; i < kNumberOfEntries; i++) { + if (counts_[i]) entries++; + } + return entries; + } + + inline LolFilter* filter() { return filter_; } + + static const int kNumberOfEntries = kNumberOfTypes; + + private: + int counts_[kNumberOfEntries]; + int sizes_[kNumberOfEntries]; + int total_count_; + int total_size_; + bool found_root_; + bool found_weak_root_; + + LolFilter *filter_; +}; + + +// Abstraction for a summary writer. +class SummaryWriter { + public: + virtual ~SummaryWriter() {} + virtual void Write(LiveObjectSummary* summary) = 0; +}; + + +// A summary writer for filling in a summary of lol lists and diffs. +class LolSummaryWriter: public SummaryWriter { + public: + LolSummaryWriter(LiveObjectList *older_lol, + LiveObjectList *newer_lol) + : older_(older_lol), newer_(newer_lol) { + } + + void Write(LiveObjectSummary* summary) { + LolFilter* filter = summary->filter(); + + // Fill the summary with the lol object details. 
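The loop that follows is the standard chain walk. One level up, SummarizePrivate() (later in this file) drives any SummaryWriter roughly like this sketch of the call sequence:

  LiveObjectSummary summary(filter);
  writer->Write(&summary);  // Tallies per-type counts and sizes.
  // summary.total_count(), summary.Count(type), and summary.Size(type)
  // are then marshalled into the JS result object.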
+ LolIterator it(older_, newer_);
+ for (it.Init(); !it.Done(); it.Next()) {
+ HeapObject* heap_obj = it.Obj();
+ if (!filter->Matches(heap_obj)) {
+ continue;
+ }
+ summary->Add(heap_obj);
+ }
+ }
+
+ private:
+ LiveObjectList* older_;
+ LiveObjectList* newer_;
+};
+
+
+// A summary writer for filling in a retainers list.
+class RetainersSummaryWriter: public SummaryWriter {
+ public:
+ RetainersSummaryWriter(Handle<HeapObject> target,
+ Handle<JSObject> instance_filter,
+ Handle<JSFunction> args_function)
+ : target_(target),
+ instance_filter_(instance_filter),
+ args_function_(args_function) {
+ }
+
+ void Write(LiveObjectSummary* summary) {
+ Handle<FixedArray> retainers_arr;
+ Handle<Object> error;
+ int dummy_total_count;
+ LiveObjectList::GetRetainers(target_,
+ instance_filter_,
+ retainers_arr,
+ 0,
+ Smi::kMaxValue,
+ &dummy_total_count,
+ summary->filter(),
+ summary,
+ *args_function_,
+ error);
+ }
+
+ private:
+ Handle<HeapObject> target_;
+ Handle<JSObject> instance_filter_;
+ Handle<JSFunction> args_function_;
+};
+
+
+uint32_t LiveObjectList::next_element_id_ = 1;
+int LiveObjectList::list_count_ = 0;
+int LiveObjectList::last_id_ = 0;
+LiveObjectList* LiveObjectList::first_ = NULL;
+LiveObjectList* LiveObjectList::last_ = NULL;
+
+
+LiveObjectList::LiveObjectList(LiveObjectList* prev, int capacity)
+ : prev_(prev),
+ next_(NULL),
+ capacity_(capacity),
+ obj_count_(0) {
+ elements_ = NewArray<Element>(capacity);
+ id_ = ++last_id_;
+
+ list_count_++;
+}
+
+
+LiveObjectList::~LiveObjectList() {
+ DeleteArray<Element>(elements_);
+ delete prev_;
+}
+
+
+int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
+ int size = 0;
+ int count = 0;
+ LiveObjectList *lol = this;
+ do {
+ // Only compute total size if requested i.e. when size_p is not null.
+ if (size_p != NULL) {
+ Element* elements = lol->elements_;
+ for (int i = 0; i < lol->obj_count_; i++) {
+ HeapObject* heap_obj = elements[i].obj_;
+ size += heap_obj->Size();
+ }
+ }
+ count += lol->obj_count_;
+ lol = lol->prev_;
+ } while (lol != NULL);
+
+ if (size_p != NULL) {
+ *size_p = size;
+ }
+ return count;
+}
+
+
+// Adds an object to the lol.
+// Returns true if successful, else returns false.
+bool LiveObjectList::Add(HeapObject* obj) {
+ // If the object is already accounted for in the prev list which we inherit
+ // from, then no need to add it to this list.
+ if ((prev() != NULL) && (prev()->Find(obj) != NULL)) {
+ return true;
+ }
+ ASSERT(obj_count_ <= capacity_);
+ if (obj_count_ == capacity_) {
+ // The heap must have grown and we have more objects than capacity to store
+ // them.
+ return false; // Fail this addition.
+ }
+ Element& element = elements_[obj_count_++];
+ element.id_ = next_element_id_++;
+ element.obj_ = obj;
+ return true;
+}
+
+
+// Comparator used for sorting and searching the lol.
+int LiveObjectList::CompareElement(const Element* a, const Element* b) {
+ const HeapObject* obj1 = a->obj_;
+ const HeapObject* obj2 = b->obj_;
+ // For lol elements, it doesn't matter which comes first if 2 elements point
+ // to the same object (which gets culled later). Hence, we only care about
+ // the greater than / less than relationships.
+ return (obj1 > obj2) ? 1 : (obj1 == obj2) ? 0 : -1;
+}
+
+
+// Looks for the specified object in the lol, and returns its element if found.
+LiveObjectList::Element* LiveObjectList::Find(HeapObject* obj) {
+ LiveObjectList* lol = this;
+ Element key;
+ Element* result = NULL;
+
+ key.obj_ = obj;
+ // Iterate through the chain of lols to look for the object.
+ while ((result == NULL) && (lol != NULL)) {
+ result = reinterpret_cast<Element*>(
+ bsearch(&key, lol->elements_, lol->obj_count_,
+ sizeof(Element),
+ reinterpret_cast<RawComparer>(CompareElement)));
+ lol = lol->prev_;
+ }
+ return result;
+}
+
+
+// "Nullifies" (converts the HeapObject* into an SMI) so that it will get
+// cleaned up in the GCEpilogue, while preserving the sort order of the lol.
+// NOTE: the lols need to be already sorted before NullifyMostRecent() is
+// called.
+void LiveObjectList::NullifyMostRecent(HeapObject* obj) {
+ LiveObjectList* lol = last();
+ Element key;
+ Element* result = NULL;
+
+ key.obj_ = obj;
+ // Iterate through the chain of lols to look for the object.
+ while (lol != NULL) {
+ result = reinterpret_cast<Element*>(
+ bsearch(&key, lol->elements_, lol->obj_count_,
+ sizeof(Element),
+ reinterpret_cast<RawComparer>(CompareElement)));
+ if (result != NULL) {
+ // Since there may be more than one (we are nullifying dups after all),
+ // find the first in the current lol, and nullify that. The lol should
+ // be sorted already to make this easy (see the use of SortAll()).
+ int i = result - lol->elements_;
+
+ // NOTE: we sort the lol in increasing order. So, if an object has been
+ // "nullified" (its lowest bit will be cleared to make it look like an
+ // SMI), it would/should show up before the equivalent dups that have not
+ // yet been "nullified". Hence, we should be searching backwards for the
+ // first occurrence of a matching object and nullify that instance. This
+ // will ensure that we preserve the expected sorting order.
+ for (i--; i > 0; i--) {
+ Element* element = &lol->elements_[i];
+ HeapObject* curr_obj = element->obj_;
+ if (curr_obj != obj) {
+ break; // No more matches. Let's move on.
+ }
+ result = element; // Let this earlier match be the result.
+ }
+
+ // Nullify the object.
+ NullifyNonLivePointer(&result->obj_);
+ return;
+ }
+ lol = lol->prev_;
+ }
+}
+
+
+// Sorts the lol.
+void LiveObjectList::Sort() {
+ if (obj_count_ > 0) {
+ Vector<Element> elements_v(elements_, obj_count_);
+ elements_v.Sort(CompareElement);
+ }
+}
+
+
+// Sorts all captured lols starting from the latest.
+void LiveObjectList::SortAll() {
+ LiveObjectList* lol = last();
+ while (lol != NULL) {
+ lol->Sort();
+ lol = lol->prev_;
+ }
+}
+
+
+// Counts the number of objects in the heap.
+static int CountHeapObjects() {
+ int count = 0;
+ // Iterate over all the heap spaces and count the number of objects.
+ HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapObject* heap_obj = NULL;
+ while ((heap_obj = iterator.next()) != NULL) {
+ count++;
+ }
+ return count;
+}
+
+
+// Captures a current snapshot of all objects in the heap.
+MaybeObject* LiveObjectList::Capture() {
+ HandleScope scope;
+
+ // Count the number of objects in the heap.
+ int total_count = CountHeapObjects();
+ int count = total_count;
+ int size = 0;
+
+ LiveObjectList* last_lol = last();
+ if (last_lol != NULL) {
+ count -= last_lol->TotalObjCount();
+ }
+
+ LiveObjectList* lol;
+
+ // Create a lol large enough to track all the objects.
+ lol = new LiveObjectList(last_lol, count);
+ if (lol == NULL) {
+ return NULL; // No memory to proceed.
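Note the differential sizing above: if the first capture saw 10000 heap objects, its lol gets 10000 slots; if a later capture counts 10400 while every previously tracked object is still live, the new lol needs only 10400 - 10000 = 400 slots, because Add() declines objects already tracked by the prev chain.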
+ }
+
+ // The HeapIterator needs to be in its own scope because it disables
+ // allocation, and we need to allocate below.
+ {
+ // Iterate over all the heap spaces and add the objects.
+ HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapObject* heap_obj = NULL;
+ bool failed = false;
+ while (!failed && (heap_obj = iterator.next()) != NULL) {
+ failed = !lol->Add(heap_obj);
+ size += heap_obj->Size();
+ }
+ ASSERT(!failed);
+
+ lol->Sort();
+
+ // Add the current lol to the list of lols.
+ if (last_ != NULL) {
+ last_->next_ = lol;
+ } else {
+ first_ = lol;
+ }
+ last_ = lol;
+
+#ifdef VERIFY_LOL
+ if (FLAG_verify_lol) {
+ Verify(true);
+ }
+#endif
+ }
+
+ Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
+ Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
+ Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+
+ Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
+ if (result->IsFailure()) return Object::cast(*result);
+
+ { MaybeObject* maybe_result = result->SetProperty(*id_sym,
+ Smi::FromInt(lol->id()),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ }
+ { MaybeObject* maybe_result = result->SetProperty(*count_sym,
+ Smi::FromInt(total_count),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ }
+ { MaybeObject* maybe_result = result->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ }
+
+ return *result;
+}
+
+
+// Delete doesn't actually delete an lol. It just marks it as invisible since
+// its contents are considered to be part of subsequent lists as well. The
+// only time we'll actually delete the lol is when we Reset() or if the lol is
+// invisible, and its element count reaches 0.
+bool LiveObjectList::Delete(int id) {
+ LiveObjectList *lol = last();
+ while (lol != NULL) {
+ if (lol->id() == id) {
+ break;
+ }
+ lol = lol->prev_;
+ }
+
+ // If no lol is found for this id, then we fail to delete.
+ if (lol == NULL) return false;
+
+ // Else, mark the lol as invisible i.e. id == 0.
+ lol->id_ = 0;
+ list_count_--;
+ ASSERT(list_count_ >= 0);
+ if (lol->obj_count_ == 0) {
+ // Point the next lol's prev to this lol's prev.
+ LiveObjectList* next = lol->next_;
+ LiveObjectList* prev = lol->prev_;
+ // Point next's prev to prev.
+ if (next != NULL) {
+ next->prev_ = lol->prev_;
+ } else {
+ last_ = lol->prev_;
+ }
+ // Point prev's next to next.
+ if (prev != NULL) {
+ prev->next_ = lol->next_;
+ } else {
+ first_ = lol->next_;
+ }
+
+ lol->prev_ = NULL;
+ lol->next_ = NULL;
+
+ // Delete this now empty and invisible lol.
+ delete lol;
+ }
+
+ // If we've marked everything invisible, then clean up completely.
+ if (list_count_ == 0) {
+ Reset();
+ }
+
+ return true;
+}
+
+
+MaybeObject* LiveObjectList::Dump(int older_id,
+ int newer_id,
+ int start_idx,
+ int dump_limit,
+ Handle<JSObject> filter_obj) {
+ if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
+ return Failure::Exception(); // Fail: 0 is not a valid lol id.
+ }
+ if (newer_id < older_id) {
+ // They are not in the expected order. Swap them.
+ int temp = older_id;
+ older_id = newer_id;
+ newer_id = temp;
+ }
+
+ LiveObjectList *newer_lol = FindLolForId(newer_id, last());
+ LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
+
+ // If the id is defined, and we can't find a LOL for it, then we have an
+ // invalid id.
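In these entry points an id of 0 acts as an open bound rather than an error: Dump(0, 0, ...) diffs from the very first capture to the latest, and because out-of-order arguments are swapped up front, Dump(5, 2, ...) behaves exactly like Dump(2, 5, ...).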
+ if ((newer_id != 0) && (newer_lol == NULL)) { + return Failure::Exception(); // Fail: the newer lol id is invalid. + } + if ((older_id != 0) && (older_lol == NULL)) { + return Failure::Exception(); // Fail: the older lol id is invalid. + } + + LolFilter filter(filter_obj); + LolDumpWriter writer(older_lol, newer_lol); + return DumpPrivate(&writer, start_idx, dump_limit, &filter); +} + + +MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer, + int start, + int dump_limit, + LolFilter* filter) { + HandleScope scope; + + // Calculate the number of entries of the dump. + int count = -1; + int size = -1; + writer->ComputeTotalCountAndSize(filter, &count, &size); + + // Adjust for where to start the dump. + if ((start < 0) || (start >= count)) { + return Failure::Exception(); // invalid start. + } + + int remaining_count = count - start; + if (dump_limit > remaining_count) { + dump_limit = remaining_count; + } + + // Allocate an array to hold the result. + Handle<FixedArray> elements_arr = Factory::NewFixedArray(dump_limit); + if (elements_arr->IsFailure()) return Object::cast(*elements_arr); + + // Fill in the dump. + Handle<Object> error; + bool success = writer->Write(elements_arr, + start, + dump_limit, + filter, + error); + if (!success) return Object::cast(*error); + + MaybeObject* maybe_result; + + // Allocate the result body. + Handle<JSObject> body = Factory::NewJSObject(Top::object_function()); + if (body->IsFailure()) return Object::cast(*body); + + // Set the updated body.count. + Handle<String> count_sym = Factory::LookupAsciiSymbol("count"); + maybe_result = body->SetProperty(*count_sym, + Smi::FromInt(count), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + + // Set the updated body.size if appropriate. + if (size >= 0) { + Handle<String> size_sym = Factory::LookupAsciiSymbol("size"); + maybe_result = body->SetProperty(*size_sym, + Smi::FromInt(size), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + } + + // Set body.first_index. + Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index"); + maybe_result = body->SetProperty(*first_sym, + Smi::FromInt(start), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + + // Allocate the JSArray of the elements. + Handle<JSObject> elements = Factory::NewJSObject(Top::array_function()); + if (elements->IsFailure()) return Object::cast(*elements); + Handle<JSArray>::cast(elements)->SetContent(*elements_arr); + + // Set body.elements. + Handle<String> elements_sym = Factory::LookupAsciiSymbol("elements"); + maybe_result = body->SetProperty(*elements_sym, + *elements, + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + + return *body; +} + + +MaybeObject* LiveObjectList::Summarize(int older_id, + int newer_id, + Handle<JSObject> filter_obj) { + if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) { + return Failure::Exception(); // Fail: 0 is not a valid lol id. + } + if (newer_id < older_id) { + // They are not in the expected order. Swap them. + int temp = older_id; + older_id = newer_id; + newer_id = temp; + } + + LiveObjectList *newer_lol = FindLolForId(newer_id, last()); + LiveObjectList *older_lol = FindLolForId(older_id, newer_lol); + + // If the id is defined, and we can't find a LOL for it, then we have an + // invalid id. + if ((newer_id != 0) && (newer_lol == NULL)) { + return Failure::Exception(); // Fail: the newer lol id is invalid. 
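For reference, the object DumpPrivate() above hands back to the debugger has this shape (field values illustrative; size is omitted when the writer reports a negative total):

  // body = {
  //   count: 1234,         // Matches before windowing.
  //   size: 567890,        // Total byte size, when known.
  //   first_index: 20,     // Echoes the start argument.
  //   elements: [ { id: 301, desc: "0x... <JSArray> len 3", size: 28 },
  //               ... ]
  // }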
+ } + if ((older_id != 0) && (older_lol == NULL)) { + return Failure::Exception(); // Fail: the older lol id is invalid. + } + + LolFilter filter(filter_obj); + LolSummaryWriter writer(older_lol, newer_lol); + return SummarizePrivate(&writer, &filter, false); +} + + +// Creates a summary report for the debugger. +// Note: the SummaryWriter takes care of iterating over objects and filling in +// the summary. +MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer, + LolFilter* filter, + bool is_tracking_roots) { + HandleScope scope; + MaybeObject* maybe_result; + + LiveObjectSummary summary(filter); + writer->Write(&summary); + + // The result body will look like this: + // body: { + // count: <total_count>, + // size: <total_size>, + // found_root: <boolean>, // optional. + // found_weak_root: <boolean>, // optional. + // summary: [ + // { + // desc: "<object type name>", + // count: <count>, + // size: size + // }, + // ... + // ] + // } + + // Prefetch some needed symbols. + Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc"); + Handle<String> count_sym = Factory::LookupAsciiSymbol("count"); + Handle<String> size_sym = Factory::LookupAsciiSymbol("size"); + Handle<String> summary_sym = Factory::LookupAsciiSymbol("summary"); + + // Allocate the summary array. + int entries_count = summary.GetNumberOfEntries(); + Handle<FixedArray> summary_arr = + Factory::NewFixedArray(entries_count); + if (summary_arr->IsFailure()) return Object::cast(*summary_arr); + + int idx = 0; + for (int i = 0; i < LiveObjectSummary::kNumberOfEntries; i++) { + // Allocate the summary record. + Handle<JSObject> detail = Factory::NewJSObject(Top::object_function()); + if (detail->IsFailure()) return Object::cast(*detail); + + // Fill in the summary record. + LiveObjectType type = static_cast<LiveObjectType>(i); + int count = summary.Count(type); + if (count) { + const char* desc_cstr = GetObjectTypeDesc(type); + Handle<String> desc = Factory::LookupAsciiSymbol(desc_cstr); + int size = summary.Size(type); + + maybe_result = detail->SetProperty(*desc_sym, + *desc, + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + maybe_result = detail->SetProperty(*count_sym, + Smi::FromInt(count), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + maybe_result = detail->SetProperty(*size_sym, + Smi::FromInt(size), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + + summary_arr->set(idx++, *detail); + } + } + + // Wrap the summary fixed array in a JS array. + Handle<JSObject> summary_obj = Factory::NewJSObject(Top::array_function()); + if (summary_obj->IsFailure()) return Object::cast(*summary_obj); + Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr); + + // Create the body object. + Handle<JSObject> body = Factory::NewJSObject(Top::object_function()); + if (body->IsFailure()) return Object::cast(*body); + + // Fill out the body object. 
+ int total_count = summary.total_count();
+ int total_size = summary.total_size();
+ maybe_result = body->SetProperty(*count_sym,
+ Smi::FromInt(total_count),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ maybe_result = body->SetProperty(*size_sym,
+ Smi::FromInt(total_size),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ if (is_tracking_roots) {
+ int found_root = summary.found_root();
+ int found_weak_root = summary.found_weak_root();
+ Handle<String> root_sym = Factory::LookupAsciiSymbol("found_root");
+ Handle<String> weak_root_sym =
+ Factory::LookupAsciiSymbol("found_weak_root");
+ maybe_result = body->SetProperty(*root_sym,
+ Smi::FromInt(found_root),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ maybe_result = body->SetProperty(*weak_root_sym,
+ Smi::FromInt(found_weak_root),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ }
+
+ maybe_result = body->SetProperty(*summary_sym,
+ *summary_obj,
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ return *body;
+}
+
+
+// Returns an array listing the captured lols.
+// Note: only dumps the section starting at start_idx and only up to
+// dump_limit entries.
+MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
+ HandleScope scope;
+ MaybeObject* maybe_result;
+
+ int total_count = LiveObjectList::list_count();
+ int dump_count = total_count;
+
+ // Adjust for where to start the dump.
+ if (total_count == 0) {
+ start_idx = 0; // Ensure this to get an empty list.
+ } else if ((start_idx < 0) || (start_idx >= total_count)) {
+ return Failure::Exception(); // invalid start.
+ }
+ dump_count -= start_idx;
+
+ // Adjust for the dump limit.
+ if (dump_count > dump_limit) {
+ dump_count = dump_limit;
+ }
+
+ // Allocate an array to hold the result.
+ Handle<FixedArray> list = Factory::NewFixedArray(dump_count);
+ if (list->IsFailure()) return Object::cast(*list);
+
+ // Prefetch some needed symbols.
+ Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
+ Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
+ Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+
+ // Fill the array with the lol details.
+ int idx = 0;
+ LiveObjectList* lol = first_;
+ while ((lol != NULL) && (idx < start_idx)) { // Skip entries before start_idx.
+ if (lol->id() != 0) {
+ idx++;
+ }
+ lol = lol->next();
+ }
+ idx = 0;
+ while ((lol != NULL) && (dump_limit != 0)) {
+ if (lol->id() != 0) {
+ int count;
+ int size;
+ count = lol->GetTotalObjCountAndSize(&size);
+
+ Handle<JSObject> detail = Factory::NewJSObject(Top::object_function());
+ if (detail->IsFailure()) return Object::cast(*detail);
+
+ maybe_result = detail->SetProperty(*id_sym,
+ Smi::FromInt(lol->id()),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ maybe_result = detail->SetProperty(*count_sym,
+ Smi::FromInt(count),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ maybe_result = detail->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+ list->set(idx++, *detail);
+ dump_limit--;
+ }
+ lol = lol->next();
+ }
+
+ // Return the result as a JS array.
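The result assembled below uses the same envelope style as the dump body (values illustrative):

  // result = {
  //   count: 3,            // Total visible lols.
  //   first_index: 0,
  //   lists: [ { id: 1, count: 10210, size: 412300 }, ... ]
  // }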
+ Handle<JSObject> lols = Factory::NewJSObject(Top::array_function());
+ Handle<JSArray>::cast(lols)->SetContent(*list);
+
+ Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
+ if (result->IsFailure()) return Object::cast(*result);
+
+ maybe_result = result->SetProperty(*count_sym,
+ Smi::FromInt(total_count),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
+ maybe_result = result->SetProperty(*first_sym,
+ Smi::FromInt(start_idx),
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ Handle<String> lists_sym = Factory::LookupAsciiSymbol("lists");
+ maybe_result = result->SetProperty(*lists_sym,
+ *lols,
+ NONE,
+ kNonStrictMode);
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ return *result;
+}
+
+
+// Deletes all captured lols.
+void LiveObjectList::Reset() {
+ LiveObjectList *lol = last();
+ // Just delete the last. Each lol will delete its prev automatically.
+ delete lol;
+
+ next_element_id_ = 1;
+ list_count_ = 0;
+ last_id_ = 0;
+ first_ = NULL;
+ last_ = NULL;
+}
+
+
+// Gets the object for the specified obj id.
+Object* LiveObjectList::GetObj(int obj_id) {
+ Element* element = FindElementFor<int>(GetElementId, obj_id);
+ if (element != NULL) {
+ return Object::cast(element->obj_);
+ }
+ return Heap::undefined_value();
+}
+
+
+// Gets the obj id for the specified address if valid.
+int LiveObjectList::GetObjId(Object* obj) {
+ // Make a heap object pointer from the address.
+ HeapObject* hobj = HeapObject::cast(obj);
+ Element* element = FindElementFor<HeapObject*>(GetElementObj, hobj);
+ if (element != NULL) {
+ return element->id_;
+ }
+ return 0; // Invalid address.
+}
+
+
+// Gets the obj id for the specified address if valid.
+Object* LiveObjectList::GetObjId(Handle<String> address) {
+ SmartPointer<char> addr_str =
+ address->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+
+ // Extract the address value from the string.
+ int value = static_cast<int>(StringToInt(*address, 16));
+ Object* obj = reinterpret_cast<Object*>(value);
+ return Smi::FromInt(GetObjId(obj));
+}
+
+
+// Helper visitor for finding references to a target HeapObject.
+class LolVisitor: public ObjectVisitor {
+ public:
+
+ LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
+ : target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
+
+ void VisitPointer(Object** p) { CheckPointer(p); }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Check all HeapObject pointers in [start, end).
+ for (Object** p = start; !found() && p < end; p++) CheckPointer(p);
+ }
+
+ inline bool found() const { return found_; }
+ inline bool reset() { return found_ = false; }
+
+ private:
+ inline void CheckPointer(Object** p) {
+ Object* object = *p;
+ if (HeapObject::cast(object) == target_) {
+ // We may want to skip this handle because the handle may be a local
+ // handle in a handle scope in one of our callers. Once we return,
+ // that handle will be popped. Hence, we don't want to count it as
+ // a root that would have kept the target object alive.
+ if (!handle_to_skip_.is_null() &&
+ handle_to_skip_.location() == reinterpret_cast<HeapObject**>(p)) {
+ return; // Skip this handle.
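GetRetainers(), below, points this visitor at the GC roots to decide whether the target is retained directly by a root; the call shape, as used there:

  LolVisitor lol_visitor(*target, target);
  Heap::IterateStrongRoots(&lol_visitor, VISIT_ALL);
  if (lol_visitor.found()) { /* Referenced from a strong root. */ }
  lol_visitor.reset();
  Heap::IterateWeakRoots(&lol_visitor, VISIT_ALL);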
+ } + found_ = true; + } + } + + HeapObject* target_; + Handle<HeapObject> handle_to_skip_; + bool found_; +}; + + +inline bool AddRootRetainerIfFound(const LolVisitor& visitor, + LolFilter* filter, + LiveObjectSummary *summary, + void (*SetRootFound)(LiveObjectSummary *s), + int start, + int dump_limit, + int* total_count, + Handle<FixedArray> retainers_arr, + int* count, + int* index, + const char* root_name, + Handle<String> id_sym, + Handle<String> desc_sym, + Handle<String> size_sym, + Handle<Object> error) { + HandleScope scope; + + // Scratch handles. + Handle<JSObject> detail; + Handle<String> desc; + Handle<HeapObject> retainer; + + if (visitor.found()) { + if (!filter->is_active()) { + (*total_count)++; + if (summary) { + SetRootFound(summary); + } else if ((*total_count > start) && ((*index) < dump_limit)) { + (*count)++; + if (!retainers_arr.is_null()) { + return AddObjDetail(retainers_arr, + (*index)++, + 0, + retainer, + root_name, + id_sym, + desc_sym, + size_sym, + detail, + desc, + error); + } + } + } + } + return true; +} + + +inline void SetFoundRoot(LiveObjectSummary *summary) { + summary->set_found_root(); +} + + +inline void SetFoundWeakRoot(LiveObjectSummary *summary) { + summary->set_found_weak_root(); +} + + +int LiveObjectList::GetRetainers(Handle<HeapObject> target, + Handle<JSObject> instance_filter, + Handle<FixedArray> retainers_arr, + int start, + int dump_limit, + int* total_count, + LolFilter* filter, + LiveObjectSummary *summary, + JSFunction* arguments_function, + Handle<Object> error) { + HandleScope scope; + + // Scratch handles. + Handle<JSObject> detail; + Handle<String> desc; + Handle<HeapObject> retainer; + + // Prefetch some needed symbols. + Handle<String> id_sym = Factory::LookupAsciiSymbol("id"); + Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc"); + Handle<String> size_sym = Factory::LookupAsciiSymbol("size"); + + NoHandleAllocation ha; + int count = 0; + int index = 0; + Handle<JSObject> last_obj; + + *total_count = 0; + + // Iterate roots. + LolVisitor lol_visitor(*target, target); + Heap::IterateStrongRoots(&lol_visitor, VISIT_ALL); + if (!AddRootRetainerIfFound(lol_visitor, + filter, + summary, + SetFoundRoot, + start, + dump_limit, + total_count, + retainers_arr, + &count, + &index, + "<root>", + id_sym, + desc_sym, + size_sym, + error)) { + return -1; + } + + lol_visitor.reset(); + Heap::IterateWeakRoots(&lol_visitor, VISIT_ALL); + if (!AddRootRetainerIfFound(lol_visitor, + filter, + summary, + SetFoundWeakRoot, + start, + dump_limit, + total_count, + retainers_arr, + &count, + &index, + "<weak root>", + id_sym, + desc_sym, + size_sym, + error)) { + return -1; + } + + // Iterate the live object lists. + LolIterator it(NULL, last()); + for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) { + HeapObject* heap_obj = it.Obj(); + + // Only look at all JSObjects. + if (heap_obj->IsJSObject()) { + // Skip context extension objects and argument arrays as these are + // checked in the context of functions using them. + JSObject* obj = JSObject::cast(heap_obj); + if (obj->IsJSContextExtensionObject() || + obj->map()->constructor() == arguments_function) { + continue; + } + + // Check if the JS object has a reference to the object looked for. + if (obj->ReferencesObject(*target)) { + // Check instance filter if supplied. This is normally used to avoid + // references from mirror objects (see Runtime_IsInPrototypeChain). 
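Concretely, the walk that follows climbs each candidate retainer's prototype chain; if *instance_filter appears anywhere on it, obj is cleared to NULL and the candidate is dropped. This is what keeps debugger mirror objects from reporting themselves as retainers of the object they mirror.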
+ if (!instance_filter->IsUndefined()) {
+ Object* V = obj;
+ while (true) {
+ Object* prototype = V->GetPrototype();
+ if (prototype->IsNull()) {
+ break;
+ }
+ if (*instance_filter == prototype) {
+ obj = NULL; // Don't add this object.
+ break;
+ }
+ V = prototype;
+ }
+ }
+
+ if (obj != NULL) {
+ // Skip objects that have been filtered out.
+ if (!filter->Matches(heap_obj)) {
+ continue;
+ }
+
+ // Valid reference found; add it to the instance array if supplied, and
+ // update the count.
+ last_obj = Handle<JSObject>(obj);
+ (*total_count)++;
+
+ if (summary != NULL) {
+ summary->Add(heap_obj);
+ } else if ((*total_count > start) && (index < dump_limit)) {
+ count++;
+ if (!retainers_arr.is_null()) {
+ retainer = Handle<HeapObject>(heap_obj);
+ bool success = AddObjDetail(retainers_arr,
+ index++,
+ it.Id(),
+ retainer,
+ NULL,
+ id_sym,
+ desc_sym,
+ size_sym,
+ detail,
+ desc,
+ error);
+ if (!success) return -1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Check for circular reference only. This can happen when the object is only
+ // referenced from mirrors and has a circular reference in which case the
+ // object is not really alive and would have been garbage collected if not
+ // referenced from the mirror.
+
+ if (*total_count == 1 && !last_obj.is_null() && *last_obj == *target) {
+ count = 0;
+ *total_count = 0;
+ }
+
+ return count;
+}
+
+
+MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
+ Handle<JSObject> instance_filter,
+ bool verbose,
+ int start,
+ int dump_limit,
+ Handle<JSObject> filter_obj) {
+ HandleScope scope;
+
+ // Get the target object.
+ HeapObject* heap_obj = HeapObject::cast(GetObj(obj_id));
+ if (heap_obj == Heap::undefined_value()) {
+ return heap_obj;
+ }
+
+ Handle<HeapObject> target = Handle<HeapObject>(heap_obj);
+
+ // Get the constructor function for context extension and arguments array.
+ JSObject* arguments_boilerplate =
+ Top::context()->global_context()->arguments_boilerplate();
+ JSFunction* arguments_function =
+ JSFunction::cast(arguments_boilerplate->map()->constructor());
+
+ Handle<JSFunction> args_function = Handle<JSFunction>(arguments_function);
+ LolFilter filter(filter_obj);
+
+ if (!verbose) {
+ RetainersSummaryWriter writer(target, instance_filter, args_function);
+ return SummarizePrivate(&writer, &filter, true);
+
+ } else {
+ RetainersDumpWriter writer(target, instance_filter, args_function);
+ Object* body_obj;
+ MaybeObject* maybe_result =
+ DumpPrivate(&writer, start, dump_limit, &filter);
+ if (!maybe_result->ToObject(&body_obj)) {
+ return maybe_result;
+ }
+
+ // Set body.id.
+ Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj)); + Handle<String> id_sym = Factory::LookupAsciiSymbol("id"); + maybe_result = body->SetProperty(*id_sym, + Smi::FromInt(obj_id), + NONE, + kNonStrictMode); + if (maybe_result->IsFailure()) return maybe_result; + + return *body; + } +} + + +Object* LiveObjectList::PrintObj(int obj_id) { + Object* obj = GetObj(obj_id); + if (!obj) { + return Heap::undefined_value(); + } + + EmbeddedVector<char, 128> temp_filename; + static int temp_count = 0; + const char* path_prefix = "."; + + if (FLAG_lol_workdir) { + path_prefix = FLAG_lol_workdir; + } + OS::SNPrintF(temp_filename, "%s/lol-print-%d", path_prefix, ++temp_count); + + FILE* f = OS::FOpen(temp_filename.start(), "w+"); + + PrintF(f, "@%d ", LiveObjectList::GetObjId(obj)); +#ifdef OBJECT_PRINT +#ifdef INSPECTOR + Inspector::DumpObjectType(f, obj); +#endif // INSPECTOR + PrintF(f, "\n"); + obj->Print(f); +#else // !OBJECT_PRINT + obj->ShortPrint(f); +#endif // !OBJECT_PRINT + PrintF(f, "\n"); + Flush(f); + fclose(f); + + // Create a string from the temp_file. + // Note: the mmapped resource will take care of closing the file. + MemoryMappedExternalResource* resource = + new MemoryMappedExternalResource(temp_filename.start(), true); + if (resource->exists() && !resource->is_empty()) { + ASSERT(resource->IsAscii()); + Handle<String> dump_string = + Factory::NewExternalStringFromAscii(resource); + ExternalStringTable::AddString(*dump_string); + return *dump_string; + } else { + delete resource; + } + return Heap::undefined_value(); +} + + +class LolPathTracer: public PathTracer { + public: + LolPathTracer(FILE* out, + Object* search_target, + WhatToFind what_to_find) + : PathTracer(search_target, what_to_find, VISIT_ONLY_STRONG), out_(out) {} + + private: + void ProcessResults(); + + FILE* out_; +}; + + +void LolPathTracer::ProcessResults() { + if (found_target_) { + PrintF(out_, "=====================================\n"); + PrintF(out_, "==== Path to object ====\n"); + PrintF(out_, "=====================================\n\n"); + + ASSERT(!object_stack_.is_empty()); + Object* prev = NULL; + for (int i = 0, index = 0; i < object_stack_.length(); i++) { + Object* obj = object_stack_[i]; + + // Skip this object if it is basically the internals of the + // previous object (which would have dumped its details already). + if (prev && prev->IsJSObject() && + (obj != search_target_)) { + JSObject* jsobj = JSObject::cast(prev); + if (obj->IsFixedArray() && + jsobj->properties() == FixedArray::cast(obj)) { + // Skip this one because it would have been printed as the + // properties of the last object already. + continue; + } else if (obj->IsHeapObject() && + jsobj->elements() == HeapObject::cast(obj)) { + // Skip this one because it would have been printed as the + // elements of the last object already. + continue; + } + } + + // Print a connecting arrow. + if (i > 0) PrintF(out_, "\n |\n |\n V\n\n"); + + // Print the object index. 
+ PrintF(out_, "[%d] ", ++index); + + // Print the LOL object ID: + int id = LiveObjectList::GetObjId(obj); + if (id > 0) PrintF(out_, "@%d ", id); + +#ifdef OBJECT_PRINT +#ifdef INSPECTOR + Inspector::DumpObjectType(out_, obj); +#endif // INSPECTOR + PrintF(out_, "\n"); + obj->Print(out_); +#else // !OBJECT_PRINT + obj->ShortPrint(out_); + PrintF(out_, "\n"); +#endif // !OBJECT_PRINT + Flush(out_); + } + PrintF(out_, "\n"); + PrintF(out_, "=====================================\n\n"); + Flush(out_); + } +} + + +Object* LiveObjectList::GetPathPrivate(HeapObject* obj1, HeapObject* obj2) { + EmbeddedVector<char, 128> temp_filename; + static int temp_count = 0; + const char* path_prefix = "."; + + if (FLAG_lol_workdir) { + path_prefix = FLAG_lol_workdir; + } + OS::SNPrintF(temp_filename, "%s/lol-getpath-%d", path_prefix, ++temp_count); + + FILE* f = OS::FOpen(temp_filename.start(), "w+"); + + // Save the previous verbosity. + bool prev_verbosity = FLAG_use_verbose_printer; + FLAG_use_verbose_printer = false; + + // Dump the paths. + { + // The tracer needs to be scoped because its usage asserts no allocation, + // and we need to allocate the result string below. + LolPathTracer tracer(f, obj2, LolPathTracer::FIND_FIRST); + + bool found = false; + if (obj1 == NULL) { + // Check for ObjectGroups that references this object. + // TODO(mlam): refactor this to be more modular. + { + List<ObjectGroup*>* groups = GlobalHandles::ObjectGroups(); + for (int i = 0; i < groups->length(); i++) { + ObjectGroup* group = groups->at(i); + if (group == NULL) continue; + + bool found_group = false; + List<Object**>& objects = group->objects_; + for (int j = 0; j < objects.length(); j++) { + Object* object = *objects[j]; + HeapObject* hobj = HeapObject::cast(object); + if (obj2 == hobj) { + found_group = true; + break; + } + } + + if (found_group) { + PrintF(f, + "obj %p is a member of object group %p {\n", + reinterpret_cast<void*>(obj2), + reinterpret_cast<void*>(group)); + for (int j = 0; j < objects.length(); j++) { + Object* object = *objects[j]; + if (!object->IsHeapObject()) continue; + + HeapObject* hobj = HeapObject::cast(object); + int id = GetObjId(hobj); + if (id != 0) { + PrintF(f, " @%d:", id); + } else { + PrintF(f, " <no id>:"); + } + + char buffer[512]; + GenerateObjectDesc(hobj, buffer, sizeof(buffer)); + PrintF(f, " %s", buffer); + if (hobj == obj2) { + PrintF(f, " <==="); + } + PrintF(f, "\n"); + } + PrintF(f, "}\n"); + } + } + } + + PrintF(f, "path from roots to obj %p\n", reinterpret_cast<void*>(obj2)); + Heap::IterateRoots(&tracer, VISIT_ONLY_STRONG); + found = tracer.found(); + + if (!found) { + PrintF(f, " No paths found. Checking symbol tables ...\n"); + SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); + tracer.VisitPointers(reinterpret_cast<Object**>(&symbol_table), + reinterpret_cast<Object**>(&symbol_table)+1); + found = tracer.found(); + if (!found) { + symbol_table->IteratePrefix(&tracer); + found = tracer.found(); + } + } + + if (!found) { + PrintF(f, " No paths found. Checking weak roots ...\n"); + // Check weak refs next. + GlobalHandles::IterateWeakRoots(&tracer); + found = tracer.found(); + } + + } else { + PrintF(f, "path from obj %p to obj %p:\n", + reinterpret_cast<void*>(obj1), reinterpret_cast<void*>(obj2)); + tracer.TracePathFrom(reinterpret_cast<Object**>(&obj1)); + found = tracer.found(); + } + + if (!found) { + PrintF(f, " No paths found\n\n"); + } + } + + // Flush and clean up the dumped file. 
+ Flush(f); + fclose(f); + + // Restore the previous verbosity. + FLAG_use_verbose_printer = prev_verbosity; + + // Create a string from the temp_file. + // Note: the mmapped resource will take care of closing the file. + MemoryMappedExternalResource* resource = + new MemoryMappedExternalResource(temp_filename.start(), true); + if (resource->exists() && !resource->is_empty()) { + ASSERT(resource->IsAscii()); + Handle<String> path_string = + Factory::NewExternalStringFromAscii(resource); + ExternalStringTable::AddString(*path_string); + return *path_string; + } else { + delete resource; + } + return Heap::undefined_value(); +} + + +Object* LiveObjectList::GetPath(int obj_id1, + int obj_id2, + Handle<JSObject> instance_filter) { + HandleScope scope; + + // Get the target object. + HeapObject* obj1 = NULL; + if (obj_id1 != 0) { + obj1 = HeapObject::cast(GetObj(obj_id1)); + if (obj1 == Heap::undefined_value()) { + return obj1; + } + } + + HeapObject* obj2 = HeapObject::cast(GetObj(obj_id2)); + if (obj2 == Heap::undefined_value()) { + return obj2; + } + + return GetPathPrivate(obj1, obj2); +} + + +void LiveObjectList::DoProcessNonLive(HeapObject *obj) { + // We should only be called if we have at least one lol to search. + ASSERT(last() != NULL); + Element* element = last()->Find(obj); + if (element != NULL) { + NullifyNonLivePointer(&element->obj_); + } +} + + +void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) { + LiveObjectList* lol = last(); + while (lol != NULL) { + Element* elements = lol->elements_; + int count = lol->obj_count_; + for (int i = 0; i < count; i++) { + HeapObject** p = &elements[i].obj_; + v->VisitPointer(reinterpret_cast<Object **>(p)); + } + lol = lol->prev_; + } +} + + +// Purpose: Called by GCEpilogue to purge duplicates. Not to be called by +// anyone else. +void LiveObjectList::PurgeDuplicates() { + bool is_sorted = false; + LiveObjectList* lol = last(); + if (!lol) { + return; // Nothing to purge. + } + + int total_count = lol->TotalObjCount(); + if (!total_count) { + return; // Nothing to purge. + } + + Element* elements = NewArray<Element>(total_count); + int count = 0; + + // Copy all the object elements into a consecutive array. + while (lol) { + memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element)); + count += lol->obj_count_; + lol = lol->prev_; + } + qsort(elements, total_count, sizeof(Element), + reinterpret_cast<RawComparer>(CompareElement)); + + ASSERT(count == total_count); + + // Iterate over all objects in the consolidated list and check for dups. + total_count--; + for (int i = 0; i < total_count; ) { + Element* curr = &elements[i]; + HeapObject* curr_obj = curr->obj_; + int j = i+1; + bool done = false; + + while (!done && (j < total_count)) { + // Process if the element's object is still live after the current GC. + // Non-live objects will be converted to SMIs i.e. not HeapObjects. + if (curr_obj->IsHeapObject()) { + Element* next = &elements[j]; + HeapObject* next_obj = next->obj_; + if (next_obj->IsHeapObject()) { + if (curr_obj != next_obj) { + done = true; + continue; // Live object but no match. Move on. + } + + // NOTE: we've just GCed the LOLs. Hence, they are no longer sorted. + // Since we detected at least one need to search for entries, we'll + // sort it to enable the use of NullifyMostRecent() below. We only + // need to sort it once (except for one exception ... see below). + if (!is_sorted) { + SortAll(); + is_sorted = true; + } + + // We have a match. 
Need to nullify the most recent ref to this
+        // object. We'll keep the oldest ref:
+        // Note: we will nullify the element record in the LOL
+        // database, not in the local sorted copy of the elements.
+        NullifyMostRecent(curr_obj);
+      }
+    }
+    // Either the object was already marked for purging, or we just marked
+    // it. Either way, if there's more than one dup, then we need to check
+    // the next element for another possible dup against the current as well
+    // before we move on. So, here we go.
+    j++;
+  }
+
+  // We can move on to checking the match on the next element.
+  i = j;
+  }
+
+  DeleteArray<Element>(elements);
+}
+
+
+// Purpose: Purges dead objects and resorts the LOLs.
+void LiveObjectList::GCEpiloguePrivate() {
+  // Note: During the GC, ConsStrings may be collected and pointers may be
+  // forwarded to their constituent strings. As a result, we may find dupes
+  // of object references in the LOL list.
+  // Another common way we get dups is that free chunks that have been swept
+  // in the oldGen heap may be kept as ByteArray objects in a free list.
+  //
+  // When we promote live objects from the youngGen, the object may be moved
+  // to the start of these free chunks. Since there is no free or move event
+  // for the free chunks, their addresses will show up 2 times: once for their
+  // original free ByteArray selves, and once for the newly promoted youngGen
+  // object. Hence, we can get a duplicate address in the LOL again.
+  //
+  // We need to eliminate these dups because the LOL implementation expects to
+  // only have at most one unique LOL reference to any object at any time.
+  PurgeDuplicates();
+
+  // After the GC, sweep away all free'd Elements and compact.
+  LiveObjectList *prev = NULL;
+  LiveObjectList *next = NULL;
+
+  // Iterating from the youngest lol to the oldest lol.
+  for (LiveObjectList *lol = last(); lol; lol = prev) {
+    Element* elements = lol->elements_;
+    prev = lol->prev();  // Save the prev.
+
+    // Remove any references to collected objects.
+    int i = 0;
+    while (i < lol->obj_count_) {
+      Element& element = elements[i];
+      if (!element.obj_->IsHeapObject()) {
+        // If the HeapObject address was converted into a SMI, then this
+        // is a dead object. Copy the last element over this one.
+        element = elements[lol->obj_count_ - 1];
+        lol->obj_count_--;
+        // We've just moved the last element into this index. We'll revisit
+        // this index again. Hence, no need to increment the iterator.
+      } else {
+        i++;  // Look at the next element next.
+      }
+    }
+
+    int new_count = lol->obj_count_;
+
+    // Check if there are any more elements to keep after purging the dead ones.
+    if (new_count == 0) {
+      DeleteArray<Element>(elements);
+      lol->elements_ = NULL;
+      lol->capacity_ = 0;
+      ASSERT(lol->obj_count_ == 0);
+
+      // If the list is also invisible, then clean up the list as well.
+      if (lol->id_ == 0) {
+        // Point the next lol's prev to this lol's prev.
+        if (next) {
+          next->prev_ = lol->prev_;
+        } else {
+          last_ = lol->prev_;
+        }
+
+        // Delete this now empty and invisible lol.
+        delete lol;
+
+        // Don't point next at this lol since it is now deleted.
+        // Leave the next pointer pointing to the current lol.
+        continue;
+      }
+
+    } else {
+      // If the obj_count_ is less than the capacity and the difference is
+      // greater than a specified threshold, then we should shrink the list.
+      int diff = lol->capacity_ - new_count;
+      const int kMaxUnusedSpace = 64;
+      if (diff > kMaxUnusedSpace) {  // Threshold for shrinking.
+        // Shrink the list.
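+        // (allocate a right-sized copy of the surviving elements and free
+        // the old, oversized backing store)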
+        Element *new_elements = NewArray<Element>(new_count);
+        memcpy(new_elements, elements, new_count * sizeof(Element));
+
+        DeleteArray<Element>(elements);
+        lol->elements_ = new_elements;
+        lol->capacity_ = new_count;
+      }
+      ASSERT(lol->obj_count_ == new_count);
+
+      lol->Sort();  // We've moved objects. Re-sort just in case.
+    }
+
+    // Save the next (for the previous link) in case we need it later.
+    next = lol;
+  }
+
+#ifdef VERIFY_LOL
+  if (FLAG_verify_lol) {
+    Verify();
+  }
+#endif
+}
+
+
+#ifdef VERIFY_LOL
+void LiveObjectList::Verify(bool match_heap_exactly) {
+  OS::Print("Verifying the LiveObjectList database:\n");
+
+  LiveObjectList* lol = last();
+  if (lol == NULL) {
+    OS::Print("  No lol database to verify\n");
+    return;
+  }
+
+  OS::Print("  Preparing the lol database ...\n");
+  int total_count = lol->TotalObjCount();
+
+  Element* elements = NewArray<Element>(total_count);
+  int count = 0;
+
+  // Copy all the object elements into a consecutive array.
+  OS::Print("  Copying the lol database ...\n");
+  while (lol != NULL) {
+    memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
+    count += lol->obj_count_;
+    lol = lol->prev_;
+  }
+  qsort(elements, total_count, sizeof(Element),
+        reinterpret_cast<RawComparer>(CompareElement));
+
+  ASSERT(count == total_count);
+
+  // Iterate over all objects in the heap and check for:
+  // 1. object in LOL but not in heap, i.e. an error.
+  // 2. object in heap but not in LOL (possibly not an error). Usually this
+  //    just means that we don't have a capture of the latest heap. That is,
+  //    unless we did this verify immediately after a capture and specified
+  //    match_heap_exactly = true.
+
+  int number_of_heap_objects = 0;
+  int number_of_matches = 0;
+  int number_not_in_heap = total_count;
+  int number_not_in_lol = 0;
+
+  OS::Print("  Start verify ...\n");
+  OS::Print("  Verifying ...");
+  Flush();
+  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapObject* heap_obj = NULL;
+  while ((heap_obj = iterator.next()) != NULL) {
+    number_of_heap_objects++;
+
+    // Check if the heap_obj is in the lol.
+    Element key;
+    key.obj_ = heap_obj;
+
+    Element* result = reinterpret_cast<Element*>(
+        bsearch(&key, elements, total_count, sizeof(Element),
+                reinterpret_cast<RawComparer>(CompareElement)));
+
+    if (result != NULL) {
+      number_of_matches++;
+      number_not_in_heap--;
+      // Mark it as found by changing it into a SMI (mask off low bit).
+      // Note: we cannot use HeapObject::cast() here because it asserts that
+      // the HeapObject bit is set on the address, but we're unsetting it on
+      // purpose here for our marking.
+      result->obj_ = reinterpret_cast<HeapObject*>(heap_obj->address());
+
+    } else {
+      number_not_in_lol++;
+      if (match_heap_exactly) {
+        OS::Print("heap object %p NOT in lol database\n", heap_obj);
+      }
+    }
+    // Show some sign of life.
+    if (number_of_heap_objects % 1000 == 0) {
+      OS::Print(".");
+      fflush(stdout);
+    }
+  }
+  OS::Print("\n");
+
+  // Report lol objects not found in the heap.
+  if (number_not_in_heap) {
+    int found = 0;
+    for (int i = 0; (i < total_count) && (found < number_not_in_heap); i++) {
+      Element& element = elements[i];
+      if (element.obj_->IsHeapObject()) {
+        OS::Print("lol database object [%d of %d] %p NOT in heap\n",
+                  i, total_count, element.obj_);
+        found++;
+      }
+    }
+  }
+
+  DeleteArray<Element>(elements);
+
+  OS::Print("number of objects in lol database %d\n", total_count);
+  OS::Print("number of heap objects .......... %d\n", number_of_heap_objects);
+  OS::Print("number of matches ............... %d\n", number_of_matches);
%d\n", number_of_matches); + OS::Print("number NOT in heap .............. %d\n", number_not_in_heap); + OS::Print("number NOT in lol database ...... %d\n", number_not_in_lol); + + if (number_of_matches != total_count) { + OS::Print(" *** ERROR: " + "NOT all lol database objects match heap objects.\n"); + } + if (number_not_in_heap != 0) { + OS::Print(" *** ERROR: %d lol database objects not found in heap.\n", + number_not_in_heap); + } + if (match_heap_exactly) { + if (!(number_not_in_lol == 0)) { + OS::Print(" *** ERROR: %d heap objects NOT found in lol database.\n", + number_not_in_lol); + } + } + + ASSERT(number_of_matches == total_count); + ASSERT(number_not_in_heap == 0); + ASSERT(number_not_in_lol == (number_of_heap_objects - total_count)); + if (match_heap_exactly) { + ASSERT(total_count == number_of_heap_objects); + ASSERT(number_not_in_lol == 0); + } + + OS::Print(" Verify the lol database is sorted ...\n"); + lol = last(); + while (lol != NULL) { + Element* elements = lol->elements_; + for (int i = 0; i < lol->obj_count_ - 1; i++) { + if (elements[i].obj_ >= elements[i+1].obj_) { + OS::Print(" *** ERROR: lol %p obj[%d] %p > obj[%d] %p\n", + lol, i, elements[i].obj_, i+1, elements[i+1].obj_); + } + } + lol = lol->prev_; + } + + OS::Print(" DONE verifying.\n\n\n"); +} + + +void LiveObjectList::VerifyNotInFromSpace() { + OS::Print("VerifyNotInFromSpace() ...\n"); + LolIterator it(NULL, last()); + int i = 0; + for (it.Init(); !it.Done(); it.Next()) { + HeapObject* heap_obj = it.Obj(); + if (Heap::InFromSpace(heap_obj)) { + OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n", + i++, heap_obj, Heap::new_space()->FromSpaceLow()); + } + } +} +#endif // VERIFY_LOL + } } // namespace v8::internal diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h index 11f5c451..423f8f0d 100644 --- a/src/liveobjectlist.h +++ b/src/liveobjectlist.h @@ -40,54 +40,225 @@ namespace internal { #ifdef LIVE_OBJECT_LIST +#ifdef DEBUG +// The following symbol when defined enables thorough verification of lol data. +// FLAG_verify_lol will also need to set to true to enable the verification. +#define VERIFY_LOL +#endif -// Temporary stubbed out LiveObjectList implementation. + +typedef int LiveObjectType; +class LolFilter; +class LiveObjectSummary; +class DumpWriter; +class SummaryWriter; + + +// The LiveObjectList is both a mechanism for tracking a live capture of +// objects in the JS heap, as well as is the data structure which represents +// each of those captures. Unlike a snapshot, the lol is live. For example, +// if an object in a captured lol dies and is collected by the GC, the lol +// will reflect that the object is no longer available. The term +// LiveObjectList (and lol) is used to describe both the mechanism and the +// data structure depending on context of use. +// +// In captured lols, objects are tracked using their address and an object id. +// The object id is unique. Once assigned to an object, the object id can never +// be assigned to another object. That is unless all captured lols are deleted +// which allows the user to start over with a fresh set of lols and object ids. +// The uniqueness of the object ids allows the user to track specific objects +// and inspect its longevity while debugging JS code in execution. +// +// The lol comes with utility functions to capture, dump, summarize, and diff +// captured lols amongst other functionality. These functionality are +// accessible via the v8 debugger interface. 
class LiveObjectList { public: - inline static void GCEpilogue() {} - inline static void GCPrologue() {} - inline static void IterateElements(ObjectVisitor* v) {} - inline static void ProcessNonLive(HeapObject *obj) {} - inline static void UpdateReferencesForScavengeGC() {} + inline static void GCEpilogue(); + inline static void GCPrologue(); + inline static void IterateElements(ObjectVisitor* v); + inline static void ProcessNonLive(HeapObject *obj); + inline static void UpdateReferencesForScavengeGC(); + + // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be + // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield + // a verbose dump of all the objects in the resultant lists. + // Similarly, a summarized result of a LOL listing or a diff can be + // attained using the Summarize(0, <lol id>) and Summarize(<lol id1, + // <lol id2>, ...) respectively. - static MaybeObject* Capture() { return Heap::undefined_value(); } - static bool Delete(int id) { return false; } + static MaybeObject* Capture(); + static bool Delete(int id); static MaybeObject* Dump(int id1, int id2, int start_idx, int dump_limit, - Handle<JSObject> filter_obj) { - return Heap::undefined_value(); - } - static MaybeObject* Info(int start_idx, int dump_limit) { - return Heap::undefined_value(); - } - static MaybeObject* Summarize(int id1, - int id2, - Handle<JSObject> filter_obj) { - return Heap::undefined_value(); - } + Handle<JSObject> filter_obj); + static MaybeObject* Info(int start_idx, int dump_limit); + static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj); - static void Reset() {} - static Object* GetObj(int obj_id) { return Heap::undefined_value(); } - static Object* GetObjId(Handle<String> address) { - return Heap::undefined_value(); - } + static void Reset(); + static Object* GetObj(int obj_id); + static int GetObjId(Object* obj); + static Object* GetObjId(Handle<String> address); static MaybeObject* GetObjRetainers(int obj_id, Handle<JSObject> instance_filter, bool verbose, int start, int count, - Handle<JSObject> filter_obj) { - return Heap::undefined_value(); - } + Handle<JSObject> filter_obj); static Object* GetPath(int obj_id1, int obj_id2, - Handle<JSObject> instance_filter) { - return Heap::undefined_value(); + Handle<JSObject> instance_filter); + static Object* PrintObj(int obj_id); + + private: + + struct Element { + int id_; + HeapObject* obj_; + }; + + explicit LiveObjectList(LiveObjectList* prev, int capacity); + ~LiveObjectList(); + + static void GCEpiloguePrivate(); + static void IterateElementsPrivate(ObjectVisitor* v); + + static void DoProcessNonLive(HeapObject *obj); + + static int CompareElement(const Element* a, const Element* b); + + static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2); + + static int GetRetainers(Handle<HeapObject> target, + Handle<JSObject> instance_filter, + Handle<FixedArray> retainers_arr, + int start, + int dump_limit, + int* total_count, + LolFilter* filter, + LiveObjectSummary *summary, + JSFunction* arguments_function, + Handle<Object> error); + + static MaybeObject* DumpPrivate(DumpWriter* writer, + int start, + int dump_limit, + LolFilter* filter); + static MaybeObject* SummarizePrivate(SummaryWriter* writer, + LolFilter* filter, + bool is_tracking_roots); + + static bool NeedLOLProcessing() { return (last() != NULL); } + static void NullifyNonLivePointer(HeapObject **p) { + // Mask out the low bit that marks this as a heap object. 
We'll use this
+    // cleared bit as an indicator that this pointer needs to be collected.
+    //
+    // Meanwhile, we still preserve its approximate value so that we don't
+    // have to resort the elements list all the time.
+    //
+    // Note: Doing so also makes this HeapObject* look like an SMI. Hence,
+    // the GC pointer updater will ignore it when it gets scanned.
+    *p = reinterpret_cast<HeapObject*>((*p)->address());
+  }
+
+  LiveObjectList* prev() { return prev_; }
+  LiveObjectList* next() { return next_; }
+  int id() { return id_; }
+
+  static int list_count() { return list_count_; }
+  static LiveObjectList* last() { return last_; }
+
+  inline static LiveObjectList* FindLolForId(int id, LiveObjectList* start_lol);
+  int TotalObjCount() { return GetTotalObjCountAndSize(NULL); }
+  int GetTotalObjCountAndSize(int* size_p);
+
+  bool Add(HeapObject* obj);
+  Element* Find(HeapObject* obj);
+  static void NullifyMostRecent(HeapObject* obj);
+  void Sort();
+  static void SortAll();
+
+  static void PurgeDuplicates();  // Only to be called by GCEpilogue.
+
+#ifdef VERIFY_LOL
+  static void Verify(bool match_heap_exactly = false);
+  static void VerifyNotInFromSpace();
+#endif
+
+  // Iterates the elements in every lol and returns the one that matches the
+  // specified key. If no matching element is found, then it returns NULL.
+  template <typename T>
+  inline static LiveObjectList::Element*
+      FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key);
+
+  inline static int GetElementId(Element* element);
+  inline static HeapObject* GetElementObj(Element* element);
+
+  // Instance fields.
+  LiveObjectList* prev_;
+  LiveObjectList* next_;
+  int id_;
+  int capacity_;
+  int obj_count_;
+  Element *elements_;
+
+  // Statics for managing all the lists.
+  static uint32_t next_element_id_;
+  static int list_count_;
+  static int last_id_;
+  static LiveObjectList* first_;
+  static LiveObjectList* last_;
+
+  friend class LolIterator;
+  friend class LolForwardIterator;
+  friend class LolDumpWriter;
+  friend class RetainersDumpWriter;
+  friend class RetainersSummaryWriter;
+  friend class UpdateLiveObjectListVisitor;
+};
+
+
+// Helper class for updating the LiveObjectList HeapObject pointers.
+class UpdateLiveObjectListVisitor: public ObjectVisitor {
+ public:
+
+  void VisitPointer(Object** p) { UpdatePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Copy all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+ private:
+  // Based on Heap::ScavengeObject(), but only forwards pointers to live new
+  // space objects, without actually keeping them alive.
+  void UpdatePointer(Object** p) {
+    Object* object = *p;
+    if (!Heap::InNewSpace(object)) return;
+
+    HeapObject* heap_obj = HeapObject::cast(object);
+    ASSERT(Heap::InFromSpace(heap_obj));
+
+    // We use the first word (where the map pointer usually is) of a heap
+    // object to record the forwarding pointer. A forwarding pointer can
+    // point to an old space, the code space, or the to space of the new
+    // generation.
+    MapWord first_word = heap_obj->map_word();
+
+    // If the first word is a forwarding address, the object has already been
+    // copied.
+    if (first_word.IsForwardingAddress()) {
+      *p = first_word.ToForwardingAddress();
+      return;
+
+    // Else, it's a dead object.
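+    // (no forwarding address was installed, i.e. the scavenge neither
+    // copied nor promoted this object, so it did not survive the GC)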
+ } else { + LiveObjectList::NullifyNonLivePointer(reinterpret_cast<HeapObject**>(p)); + } } - static Object* PrintObj(int obj_id) { return Heap::undefined_value(); } }; @@ -96,11 +267,50 @@ class LiveObjectList { class LiveObjectList { public: - static void GCEpilogue() {} - static void GCPrologue() {} - static void IterateElements(ObjectVisitor* v) {} - static void ProcessNonLive(HeapObject *obj) {} - static void UpdateReferencesForScavengeGC() {} + inline static void GCEpilogue() {} + inline static void GCPrologue() {} + inline static void IterateElements(ObjectVisitor* v) {} + inline static void ProcessNonLive(HeapObject* obj) {} + inline static void UpdateReferencesForScavengeGC() {} + + inline static MaybeObject* Capture() { return Heap::undefined_value(); } + inline static bool Delete(int id) { return false; } + inline static MaybeObject* Dump(int id1, + int id2, + int start_idx, + int dump_limit, + Handle<JSObject> filter_obj) { + return Heap::undefined_value(); + } + inline static MaybeObject* Info(int start_idx, int dump_limit) { + return Heap::undefined_value(); + } + inline static MaybeObject* Summarize(int id1, + int id2, + Handle<JSObject> filter_obj) { + return Heap::undefined_value(); + } + + inline static void Reset() {} + inline static Object* GetObj(int obj_id) { return Heap::undefined_value(); } + inline static Object* GetObjId(Handle<String> address) { + return Heap::undefined_value(); + } + inline static MaybeObject* GetObjRetainers(int obj_id, + Handle<JSObject> instance_filter, + bool verbose, + int start, + int count, + Handle<JSObject> filter_obj) { + return Heap::undefined_value(); + } + + inline static Object* GetPath(int obj_id1, + int obj_id2, + Handle<JSObject> instance_filter) { + return Heap::undefined_value(); + } + inline static Object* PrintObj(int obj_id) { return Heap::undefined_value(); } }; diff --git a/src/log-utils.cc b/src/log-utils.cc index c7b75679..9a498ec0 100644 --- a/src/log-utils.cc +++ b/src/log-utils.cc @@ -300,6 +300,8 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) { Append("\\,"); } else if (c == '\\') { Append("\\\\"); + } else if (c == '\"') { + Append("\"\""); } else { Append("%lc", c); } @@ -147,7 +147,7 @@ bool Profiler::paused_ = false; // StackTracer implementation // void StackTracer::Trace(TickSample* sample) { - sample->function = NULL; + sample->tos = NULL; sample->frames_count = 0; // Avoid collecting traces while doing GC. @@ -159,15 +159,9 @@ void StackTracer::Trace(TickSample* sample) { return; } - const Address function_address = - sample->fp + JavaScriptFrameConstants::kFunctionOffset; - if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp, - function_address)) { - Object* object = Memory::Object_at(function_address); - if (object->IsHeapObject()) { - sample->function = HeapObject::cast(object)->address(); - } - } + // Sample potential return address value for frameless invocation of + // stubs (we'll figure out later, if this value makes sense). 
+  sample->tos = Memory::Address_at(sample->sp);
 
   int i = 0;
   const Address callback = Top::external_callback();
@@ -181,10 +175,7 @@ void StackTracer::Trace(TickSample* sample) {
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    Object* object = it.frame()->function_slot_object();
-    if (object->IsHeapObject()) {
-      sample->stack[i++] = HeapObject::cast(object)->address();
-    }
+    sample->stack[i++] = it.frame()->pc();
     it.Advance();
   }
   sample->frames_count = i;
@@ -710,17 +701,6 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static const char* ComputeMarker(Code* code) {
-  switch (code->kind()) {
-    case Code::FUNCTION: return code->optimizable() ? "~" : "";
-    case Code::OPTIMIZED_FUNCTION: return "*";
-    default: return "";
-  }
-}
-#endif
-
-
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              const char* comment) {
@@ -731,7 +711,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
+  msg.Append(",%d,\"", code->ExecutableSize());
   for (const char* p = comment; *p != '\0'; p++) {
     if (*p == '"') {
       msg.Append('\\');
@@ -746,9 +726,40 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
 }
 
 
-void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code,
+                             String* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (name != NULL) {
+    SmartPointer<char> str =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    CodeCreateEvent(tag, code, *str);
+  } else {
+    CodeCreateEvent(tag, code, "");
+  }
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// ComputeMarker must only be used when SharedFunctionInfo is known.
+static const char* ComputeMarker(Code* code) {
+  switch (code->kind()) {
+    case Code::FUNCTION: return code->optimizable() ? "~" : "";
+    case Code::OPTIMIZED_FUNCTION: return "*";
+    default: return "";
+  }
+}
+#endif
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code,
+                             SharedFunctionInfo* shared,
+                             String* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
+  if (code == Builtins::builtin(Builtins::LazyCompile)) return;
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -756,7 +767,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
+  msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
+  msg.AppendAddress(shared->address());
+  msg.Append(",%s", ComputeMarker(code));
   LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -764,26 +777,31 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
 }
 
 
+// Although it is possible to extract the source and line from the
+// SharedFunctionInfo object, we leave that to the caller in order to
+// keep the logging functions free from heap allocations.
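+// The emitted line then has the shape:
+//   code-creation,<tag>,<code address>,<size>,"<name> <script>:<line>",<SFI address>,<marker>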
void Logger::CodeCreateEvent(LogEventsAndTags tag, - Code* code, String* name, + Code* code, + SharedFunctionInfo* shared, String* source, int line) { #ifdef ENABLE_LOGGING_AND_PROFILING if (!Log::IsEnabled() || !FLAG_log_code) return; LogMessageBuilder msg; - SmartPointer<char> str = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + SmartPointer<char> name = + shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); SmartPointer<char> sourcestr = source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); msg.Append("%s,%s,", kLogEventsNames[CODE_CREATION_EVENT], kLogEventsNames[tag]); msg.AppendAddress(code->address()); - msg.Append(",%d,\"%s%s %s:%d\"", + msg.Append(",%d,\"%s %s:%d\",", code->ExecutableSize(), - ComputeMarker(code), - *str, + *name, *sourcestr, line); + msg.AppendAddress(shared->address()); + msg.Append(",%s", ComputeMarker(code)); LowLevelCodeCreateEvent(code, &msg); msg.Append('\n'); msg.WriteToLogFile(); @@ -863,42 +881,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) { } -void Logger::FunctionCreateEvent(JSFunction* function) { -#ifdef ENABLE_LOGGING_AND_PROFILING - // This function can be called from GC iterators (during Scavenge, - // MC, and MS), so marking bits can be set on objects. That's - // why unchecked accessors are used here. - if (!Log::IsEnabled() || !FLAG_log_code) return; - LogMessageBuilder msg; - msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]); - msg.AppendAddress(function->address()); - msg.Append(','); - msg.AppendAddress(function->unchecked_code()->address()); - msg.Append('\n'); - msg.WriteToLogFile(); -#endif -} - - -void Logger::FunctionCreateEventFromMove(JSFunction* function) { -#ifdef ENABLE_LOGGING_AND_PROFILING - if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) { - FunctionCreateEvent(function); - } -#endif -} - - -void Logger::FunctionMoveEvent(Address from, Address to) { +void Logger::SFIMoveEvent(Address from, Address to) { #ifdef ENABLE_LOGGING_AND_PROFILING - MoveEventInternal(FUNCTION_MOVE_EVENT, from, to); -#endif -} - - -void Logger::FunctionDeleteEvent(Address from) { -#ifdef ENABLE_LOGGING_AND_PROFILING - DeleteEventInternal(FUNCTION_DELETE_EVENT, from); + MoveEventInternal(SFI_MOVE_EVENT, from, to); #endif } @@ -1118,7 +1103,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) { msg.Append(','); msg.AppendAddress(sample->sp); msg.Append(','); - msg.AppendAddress(sample->function); + msg.AppendAddress(sample->tos); msg.Append(",%d", static_cast<int>(sample->state)); if (overflow) { msg.Append(",overflow"); @@ -1187,7 +1172,6 @@ void Logger::ResumeProfiler(int flags, int tag) { LOG(UncheckedStringEvent("profiler", "resume")); FLAG_log_code = true; LogCompiledFunctions(); - LogFunctionObjects(); LogAccessorCallbacks(); if (!FLAG_sliding_state_window && !ticker_->IsActive()) { ticker_->Start(); @@ -1388,10 +1372,9 @@ void Logger::LogCompiledFunctions() { // During iteration, there can be heap allocation due to // GetScriptLineNumber call. for (int i = 0; i < compiled_funcs_count; ++i) { + if (*code_objects[i] == Builtins::builtin(Builtins::LazyCompile)) continue; Handle<SharedFunctionInfo> shared = sfis[i]; - Handle<String> name(String::cast(shared->name())); - Handle<String> func_name(name->length() > 0 ? 
- *name : shared->inferred_name()); + Handle<String> func_name(shared->DebugName()); if (shared->script()->IsScript()) { Handle<Script> script(Script::cast(shared->script())); if (script->name()->IsString()) { @@ -1400,18 +1383,18 @@ void Logger::LogCompiledFunctions() { if (line_num > 0) { PROFILE(CodeCreateEvent( Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script), - *code_objects[i], *func_name, + *code_objects[i], *shared, *script_name, line_num + 1)); } else { // Can't distinguish eval and script here, so always use Script. PROFILE(CodeCreateEvent( Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), - *code_objects[i], *script_name)); + *code_objects[i], *shared, *script_name)); } } else { PROFILE(CodeCreateEvent( Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script), - *code_objects[i], *func_name)); + *code_objects[i], *shared, *func_name)); } } else if (shared->IsApiFunction()) { // API function. @@ -1425,24 +1408,12 @@ void Logger::LogCompiledFunctions() { } } else { PROFILE(CodeCreateEvent( - Logger::LAZY_COMPILE_TAG, *code_objects[i], *func_name)); + Logger::LAZY_COMPILE_TAG, *code_objects[i], *shared, *func_name)); } } } -void Logger::LogFunctionObjects() { - AssertNoAllocation no_alloc; - HeapIterator iterator; - for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { - if (!obj->IsJSFunction()) continue; - JSFunction* jsf = JSFunction::cast(obj); - if (!jsf->is_compiled()) continue; - PROFILE(FunctionCreateEvent(jsf)); - } -} - - void Logger::LogAccessorCallbacks() { AssertNoAllocation no_alloc; HeapIterator iterator; @@ -91,9 +91,7 @@ class LogMessageBuilder; V(CODE_MOVE_EVENT, "code-move") \ V(CODE_DELETE_EVENT, "code-delete") \ V(CODE_MOVING_GC, "code-moving-gc") \ - V(FUNCTION_CREATION_EVENT, "function-creation") \ - V(FUNCTION_MOVE_EVENT, "function-move") \ - V(FUNCTION_DELETE_EVENT, "function-delete") \ + V(SFI_MOVE_EVENT, "sfi-move") \ V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \ V(TICK_EVENT, "tick") \ V(REPEAT_META_EVENT, "repeat") \ @@ -205,8 +203,15 @@ class Logger { // Emits a code create event. static void CodeCreateEvent(LogEventsAndTags tag, Code* code, const char* source); - static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name); - static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name, + static void CodeCreateEvent(LogEventsAndTags tag, + Code* code, String* name); + static void CodeCreateEvent(LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, + String* name); + static void CodeCreateEvent(LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, String* source, int line); static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count); static void CodeMovingGCEvent(); @@ -216,13 +221,8 @@ class Logger { static void CodeMoveEvent(Address from, Address to); // Emits a code delete event. static void CodeDeleteEvent(Address from); - // Emits a function object create event. - static void FunctionCreateEvent(JSFunction* function); - static void FunctionCreateEventFromMove(JSFunction* function); - // Emits a function move event. - static void FunctionMoveEvent(Address from, Address to); - // Emits a function delete event. - static void FunctionDeleteEvent(Address from); + + static void SFIMoveEvent(Address from, Address to); static void SnapshotPositionEvent(Address addr, int pos); @@ -273,8 +273,6 @@ class Logger { // Logs all compiled functions found in the heap. 
static void LogCompiledFunctions(); - // Logs all compiled JSFunction objects found in the heap. - static void LogFunctionObjects(); // Logs all accessor callbacks found in the heap. static void LogAccessorCallbacks(); // Used for logging stubs found in the snapshot. diff --git a/src/macro-assembler.h b/src/macro-assembler.h index d261f57d..30838bd7 100644 --- a/src/macro-assembler.h +++ b/src/macro-assembler.h @@ -50,6 +50,13 @@ enum HandlerType { }; +// Types of uncatchable exceptions. +enum UncatchableExceptionType { + OUT_OF_MEMORY, + TERMINATION +}; + + // Invalid depth in prototype chain. const int kInvalidProtoDepth = -1; diff --git a/src/mark-compact.cc b/src/mark-compact.cc index 5c649d17..a4c782c5 100644 --- a/src/mark-compact.cc +++ b/src/mark-compact.cc @@ -1353,6 +1353,9 @@ void MarkCompactCollector::MarkLiveObjects() { // Flush code from collected candidates. FlushCode::ProcessCandidates(); + + // Clean up dead objects from the runtime profiler. + RuntimeProfiler::RemoveDeadSamples(); } @@ -1937,6 +1940,9 @@ static void SweepNewSpace(NewSpace* space) { // All pointers were updated. Update auxiliary allocation info. Heap::IncrementYoungSurvivorsCounter(survivors_size); space->set_age_mark(space->top()); + + // Update JSFunction pointers from the runtime profiler. + RuntimeProfiler::UpdateSamplesAfterScavenge(); } @@ -2535,6 +2541,7 @@ void MarkCompactCollector::UpdatePointers() { state_ = UPDATE_POINTERS; #endif UpdatingVisitor updating_visitor; + RuntimeProfiler::UpdateSamplesAfterCompact(&updating_visitor); Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG); GlobalHandles::IterateWeakRoots(&updating_visitor); @@ -2819,9 +2826,8 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj, ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsJSFunction()) { - PROFILE(FunctionMoveEvent(old_addr, new_addr)); - PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to))); + if (copied_to->IsSharedFunctionInfo()) { + PROFILE(SFIMoveEvent(old_addr, new_addr)); } HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr)); @@ -2912,9 +2918,8 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) { #endif HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsJSFunction()) { - PROFILE(FunctionMoveEvent(old_addr, new_addr)); - PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to))); + if (copied_to->IsSharedFunctionInfo()) { + PROFILE(SFIMoveEvent(old_addr, new_addr)); } HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr)); @@ -2931,8 +2936,6 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { #ifdef ENABLE_LOGGING_AND_PROFILING if (obj->IsCode()) { PROFILE(CodeDeleteEvent(obj->address())); - } else if (obj->IsJSFunction()) { - PROFILE(FunctionDeleteEvent(obj->address())); } #endif } diff --git a/src/messages.js b/src/messages.js index 1e41b178..2c94912f 100644 --- a/src/messages.js +++ b/src/messages.js @@ -224,6 +224,12 @@ function FormatMessage(message) { strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"], strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"], strict_reserved_word: ["Use of future reserved word in strict mode"], + strict_delete: ["Delete of an unqualified identifier in strict mode."], + strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"], + strict_const: ["Use of const in strict mode."], + 
strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ], + strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"], + strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"], }; } var message_type = %MessageGetType(message); @@ -1057,8 +1063,8 @@ function errorToString() { } } -%FunctionSetName(errorToString, 'toString'); -%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM); + +InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]); // Boilerplate for exceptions for stack overflows. Used from // Top::StackOverflow(). diff --git a/src/objects-inl.h b/src/objects-inl.h index 24887a0e..dedb1995 100644 --- a/src/objects-inl.h +++ b/src/objects-inl.h @@ -769,6 +769,10 @@ bool Object::HasSpecificClassOf(String* name) { MaybeObject* Object::GetElement(uint32_t index) { + // GetElement can trigger a getter which can cause allocation. + // This was not always the case. This ASSERT is here to catch + // leftover incorrect uses. + ASSERT(Heap::IsAllocationAllowed()); return GetElementWithReceiver(this, index); } @@ -2615,7 +2619,8 @@ Code::Flags Code::ComputeFlags(Kind kind, ASSERT(extra_ic_state == kNoExtraICState || (kind == CALL_IC && (ic_state == MONOMORPHIC || ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) || - (kind == STORE_IC)); + (kind == STORE_IC) || + (kind == KEYED_STORE_IC)); // Compute the bit mask. int bits = kind << kFlagsKindShift; if (in_loop) bits |= kFlagsICInLoopMask; @@ -3737,7 +3742,8 @@ MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) { ASSERT(!IsJSGlobalProxy()); return SetPropertyPostInterceptor(Heap::hidden_symbol(), hidden_obj, - DONT_ENUM); + DONT_ENUM, + kNonStrictMode); } diff --git a/src/objects.cc b/src/objects.cc index 5003b4f8..4c005918 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -531,10 +531,25 @@ MaybeObject* Object::GetProperty(Object* receiver, MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) { - // Non-JS objects do not have integer indexed properties. - if (!IsJSObject()) return Heap::undefined_value(); - return JSObject::cast(this)->GetElementWithReceiver(JSObject::cast(receiver), - index); + if (IsJSObject()) { + return JSObject::cast(this)->GetElementWithReceiver(receiver, index); + } + + Object* holder = NULL; + Context* global_context = Top::context()->global_context(); + if (IsString()) { + holder = global_context->string_function()->instance_prototype(); + } else if (IsNumber()) { + holder = global_context->number_function()->instance_prototype(); + } else if (IsBoolean()) { + holder = global_context->boolean_function()->instance_prototype(); + } else { + // Undefined and null have no indexed properties. + ASSERT(IsUndefined() || IsNull()); + return Heap::undefined_value(); + } + + return JSObject::cast(holder)->GetElementWithReceiver(receiver, index); } @@ -1399,7 +1414,7 @@ MaybeObject* JSObject::AddProperty(String* name, if (!map()->is_extensible()) { Handle<Object> args[1] = {Handle<String>(name)}; return Top::Throw(*Factory::NewTypeError("object_not_extensible", - HandleVector(args, 1))); + HandleVector(args, 1))); } if (HasFastProperties()) { // Ensure the descriptor array does not get too big. 
@@ -1429,14 +1444,15 @@ MaybeObject* JSObject::AddProperty(String* name, MaybeObject* JSObject::SetPropertyPostInterceptor( String* name, Object* value, - PropertyAttributes attributes) { + PropertyAttributes attributes, + StrictModeFlag strict_mode) { // Check local property, ignore interceptor. LookupResult result; LocalLookupRealNamedProperty(name, &result); if (result.IsFound()) { // An existing property, a map transition or a null descriptor was // found. Use set property to handle all these cases. - return SetProperty(&result, name, value, attributes); + return SetProperty(&result, name, value, attributes, strict_mode); } // Add a new real property. return AddProperty(name, value, attributes); @@ -1561,7 +1577,8 @@ MaybeObject* JSObject::ConvertDescriptorToField(String* name, MaybeObject* JSObject::SetPropertyWithInterceptor( String* name, Object* value, - PropertyAttributes attributes) { + PropertyAttributes attributes, + StrictModeFlag strict_mode) { HandleScope scope; Handle<JSObject> this_handle(this); Handle<String> name_handle(name); @@ -1590,7 +1607,8 @@ MaybeObject* JSObject::SetPropertyWithInterceptor( MaybeObject* raw_result = this_handle->SetPropertyPostInterceptor(*name_handle, *value_handle, - attributes); + attributes, + strict_mode); RETURN_IF_SCHEDULED_EXCEPTION(); return raw_result; } @@ -1598,10 +1616,11 @@ MaybeObject* JSObject::SetPropertyWithInterceptor( MaybeObject* JSObject::SetProperty(String* name, Object* value, - PropertyAttributes attributes) { + PropertyAttributes attributes, + StrictModeFlag strict_mode) { LookupResult result; LocalLookup(name, &result); - return SetProperty(&result, name, value, attributes); + return SetProperty(&result, name, value, attributes, strict_mode); } @@ -1881,7 +1900,8 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result, MaybeObject* JSObject::SetProperty(LookupResult* result, String* name, Object* value, - PropertyAttributes attributes) { + PropertyAttributes attributes, + StrictModeFlag strict_mode) { // Make sure that the top context does not change when doing callbacks or // interceptor calls. AssertNoContextChange ncc; @@ -1908,7 +1928,8 @@ MaybeObject* JSObject::SetProperty(LookupResult* result, Object* proto = GetPrototype(); if (proto->IsNull()) return value; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->SetProperty(result, name, value, attributes); + return JSObject::cast(proto)->SetProperty( + result, name, value, attributes, strict_mode); } if (!result->IsProperty() && !IsJSContextExtensionObject()) { @@ -1927,7 +1948,18 @@ MaybeObject* JSObject::SetProperty(LookupResult* result, // Neither properties nor transitions found. return AddProperty(name, value, attributes); } - if (result->IsReadOnly() && result->IsProperty()) return value; + if (result->IsReadOnly() && result->IsProperty()) { + if (strict_mode == kStrictMode) { + HandleScope scope; + Handle<String> key(name); + Handle<Object> holder(this); + Handle<Object> args[2] = { key, holder }; + return Top::Throw(*Factory::NewTypeError("strict_read_only_property", + HandleVector(args, 2))); + } else { + return value; + } + } // This is a real property that is not read-only, or it is a // transition or null descriptor and there are no setters in the prototypes. 
switch (result->type()) { @@ -1955,7 +1987,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result, value, result->holder()); case INTERCEPTOR: - return SetPropertyWithInterceptor(name, value, attributes); + return SetPropertyWithInterceptor(name, value, attributes, strict_mode); case CONSTANT_TRANSITION: { // If the same constant function is being added we can simply // transition to the target map. @@ -2620,7 +2652,17 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { NumberDictionary* dictionary = element_dictionary(); int entry = dictionary->FindEntry(index); if (entry != NumberDictionary::kNotFound) { - return dictionary->DeleteProperty(entry, mode); + Object* result = dictionary->DeleteProperty(entry, mode); + if (mode == STRICT_DELETION && result == Heap::false_value()) { + // In strict mode, deleting a non-configurable property throws + // exception. dictionary->DeleteProperty will return false_value() + // if a non-configurable property is being deleted. + HandleScope scope; + Handle<Object> i = Factory::NewNumberFromUint(index); + Handle<Object> args[2] = { i, Handle<Object>(this) }; + return Top::Throw(*Factory::NewTypeError("strict_delete_property", + HandleVector(args, 2))); + } } break; } @@ -2659,6 +2701,13 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) { if (!result.IsProperty()) return Heap::true_value(); // Ignore attributes if forcing a deletion. if (result.IsDontDelete() && mode != FORCE_DELETION) { + if (mode == STRICT_DELETION) { + // Deleting a non-configurable property in strict mode. + HandleScope scope; + Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) }; + return Top::Throw(*Factory::NewTypeError("strict_delete_property", + HandleVector(args, 2))); + } return Heap::false_value(); } // Check for interceptor. @@ -2781,6 +2830,12 @@ bool JSObject::ReferencesObject(Object* obj) { MaybeObject* JSObject::PreventExtensions() { + if (IsAccessCheckNeeded() && + !Top::MayNamedAccess(this, Heap::undefined_value(), v8::ACCESS_KEYS)) { + Top::ReportFailedAccessCheck(this, v8::ACCESS_KEYS); + return Heap::false_value(); + } + if (IsJSGlobalProxy()) { Object* proto = GetPrototype(); if (proto->IsNull()) return this; @@ -5438,9 +5493,11 @@ uint32_t JSFunction::SourceHash() { bool JSFunction::IsInlineable() { if (IsBuiltin()) return false; + SharedFunctionInfo* shared_info = shared(); // Check that the function has a script associated with it. - if (!shared()->script()->IsScript()) return false; - Code* code = shared()->code(); + if (!shared_info->script()->IsScript()) return false; + if (shared_info->optimization_disabled()) return false; + Code* code = shared_info->code(); if (code->kind() == Code::OPTIMIZED_FUNCTION) return true; // If we never ran this (unlikely) then lets try to optimize it. if (code->kind() != Code::FUNCTION) return true; @@ -5494,6 +5551,10 @@ MaybeObject* JSFunction::SetPrototype(Object* value) { Object* JSFunction::RemovePrototype() { + if (map() == context()->global_context()->function_without_prototype_map()) { + // Be idempotent. 
+ return this; + } ASSERT(map() == context()->global_context()->function_map()); set_map(context()->global_context()->function_without_prototype_map()); set_prototype_or_initial_map(Heap::the_hole_value()); @@ -6247,7 +6308,8 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) { } break; case STORE_IC: - if (extra == StoreIC::kStoreICStrict) { + case KEYED_STORE_IC: + if (extra == kStrictMode) { name = "STRICT"; } break; @@ -6463,13 +6525,6 @@ void JSArray::Expand(int required_size) { } -// Computes the new capacity when expanding the elements of a JSObject. -static int NewElementsCapacity(int old_capacity) { - // (old_capacity + 50%) + 16 - return old_capacity + (old_capacity >> 1) + 16; -} - - static Failure* ArrayLengthRangeError() { HandleScope scope; return Top::Throw(*Factory::NewRangeError("invalid_array_length", @@ -6628,7 +6683,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) { break; } case PIXEL_ELEMENTS: { - // TODO(iposva): Add testcase. PixelArray* pixels = PixelArray::cast(elements()); if (index < static_cast<uint32_t>(pixels->length())) { return true; @@ -6642,7 +6696,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) { case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS: { - // TODO(kbr): Add testcase. ExternalArray* array = ExternalArray::cast(elements()); if (index < static_cast<uint32_t>(array->length())) { return true; @@ -6846,6 +6899,7 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) { MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index, Object* value, + StrictModeFlag strict_mode, bool check_prototype) { // Make sure that the top context does not change when doing // callbacks or interceptor calls. @@ -6872,6 +6926,7 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index, MaybeObject* raw_result = this_handle->SetElementWithoutInterceptor(index, *value_handle, + strict_mode, check_prototype); RETURN_IF_SCHEDULED_EXCEPTION(); return raw_result; @@ -6985,6 +7040,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, // elements. MaybeObject* JSObject::SetFastElement(uint32_t index, Object* value, + StrictModeFlag strict_mode, bool check_prototype) { ASSERT(HasFastElements()); @@ -7041,12 +7097,13 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, if (!maybe_obj->ToObject(&obj)) return maybe_obj; } ASSERT(HasDictionaryElements()); - return SetElement(index, value, check_prototype); + return SetElement(index, value, strict_mode, check_prototype); } MaybeObject* JSObject::SetElement(uint32_t index, Object* value, + StrictModeFlag strict_mode, bool check_prototype) { // Check access rights if needed. 
   if (IsAccessCheckNeeded() &&
@@ -7061,25 +7118,35 @@ MaybeObject* JSObject::SetElement(uint32_t index,
     Object* proto = GetPrototype();
     if (proto->IsNull()) return value;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->SetElement(index, value, check_prototype);
+    return JSObject::cast(proto)->SetElement(index,
+                                             value,
+                                             strict_mode,
+                                             check_prototype);
   }
 
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
-    return SetElementWithInterceptor(index, value, check_prototype);
+    return SetElementWithInterceptor(index,
+                                     value,
+                                     strict_mode,
+                                     check_prototype);
   }
 
-  return SetElementWithoutInterceptor(index, value, check_prototype);
+  return SetElementWithoutInterceptor(index,
+                                      value,
+                                      strict_mode,
+                                      check_prototype);
 }
 
 
 MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
                                                     Object* value,
+                                                    StrictModeFlag strict_mode,
                                                     bool check_prototype) {
   switch (GetElementsKind()) {
     case FAST_ELEMENTS:
       // Fast case.
-      return SetFastElement(index, value, check_prototype);
+      return SetFastElement(index, value, strict_mode, check_prototype);
     case PIXEL_ELEMENTS: {
       PixelArray* pixels = PixelArray::cast(elements());
       return pixels->SetValue(index, value);
@@ -7128,13 +7195,23 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
         return SetElementWithCallback(element, index, value, this);
       } else {
         dictionary->UpdateMaxNumberKey(index);
-        dictionary->ValueAtPut(entry, value);
+        // If the put fails in strict mode, throw an exception.
+        if (!dictionary->ValueAtPut(entry, value) &&
+            strict_mode == kStrictMode) {
+          Handle<Object> number(Factory::NewNumberFromUint(index));
+          Handle<Object> holder(this);
+          Handle<Object> args[2] = { number, holder };
+          return Top::Throw(
+              *Factory::NewTypeError("strict_read_only_property",
+                                     HandleVector(args, 2)));
+        }
       }
     } else {
       // Index not already used. Look for an accessor in the prototype chain.
       if (check_prototype) {
         bool found;
         MaybeObject* result =
+            // Strict mode not needed. No-setter case already handled.
            SetElementWithCallbackSetterInPrototypes(index, value, &found);
         if (found) return result;
       }
@@ -7220,7 +7297,7 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
 }
 
 
-MaybeObject* JSObject::GetElementPostInterceptor(JSObject* receiver,
+MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
                                                  uint32_t index) {
   // Get element works for both JSObject and JSArray since
   // JSArray::length cannot change.
@@ -7233,11 +7310,7 @@ MaybeObject* JSObject::GetElementPostInterceptor(JSObject* receiver,
       }
       break;
     }
-    case PIXEL_ELEMENTS: {
-      // TODO(iposva): Add testcase and implement.
-      UNIMPLEMENTED();
-      break;
-    }
+    case PIXEL_ELEMENTS:
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
     case EXTERNAL_SHORT_ELEMENTS:
@@ -7245,8 +7318,10 @@
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS: {
-      // TODO(kbr): Add testcase and implement.
- UNIMPLEMENTED(); + MaybeObject* maybe_value = GetExternalElement(index); + Object* value; + if (!maybe_value->ToObject(&value)) return maybe_value; + if (!value->IsUndefined()) return value; break; } case DICTIONARY_ELEMENTS: { @@ -7277,14 +7352,14 @@ MaybeObject* JSObject::GetElementPostInterceptor(JSObject* receiver, } -MaybeObject* JSObject::GetElementWithInterceptor(JSObject* receiver, +MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver, uint32_t index) { // Make sure that the top context does not change when doing // callbacks or interceptor calls. AssertNoContextChange ncc; HandleScope scope; Handle<InterceptorInfo> interceptor(GetIndexedInterceptor()); - Handle<JSObject> this_handle(receiver); + Handle<Object> this_handle(receiver); Handle<JSObject> holder_handle(this); if (!interceptor->getter()->IsUndefined()) { @@ -7310,7 +7385,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(JSObject* receiver, } -MaybeObject* JSObject::GetElementWithReceiver(JSObject* receiver, +MaybeObject* JSObject::GetElementWithReceiver(Object* receiver, uint32_t index) { // Check access rights if needed. if (IsAccessCheckNeeded() && @@ -7334,6 +7409,48 @@ MaybeObject* JSObject::GetElementWithReceiver(JSObject* receiver, } break; } + case PIXEL_ELEMENTS: + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: { + MaybeObject* maybe_value = GetExternalElement(index); + Object* value; + if (!maybe_value->ToObject(&value)) return maybe_value; + if (!value->IsUndefined()) return value; + break; + } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + Object* element = dictionary->ValueAt(entry); + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.type() == CALLBACKS) { + return GetElementWithCallback(receiver, + element, + index, + this); + } + return element; + } + break; + } + } + + Object* pt = GetPrototype(); + if (pt == Heap::null_value()) return Heap::undefined_value(); + return pt->GetElementWithReceiver(receiver, index); +} + + +MaybeObject* JSObject::GetExternalElement(uint32_t index) { + // Get element works for both JSObject and JSArray since + // JSArray::length cannot change. 
+ switch (GetElementsKind()) { case PIXEL_ELEMENTS: { PixelArray* pixels = PixelArray::cast(elements()); if (index < static_cast<uint32_t>(pixels->length())) { @@ -7401,27 +7518,12 @@ MaybeObject* JSObject::GetElementWithReceiver(JSObject* receiver, } break; } - case DICTIONARY_ELEMENTS: { - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - Object* element = dictionary->ValueAt(entry); - PropertyDetails details = dictionary->DetailsAt(entry); - if (details.type() == CALLBACKS) { - return GetElementWithCallback(receiver, - element, - index, - this); - } - return element; - } + case FAST_ELEMENTS: + case DICTIONARY_ELEMENTS: + UNREACHABLE(); break; - } } - - Object* pt = GetPrototype(); - if (pt == Heap::null_value()) return Heap::undefined_value(); - return pt->GetElementWithReceiver(receiver, index); + return Heap::undefined_value(); } @@ -9345,7 +9447,7 @@ Object* Dictionary<Shape, Key>::DeleteProperty(int entry, JSObject::DeleteMode mode) { PropertyDetails details = DetailsAt(entry); // Ignore attributes if forcing a deletion. - if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) { + if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) { return Heap::false_value(); } SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0)); diff --git a/src/objects.h b/src/objects.h index 264cc0bc..d7b87c65 100644 --- a/src/objects.h +++ b/src/objects.h @@ -1286,7 +1286,12 @@ class HeapNumber: public HeapObject { // caching. class JSObject: public HeapObject { public: - enum DeleteMode { NORMAL_DELETION, FORCE_DELETION }; + enum DeleteMode { + NORMAL_DELETION, + STRICT_DELETION, + FORCE_DELETION + }; + enum ElementsKind { // The only "fast" kind. FAST_ELEMENTS, @@ -1356,11 +1361,13 @@ class JSObject: public HeapObject { MUST_USE_RESULT MaybeObject* SetProperty(String* key, Object* value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result, String* key, Object* value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck( LookupResult* result, String* name, @@ -1375,11 +1382,13 @@ class JSObject: public HeapObject { MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor( String* name, Object* value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor( String* name, Object* value, - PropertyAttributes attributes); + PropertyAttributes attributes, + StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes( String* key, Object* value, @@ -1506,6 +1515,12 @@ class JSObject: public HeapObject { inline bool HasElement(uint32_t index); bool HasElementWithReceiver(JSObject* receiver, uint32_t index); + // Computes the new capacity when expanding the elements of a JSObject. + static int NewElementsCapacity(int old_capacity) { + // (old_capacity + 50%) + 16 + return old_capacity + (old_capacity >> 1) + 16; + } + // Tells whether the index'th element is present and how it is stored. enum LocalElementType { // There is no element with given index. 
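An aside on the NewElementsCapacity helper in the objects.h hunk above: growing by half the old capacity plus a constant keeps repeated element stores amortized O(1) while still jumping quickly past very small backing stores. A minimal standalone C++ sketch of that growth schedule (the function body is copied from the hunk; everything else, including the driver loop, is illustrative and not from the patch):

#include <cstdio>

// Growth formula from the hunk above: (old_capacity + 50%) + 16.
static int NewElementsCapacity(int old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;
}

int main() {
  int capacity = 0;
  // Print the first few steps of the schedule: 16, 40, 76, 130, 211.
  for (int step = 0; step < 5; ++step) {
    capacity = NewElementsCapacity(capacity);
    std::printf("capacity after step %d: %d\n", step, capacity);
  }
  return 0;
}

The constant term dominates for small arrays (so a one-element array does not get a one-element store), while the 50% term dominates asymptotically.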
@@ -1531,18 +1546,25 @@
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype = true);
// Set the index'th array element.
// A Failure object is returned if GC is needed.
MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype = true);
// Returns the index'th element.
// The undefined object if index is out of bounds.
- MaybeObject* GetElementWithReceiver(JSObject* receiver, uint32_t index);
- MaybeObject* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
+ MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
+ MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
+
+ // Get the external element value at index if there is one, and undefined
+ // otherwise. Can return a failure if allocation of a heap number
+ // failed.
+ MaybeObject* GetExternalElement(uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
@@ -1791,15 +1813,18 @@ class JSObject: public HeapObject {
uint32_t index,
Object* value,
JSObject* holder);
- MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(uint32_t index,
- Object* value,
- bool check_prototype);
+ MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype);
- MaybeObject* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
+ MaybeObject* GetElementPostInterceptor(Object* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
@@ -2433,13 +2458,18 @@ class Dictionary: public HashTable<Shape, Key> {
}
// Set the value for entry.
- void ValueAtPut(int entry, Object* value) {
+ // Returns false if the put wasn't performed because the property is
+ // read-only. Returns true on a successful put.
+ bool ValueAtPut(int entry, Object* value) {
// Check that this value can actually be written.
PropertyDetails details = DetailsAt(entry);
// If a value has not been initialized, we allow writing to it even if
// it is read only (a declared const that has not been initialized).
- if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) return;
- this->set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
+ if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) {
+ return false;
+ }
+ this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
+ return true;
}
// Returns the property details for the property at entry.
@@ -4365,7 +4395,6 @@ class SharedFunctionInfo: public HeapObject {
kThisPropertyAssignmentsOffset + kPointerSize,
kSize> BodyDescriptor;
- private:
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
@@ -4384,6 +4413,35 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptimizationDisabled = 7;
static const int kStrictModeFunction = 8;
+ private:
+#if V8_HOST_ARCH_32_BIT
+ // On 32-bit platforms, the compiler hints field is a smi.
+ static const int kCompilerHintsSmiTagSize = kSmiTagSize;
+ static const int kCompilerHintsSize = kPointerSize;
+#else
+ // On 64-bit platforms, the compiler hints field is not a smi; see the comment above.
+ static const int kCompilerHintsSmiTagSize = 0;
+ static const int kCompilerHintsSize = kIntSize;
+#endif
+
+ public:
+ // Constants for optimizing codegen for strict mode function tests.
+ // Allows the use of byte-width instructions.
+ static const int kStrictModeBitWithinByte =
+ (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ static const int kStrictModeByteOffset = kCompilerHintsOffset +
+ (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ static const int kStrictModeByteOffset = kCompilerHintsOffset +
+ (kCompilerHintsSize - 1) -
+ ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+#else
+#error Unknown byte ordering
+#endif
+
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
diff --git a/src/parser.cc b/src/parser.cc
index 04d510f2..3c361a7e 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -803,10 +803,12 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
MessageLocation location(script_,
source_location.beg_pos,
source_location.end_pos);
- Handle<JSArray> array = Factory::NewJSArray(args.length());
+ Handle<FixedArray> elements = Factory::NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
- SetElement(array, i, Factory::NewStringFromUtf8(CStrVector(args[i])));
+ Handle<String> arg_string = Factory::NewStringFromUtf8(CStrVector(args[i]));
+ elements->set(i, *arg_string);
}
+ Handle<JSArray> array = Factory::NewJSArrayWithElements(elements);
Handle<Object> result = Factory::NewSyntaxError(type, array);
Top::Throw(*result, &location);
}
@@ -818,10 +820,11 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
MessageLocation location(script_,
source_location.beg_pos,
source_location.end_pos);
- Handle<JSArray> array = Factory::NewJSArray(args.length());
+ Handle<FixedArray> elements = Factory::NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
- SetElement(array, i, args[i]);
+ elements->set(i, *args[i]);
}
+ Handle<JSArray> array = Factory::NewJSArrayWithElements(elements);
Handle<Object> result = Factory::NewSyntaxError(type, array);
Top::Throw(*result, &location);
}
@@ -1106,7 +1109,20 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
Scanner::Location token_loc = scanner().peek_location();
- Statement* stat = ParseStatement(NULL, CHECK_OK);
+
+ Statement* stat;
+ if (peek() == Token::FUNCTION) {
+ // FunctionDeclaration is only allowed in the context of SourceElements
+ // (Ecma 262 5th Edition, clause 14):
+ // SourceElement:
+ // Statement
+ // FunctionDeclaration
+ // A common language extension is to allow function declarations in
+ // place of any statement. This extension is disabled in strict mode.
+ stat = ParseFunctionDeclaration(CHECK_OK);
+ } else {
+ stat = ParseStatement(NULL, CHECK_OK);
+ }
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
@@ -1263,8 +1279,17 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
return result;
}
- case Token::FUNCTION:
+ case Token::FUNCTION: {
+ // In strict mode, FunctionDeclaration is only allowed in the context
+ // of SourceElements.
+ if (temp_scope_->StrictMode()) {
+ ReportMessageAt(scanner().peek_location(), "strict_function",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
return ParseFunctionDeclaration(ok);
+ }
case Token::NATIVE:
return ParseNativeDeclaration(ok);
@@ -1515,6 +1540,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
Consume(Token::CONST);
+ if (temp_scope_->StrictMode()) {
+ ReportMessage("strict_const", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
mode = Variable::CONST;
is_const = true;
} else {
@@ -1634,34 +1664,49 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
if (top_scope_->is_global_scope()) {
// Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments = new ZoneList<Expression*>(2);
- // Be careful not to assign a value to the global variable if
- // we're in a with. The initialization value should not
- // necessarily be stored in the global object in that case,
- // which is why we need to generate a separate assignment node.
+ ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
arguments->Add(new Literal(name)); // we have at least 1 parameter
- if (is_const || (value != NULL && !inside_with())) {
- arguments->Add(value);
- value = NULL; // zap the value to avoid the unnecessary assignment
- }
- // Construct the call to Runtime::DeclareGlobal{Variable,Const}Locally
- // and add it to the initialization statement block. Note that
- // this function does different things depending on if we have
- // 1 or 2 parameters.
CallRuntime* initialize;
+ if (is_const) {
+ arguments->Add(value);
+ value = NULL; // zap the value to avoid the unnecessary assignment
+
+ // Construct the call to Runtime_InitializeConstGlobal
+ // and add it to the initialization statement block.
+ // Note that the function does different things depending on
+ // the number of arguments (1 or 2).
initialize =
- new CallRuntime(
- Factory::InitializeConstGlobal_symbol(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
+ new CallRuntime(
+ Factory::InitializeConstGlobal_symbol(),
+ Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+ arguments);
} else {
+ // Add the strict mode flag.
+ // We may want to pass a singleton here to avoid Literal allocations.
+ arguments->Add(NewNumberLiteral(
+ temp_scope_->StrictMode() ? kStrictMode : kNonStrictMode));
+
+ // Be careful not to assign a value to the global variable if
+ // we're in a with. The initialization value should not
+ // necessarily be stored in the global object in that case,
+ // which is why we need to generate a separate assignment node.
+ if (value != NULL && !inside_with()) {
+ arguments->Add(value);
+ value = NULL; // zap the value to avoid the unnecessary assignment
+ }
+
+ // Construct the call to Runtime_InitializeVarGlobal
+ // and add it to the initialization statement block.
+ // Note that the function does different things depending on
+ // the number of arguments (2 or 3).
initialize =
- new CallRuntime(
- Factory::InitializeVarGlobal_symbol(),
- Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
+ new CallRuntime(
+ Factory::InitializeVarGlobal_symbol(),
+ Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+ arguments);
}
+
block->AddStatement(new ExpressionStatement(initialize));
}
@@ -2521,6 +2566,16 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
}
+ // "delete identifier" is a syntax error in strict mode.
+ if (op == Token::DELETE && temp_scope_->StrictMode()) {
+ VariableProxy* operand = expression->AsVariableProxy();
+ if (operand != NULL && !operand->is_this()) {
+ ReportMessage("strict_delete", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ }
+
return new UnaryOperation(op, expression);
} else if (Token::IsCountOp(op)) {
@@ -3983,12 +4038,14 @@ Handle<Object> JsonParser::ParseJson(Handle<String> script,
MessageLocation location(Factory::NewScript(script),
source_location.beg_pos,
source_location.end_pos);
- int argc = (name_opt == NULL) ? 0 : 1;
- Handle<JSArray> array = Factory::NewJSArray(argc);
- if (name_opt != NULL) {
- SetElement(array,
- 0,
- Factory::NewStringFromUtf8(CStrVector(name_opt)));
+ Handle<JSArray> array;
+ if (name_opt == NULL) {
+ array = Factory::NewJSArray(0);
+ } else {
+ Handle<String> name = Factory::NewStringFromUtf8(CStrVector(name_opt));
+ Handle<FixedArray> element = Factory::NewFixedArray(1);
+ element->set(0, *name);
+ array = Factory::NewJSArrayWithElements(element);
}
Handle<Object> result = Factory::NewSyntaxError(message, array);
Top::Throw(*result, &location);
@@ -4060,7 +4117,7 @@ Handle<Object> JsonParser::ParseJsonObject() {
if (value.is_null()) return Handle<Object>::null();
uint32_t index;
if (key->AsArrayIndex(&index)) {
- SetOwnElement(json_object, index, value);
+ SetOwnElement(json_object, index, value, kNonStrictMode);
} else if (key->Equals(Heap::Proto_symbol())) {
// We can't remove the __proto__ accessor since it's hardcoded
// in several places. Instead go along and add the value as
@@ -4263,6 +4320,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
capture_index);
}
builder->AddAtom(body);
+ // For compatibility with JSC and ES3, we allow quantifiers after
+ // lookaheads, and break in all cases.
break;
}
case '|': {
@@ -4336,7 +4395,7 @@
type,
captures_started());
builder = stored_state->builder();
- break;
+ continue;
}
case '[': {
RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
@@ -4359,11 +4418,11 @@
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
continue;
- // AtomEscape ::
- // CharacterClassEscape
- //
- // CharacterClassEscape :: one of
- // d D s S w W
+ // AtomEscape ::
+ // CharacterClassEscape
+ //
+ // CharacterClassEscape :: one of
+ // d D s S w W
case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
uc32 c = Next();
Advance(2);
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
new file mode 100644
index 00000000..a7cc5256
--- /dev/null
+++ b/src/platform-cygwin.cc
@@ -0,0 +1,745 @@
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform-specific code for Cygwin goes here. For the POSIX-compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdarg.h>
+#include <strings.h> // index
+#include <sys/time.h>
+#include <sys/mman.h> // mmap & munmap
+#include <unistd.h> // sysconf
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+#include "top.h"
+#include "v8threads.h"
+#include "vm-state-inl.h"
+#include "win32-headers.h"
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+ return ceil(x);
+}
+
+
+void OS::Setup() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // Nothing special about Cygwin.
+}
+
+
+int OS::ActivationFrameAlignment() {
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16-byte alignment, such as movdqa on x86.
+ return 16;
+}
+
+
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ // An x86 store acts as a release barrier.
+ *ptr = value;
+}
+
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on Cygwin.
+}
+
+
+double OS::LocalTimeOffset() {
+ // On Cygwin, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+ // time - localtime includes any daylight savings offset, so subtract it.
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond -
+ (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification). The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap. The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
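A quick aside on OS::LocalTimeOffset() above: Cygwin's struct tm has no tm_gmtoff field, so the patch recovers the UTC offset by round-tripping the current UTC time through localtime()/mktime() and then stripping the daylight-saving hour. A standalone sketch of the same technique, using only standard C library calls (the printout is illustrative and not part of the patch):

#include <assert.h>
#include <stdio.h>
#include <time.h>

int main() {
  // Round-trip the current UTC time through the local calendar.
  time_t utc = time(NULL);
  assert(utc != -1);
  struct tm* loc = localtime(&utc);
  assert(loc != NULL);
  // mktime(loc) - utc includes any daylight-saving shift; remove it so the
  // result is the raw standard-time offset in milliseconds, as in the patch.
  double offset_ms = (mktime(loc) - utc) * 1000.0 -
                     (loc->tm_isdst > 0 ? 3600 * 1000.0 : 0);
  printf("local offset: %.0f ms east of UTC\n", offset_ms);
  return 0;
}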
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); +static void* highest_ever_allocated = reinterpret_cast<void*>(0); + + +static void UpdateAllocatedSpaceLimits(void* address, int size) { + lowest_ever_allocated = Min(lowest_ever_allocated, address); + highest_ever_allocated = + Max(highest_ever_allocated, + reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); +} + + +bool OS::IsOutsideAllocatedSpace(void* address) { + return address < lowest_ever_allocated || address >= highest_ever_allocated; +} + + +size_t OS::AllocateAlignment() { + return sysconf(_SC_PAGESIZE); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mbase == MAP_FAILED) { + LOG(StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + UpdateAllocatedSpaceLimits(mbase, msize); + return mbase; +} + + +void OS::Free(void* address, const size_t size) { + // TODO(1240712): munmap has a return value which is ignored here. + int result = munmap(address, size); + USE(result); + ASSERT(result == 0); +} + + +#ifdef ENABLE_HEAP_PROTECTION + +void OS::Protect(void* address, size_t size) { + // TODO(1240712): mprotect has a return value which is ignored here. + mprotect(address, size, PROT_READ); +} + + +void OS::Unprotect(void* address, size_t size, bool is_executable) { + // TODO(1240712): mprotect has a return value which is ignored here. + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + mprotect(address, size, prot); +} + +#endif + + +void OS::Sleep(int milliseconds) { + unsigned int ms = static_cast<unsigned int>(milliseconds); + usleep(1000 * ms); +} + + +void OS::Abort() { + // Redirect to std abort to signal abnormal program termination. 
+ abort();
+}
+
+
+void OS::DebugBreak() {
+ asm("int $3");
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation, we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
+ while (true) {
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF was unexpected; just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ LOG(SharedLibraryEvent(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to the end of the line to
+ // set up reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
+ }
+ free(lib_name);
+ fclose(fp);
+#endif
+}
+
+
+void OS::SignalCodeMovingGC() {
+ // Nothing to do on Cygwin.
+} + + +int OS::StackWalk(Vector<OS::StackFrame> frames) { + // Not supported on Cygwin. + return 0; +} + + +// Constants used for mmap. +static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory(size_t size) { + address_ = mmap(NULL, size, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, kMmapFdOffset); + size_ = size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + if (0 == munmap(address(), size())) address_ = MAP_FAILED; + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != MAP_FAILED; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + + if (mprotect(address, size, prot) != 0) { + return false; + } + + UpdateAllocatedSpaceLimits(address, size); + return true; +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return mmap(address, size, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, kMmapFdOffset) != MAP_FAILED; +} + + +class ThreadHandle::PlatformData : public Malloced { + public: + explicit PlatformData(ThreadHandle::Kind kind) { + Initialize(kind); + } + + void Initialize(ThreadHandle::Kind kind) { + switch (kind) { + case ThreadHandle::SELF: thread_ = pthread_self(); break; + case ThreadHandle::INVALID: thread_ = kNoThread; break; + } + } + + pthread_t thread_; // Thread handle for pthread. +}; + + +ThreadHandle::ThreadHandle(Kind kind) { + data_ = new PlatformData(kind); +} + + +void ThreadHandle::Initialize(ThreadHandle::Kind kind) { + data_->Initialize(kind); +} + + +ThreadHandle::~ThreadHandle() { + delete data_; +} + + +bool ThreadHandle::IsSelf() const { + return pthread_equal(data_->thread_, pthread_self()); +} + + +bool ThreadHandle::IsValid() const { + return data_->thread_ != kNoThread; +} + + +Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) { + set_name("v8:<unknown>"); +} + + +Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) { + set_name(name); +} + + +Thread::~Thread() { +} + + +static void* ThreadEntry(void* arg) { + Thread* thread = reinterpret_cast<Thread*>(arg); + // This is also initialized by the first argument to pthread_create() but we + // don't know which thread will run first (the original thread or the new + // one) so we initialize it here too. + thread->thread_handle_data()->thread_ = pthread_self(); + ASSERT(thread->IsValid()); + thread->Run(); + return NULL; +} + + +void Thread::set_name(const char* name) { + strncpy(name_, name, sizeof(name_)); + name_[sizeof(name_) - 1] = '\0'; +} + + +void Thread::Start() { + pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this); + ASSERT(IsValid()); +} + + +void Thread::Join() { + pthread_join(thread_handle_data()->thread_, NULL); +} + + +static inline Thread::LocalStorageKey PthreadKeyToLocalKey( + pthread_key_t pthread_key) { + // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps + // because pthread_key_t is a pointer type on Cygwin. This will probably not + // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway. 
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+ return static_cast<Thread::LocalStorageKey>(ptr_key);
+}
+
+
+static inline pthread_key_t LocalKeyToPthreadKey(
+ Thread::LocalStorageKey local_key) {
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = static_cast<intptr_t>(local_key);
+ return reinterpret_cast<pthread_key_t>(ptr_key);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ USE(result);
+ ASSERT(result == 0);
+ return PthreadKeyToLocalKey(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_key_delete(pthread_key);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class CygwinMutex : public Mutex {
+ public:
+ CygwinMutex() {
+ pthread_mutexattr_t attrs;
+ memset(&attrs, 0, sizeof(attrs));
+
+ int result = pthread_mutexattr_init(&attrs);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attrs);
+ ASSERT(result == 0);
+ }
+
+ virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ return result;
+ }
+
+ virtual int Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ return result;
+ }
+
+ virtual bool TryLock() {
+ int result = pthread_mutex_trylock(&mutex_);
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new CygwinMutex();
+}
+
+
+class CygwinSemaphore : public Semaphore {
+ public:
+ explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
+ virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
+
+ virtual void Wait();
+ virtual bool Wait(int timeout);
+ virtual void Signal() { sem_post(&sem_); }
+ private:
+ sem_t sem_;
+};
+
+
+void CygwinSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+} while (false)
+#endif
+
+
+bool CygwinSemaphore::Wait(int timeout) {
+ const long kOneSecondMicros = 1000000; // NOLINT
+
+ // Split timeout into second and nanosecond parts.
+ struct timeval delta;
+ delta.tv_usec = timeout % kOneSecondMicros;
+ delta.tv_sec = timeout / kOneSecondMicros;
+
+ struct timeval current_time;
+ // Get the current time.
+ if (gettimeofday(&current_time, NULL) == -1) {
+ return false;
+ }
+
+ // Calculate time for end of timeout.
+ struct timeval end_time;
+ timeradd(&current_time, &delta, &end_time);
+
+ struct timespec ts;
+ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+ // Wait until the semaphore is signalled or the timeout expires.
+ while (true) {
+ int result = sem_timedwait(&sem_, &ts);
+ if (result == 0) return true; // Successfully got semaphore.
+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+ return new CygwinSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// ----------------------------------------------------------------------------
+// Cygwin profiler support.
+//
+// On Cygwin we use the same sampler implementation as on win32.
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(Sampler* sampler) {
+ sampler_ = sampler;
+ sampler_thread_ = INVALID_HANDLE_VALUE;
+ profiled_thread_ = INVALID_HANDLE_VALUE;
+ }
+
+ Sampler* sampler_;
+ HANDLE sampler_thread_;
+ HANDLE profiled_thread_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Sampler thread handler.
+ void Runner() {
+ while (sampler_->IsActive()) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ Sample();
+ Sleep(sampler_->interval_);
+ }
+ }
+
+ void Sample() {
+ if (sampler_->IsProfiling()) {
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
+ sample->state = Top::current_vm_state();
+
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread_, &context) != 0) {
+#if V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(context.Rip);
+ sample->sp = reinterpret_cast<Address>(context.Rsp);
+ sample->fp = reinterpret_cast<Address>(context.Rbp);
+#else
+ sample->pc = reinterpret_cast<Address>(context.Eip);
+ sample->sp = reinterpret_cast<Address>(context.Esp);
+ sample->fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+ sampler_->SampleStack(sample);
+ sampler_->Tick(sample);
+ }
+ ResumeThread(profiled_thread_);
+ }
+ if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ }
+};
+
+
+// Entry point for sampler thread.
+static DWORD __stdcall SamplerEntry(void* arg) {
+ Sampler::PlatformData* data =
+ reinterpret_cast<Sampler::PlatformData*>(arg);
+ data->Runner();
+ return 0;
+}
+
+
+// Initialize a profile sampler.
+Sampler::Sampler(int interval)
+ : interval_(interval),
+ profiling_(false),
+ active_(false),
+ samples_taken_(0) {
+ data_ = new PlatformData(this);
+}
+
+
+Sampler::~Sampler() {
+ delete data_;
+}
+
+
+// Start profiling.
+void Sampler::Start() {
+ // Do not start multiple threads for the same sampler.
+ ASSERT(!IsActive());
+
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId());
+ BOOL ok = data_->profiled_thread_ != NULL;
+ if (!ok) return;
+
+ // Start sampler thread.
+ DWORD tid;
+ SetActive(true);
+ data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid);
+ // Set thread to high priority to increase sampling accuracy.
+ SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+}
+
+
+// Stop profiling.
+void Sampler::Stop() {
+ // Setting active to false triggers termination of the sampler
+ // thread.
+ SetActive(false);
+
+ // Wait for sampler thread to terminate.
+ Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ WaitForSingleObject(data_->sampler_thread_, INFINITE);
+
+ // Release the thread handles.
+ CloseHandle(data_->sampler_thread_);
+ CloseHandle(data_->profiled_thread_);
+}
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index c18049fe..21763b5d 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -224,7 +224,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 4a474b4d..16aa7c81 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -327,7 +327,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
@@ -873,6 +873,7 @@ class Sampler::PlatformData : public Malloced {
}
void SendProfilingSignal() {
+ if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
}
@@ -939,8 +940,8 @@ void Sampler::Start() {
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
+ data_->signal_handler_installed_ =
+ sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
// Start a thread that sends SIGPROF signal to VM thread.
// Sending the signal ourselves instead of relying on itimer provides diff --git a/src/platform-macos.cc b/src/platform-macos.cc index ea35c1b1..35724c35 100644 --- a/src/platform-macos.cc +++ b/src/platform-macos.cc @@ -205,7 +205,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc index 0002dd76..e2796294 100644 --- a/src/platform-openbsd.cc +++ b/src/platform-openbsd.cc @@ -222,7 +222,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc index 556e26be..ebe0475f 100644 --- a/src/platform-solaris.cc +++ b/src/platform-solaris.cc @@ -235,7 +235,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/src/platform-win32.cc b/src/platform-win32.cc index b5a85f66..f24994b5 100644 --- a/src/platform-win32.cc +++ b/src/platform-win32.cc @@ -939,7 +939,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { // Open a physical file HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); - if (file == NULL) return NULL; + if (file == INVALID_HANDLE_VALUE) return NULL; int size = static_cast<int>(GetFileSize(file, NULL)); diff --git a/src/platform.h b/src/platform.h index 0d7d2e9c..88825e64 100644 --- a/src/platform.h +++ b/src/platform.h @@ -567,13 +567,13 @@ class TickSample { pc(NULL), sp(NULL), fp(NULL), - function(NULL), + tos(NULL), frames_count(0) {} StateTag state; // The state of the VM. - Address pc; // Instruction pointer. - Address sp; // Stack pointer. - Address fp; // Frame pointer. - Address function; // The last called JS function. + Address pc; // Instruction pointer. + Address sp; // Stack pointer. + Address fp; // Frame pointer. + Address tos; // Top stack value (*sp). static const int kMaxFramesCount = 64; Address stack[kMaxFramesCount]; // Call stack. int frames_count; // Number of captured frames. 
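A side note on the repeated one-character fix in the platform files above (fopen mode "w+" changed to "r+"): "w+" truncates an existing file to zero length on open, so a memory-mapped file opened that way would silently come back empty, while "r+" opens the existing contents for read/write. A standalone POSIX sketch of the corrected open-then-map pattern; MapExistingFile is an illustrative name, not from the patch, and error handling is trimmed to the essentials:

#include <stdio.h>
#include <sys/mman.h>

// Map an existing file read/write, mirroring the fixed
// OS::MemoryMappedFile::open() in the hunks above.
void* MapExistingFile(const char* name, int* size_out) {
  FILE* file = fopen(name, "r+");  // "w+" here would erase the file first
  if (file == NULL) return NULL;
  fseek(file, 0, SEEK_END);        // find the current file size
  int size = ftell(file);
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  if (memory == MAP_FAILED) {
    fclose(file);
    return NULL;
  }
  *size_out = size;
  return memory;  // caller munmap()s the mapping when done
}

In the patch itself the FILE* is retained by PosixMemoryMappedFile and closed in its destructor; the sketch leaves that bookkeeping to the caller.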
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h index 3df6af06..747e5c72 100644 --- a/src/profile-generator-inl.h +++ b/src/profile-generator-inl.h @@ -45,16 +45,6 @@ const char* StringsStorage::GetFunctionName(const char* name) { } -CodeEntry::CodeEntry(int security_token_id) - : tag_(Logger::FUNCTION_TAG), - name_prefix_(kEmptyNamePrefix), - name_(""), - resource_name_(""), - line_number_(0), - security_token_id_(security_token_id) { -} - - CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name_prefix, const char* name, @@ -66,6 +56,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, name_(name), resource_name_(resource_name), line_number_(line_number), + shared_id_(0), security_token_id_(security_token_id) { } @@ -130,34 +121,6 @@ uint64_t HeapEntry::id() { return id_adaptor.returned_id; } - -template<class Visitor> -void HeapEntriesMap::UpdateEntries(Visitor* visitor) { - for (HashMap::Entry* p = entries_.Start(); - p != NULL; - p = entries_.Next(p)) { - EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value); - entry_info->entry = visitor->GetEntry( - reinterpret_cast<HeapObject*>(p->key), - entry_info->children_count, - entry_info->retainers_count); - entry_info->children_count = 0; - entry_info->retainers_count = 0; - } -} - - -bool HeapSnapshotGenerator::ReportProgress(bool force) { - const int kProgressReportGranularity = 10000; - if (control_ != NULL - && (force || progress_counter_ % kProgressReportGranularity == 0)) { - return - control_->ReportProgressValue(progress_counter_, progress_total_) == - v8::ActivityControl::kContinue; - } - return true; -} - } } // namespace v8::internal #endif // ENABLE_LOGGING_AND_PROFILING diff --git a/src/profile-generator.cc b/src/profile-generator.cc index 06ee333b..7612eab9 100644 --- a/src/profile-generator.cc +++ b/src/profile-generator.cc @@ -156,13 +156,18 @@ void CodeEntry::CopyData(const CodeEntry& source) { uint32_t CodeEntry::GetCallUid() const { uint32_t hash = ComputeIntegerHash(tag_); - hash ^= ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_))); - hash ^= ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_))); - hash ^= ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_))); - hash ^= ComputeIntegerHash(line_number_); + if (shared_id_ != 0) { + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(shared_id_)); + } else { + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_))); + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_))); + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_))); + hash ^= ComputeIntegerHash(line_number_); + } return hash; } @@ -170,10 +175,12 @@ uint32_t CodeEntry::GetCallUid() const { bool CodeEntry::IsSameAs(CodeEntry* entry) const { return this == entry || (tag_ == entry->tag_ - && name_prefix_ == entry->name_prefix_ - && name_ == entry->name_ - && resource_name_ == entry->resource_name_ - && line_number_ == entry->line_number_); + && shared_id_ == entry->shared_id_ + && (shared_id_ != 0 + || (name_prefix_ == entry->name_prefix_ + && name_ == entry->name_ + && resource_name_ == entry->resource_name_ + && line_number_ == entry->line_number_))); } @@ -458,23 +465,12 @@ void CpuProfile::Print() { } +CodeEntry* const CodeMap::kSfiCodeEntry = NULL; const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL; const 
CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue = CodeMap::CodeEntryInfo(NULL, 0); -void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) { - CodeTree::Locator locator; - if (tree_.Find(code_start, &locator)) { - const CodeEntryInfo& code_info = locator.value(); - if (tree_.Insert(start, &locator)) { - entry->CopyData(*code_info.entry); - locator.set_value(CodeEntryInfo(entry, code_info.size)); - } - } -} - - CodeEntry* CodeMap::FindEntry(Address addr) { CodeTree::Locator locator; if (tree_.FindGreatestLessThan(addr, &locator)) { @@ -487,6 +483,22 @@ CodeEntry* CodeMap::FindEntry(Address addr) { } +int CodeMap::GetSFITag(Address addr) { + CodeTree::Locator locator; + // For SFI entries, 'size' field is used to store their IDs. + if (tree_.Find(addr, &locator)) { + const CodeEntryInfo& entry = locator.value(); + ASSERT(entry.entry == kSfiCodeEntry); + return entry.size; + } else { + tree_.Insert(addr, &locator); + int tag = next_sfi_tag_++; + locator.set_value(CodeEntryInfo(kSfiCodeEntry, tag)); + return tag; + } +} + + void CodeMap::CodeTreePrinter::Call( const Address& key, const CodeMap::CodeEntryInfo& value) { OS::Print("%p %5d %s\n", key, value.size, value.entry->name()); @@ -715,13 +727,6 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, } -CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) { - CodeEntry* entry = new CodeEntry(security_token_id); - code_entries_.Add(entry); - return entry; -} - - void CpuProfilesCollection::AddPathToCurrentProfiles( const Vector<CodeEntry*>& path) { // As starting / stopping profiles is rare relatively to this @@ -784,19 +789,10 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) { if (sample.pc != NULL) { *entry++ = code_map_.FindEntry(sample.pc); - if (sample.function != NULL) { - *entry = code_map_.FindEntry(sample.function); + if (sample.tos != NULL) { + *entry = code_map_.FindEntry(sample.tos); if (*entry != NULL && !(*entry)->is_js_function()) { *entry = NULL; - } else { - CodeEntry* pc_entry = *entries.start(); - if (pc_entry == NULL) { - *entry = NULL; - } else if (pc_entry->is_js_function()) { - // Use function entry in favor of pc entry, as function - // entry has security token. - *entries.start() = NULL; - } } entry++; } @@ -1181,12 +1177,6 @@ void HeapGraphPath::Print() { } -HeapObject *const HeapSnapshot::kInternalRootObject = - reinterpret_cast<HeapObject*>(1); -HeapObject *const HeapSnapshot::kGcRootsObject = - reinterpret_cast<HeapObject*>(2); - - // It is very important to keep objects that form a heap snapshot // as small as possible. namespace { // Avoid littering the global namespace. 
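One more aside, on CodeMap::GetSFITag() above: rather than allocating a CodeEntry per SharedFunctionInfo, the patch stores a monotonically increasing integer tag in the otherwise-unused 'size' field of a placeholder entry keyed by address, creating the tag on first lookup. A minimal sketch of that first-sight-assigns-an-ID pattern, with a std::map standing in for V8's address tree (class and function names here are illustrative, not from the source):

#include <cstdint>
#include <cstdio>
#include <map>

// Hand out a stable integer tag per address; the first lookup creates it.
class AddressTagger {
 public:
  AddressTagger() : next_tag_(1) {}
  int GetTag(uintptr_t addr) {
    std::map<uintptr_t, int>::iterator it = tags_.find(addr);
    if (it != tags_.end()) return it->second;  // already tagged
    int tag = next_tag_++;                     // first sighting: assign an ID
    tags_[addr] = tag;
    return tag;
  }
 private:
  std::map<uintptr_t, int> tags_;
  int next_tag_;
};

int main() {
  AddressTagger tagger;
  int a = tagger.GetTag(0x1000);
  int b = tagger.GetTag(0x2000);
  int c = tagger.GetTag(0x1000);
  std::printf("%d %d %d\n", a, b, c);  // the repeated address keeps its tag
  return 0;
}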
@@ -1257,96 +1247,6 @@ void HeapSnapshot::AllocateEntries(int entries_count, } -HeapEntry* HeapSnapshot::AddEntry(HeapObject* object, - int children_count, - int retainers_count) { - if (object == kInternalRootObject) { - ASSERT(root_entry_ == NULL); - ASSERT(retainers_count == 0); - return (root_entry_ = AddEntry(HeapEntry::kObject, - "", - HeapObjectsMap::kInternalRootObjectId, - 0, - children_count, - retainers_count)); - } else if (object == kGcRootsObject) { - ASSERT(gc_roots_entry_ == NULL); - return (gc_roots_entry_ = AddEntry(HeapEntry::kObject, - "(GC roots)", - HeapObjectsMap::kGcRootsObjectId, - 0, - children_count, - retainers_count)); - } else if (object->IsJSFunction()) { - JSFunction* func = JSFunction::cast(object); - SharedFunctionInfo* shared = func->shared(); - return AddEntry(object, - HeapEntry::kClosure, - collection_->GetName(String::cast(shared->name())), - children_count, - retainers_count); - } else if (object->IsJSRegExp()) { - JSRegExp* re = JSRegExp::cast(object); - return AddEntry(object, - HeapEntry::kRegExp, - collection_->GetName(re->Pattern()), - children_count, - retainers_count); - } else if (object->IsJSObject()) { - return AddEntry(object, - HeapEntry::kObject, - collection_->GetName(GetConstructorNameForHeapProfile( - JSObject::cast(object))), - children_count, - retainers_count); - } else if (object->IsString()) { - return AddEntry(object, - HeapEntry::kString, - collection_->GetName(String::cast(object)), - children_count, - retainers_count); - } else if (object->IsCode()) { - return AddEntry(object, - HeapEntry::kCode, - "", - children_count, - retainers_count); - } else if (object->IsSharedFunctionInfo()) { - SharedFunctionInfo* shared = SharedFunctionInfo::cast(object); - return AddEntry(object, - HeapEntry::kCode, - collection_->GetName(String::cast(shared->name())), - children_count, - retainers_count); - } else if (object->IsScript()) { - Script* script = Script::cast(object); - return AddEntry(object, - HeapEntry::kCode, - script->name()->IsString() ? 
- collection_->GetName(String::cast(script->name())) : "", - children_count, - retainers_count); - } else if (object->IsFixedArray()) { - return AddEntry(object, - HeapEntry::kArray, - "", - children_count, - retainers_count); - } else if (object->IsHeapNumber()) { - return AddEntry(object, - HeapEntry::kHeapNumber, - "number", - children_count, - retainers_count); - } - return AddEntry(object, - HeapEntry::kHidden, - "system", - children_count, - retainers_count); -} - - static void HeapEntryClearPaint(HeapEntry** entry_ptr) { (*entry_ptr)->clear_paint(); } @@ -1356,17 +1256,26 @@ void HeapSnapshot::ClearPaint() { } -HeapEntry* HeapSnapshot::AddEntry(HeapObject* object, - HeapEntry::Type type, - const char* name, - int children_count, - int retainers_count) { - return AddEntry(type, - name, - collection_->GetObjectId(object->address()), - object->Size(), - children_count, - retainers_count); +HeapEntry* HeapSnapshot::AddRootEntry(int children_count) { + ASSERT(root_entry_ == NULL); + return (root_entry_ = AddEntry(HeapEntry::kObject, + "", + HeapObjectsMap::kInternalRootObjectId, + 0, + children_count, + 0)); +} + + +HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count, + int retainers_count) { + ASSERT(gc_roots_entry_ == NULL); + return (gc_roots_entry_ = AddEntry(HeapEntry::kObject, + "(GC roots)", + HeapObjectsMap::kGcRootsObjectId, + 0, + children_count, + retainers_count)); } @@ -1619,7 +1528,7 @@ HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder = reinterpret_cast<HeapEntry*>(1); HeapEntriesMap::HeapEntriesMap() - : entries_(HeapObjectsMatch), + : entries_(HeapThingsMatch), entries_count_(0), total_children_count_(0), total_retainers_count_(0) { @@ -1633,8 +1542,23 @@ HeapEntriesMap::~HeapEntriesMap() { } -HeapEntry* HeapEntriesMap::Map(HeapObject* object) { - HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false); +void HeapEntriesMap::AllocateEntries() { + for (HashMap::Entry* p = entries_.Start(); + p != NULL; + p = entries_.Next(p)) { + EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value); + entry_info->entry = entry_info->allocator->AllocateEntry( + p->key, + entry_info->children_count, + entry_info->retainers_count); + entry_info->children_count = 0; + entry_info->retainers_count = 0; + } +} + + +HeapEntry* HeapEntriesMap::Map(HeapThing thing) { + HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false); if (cache_entry != NULL) { EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value); return entry_info->entry; @@ -1644,15 +1568,16 @@ HeapEntry* HeapEntriesMap::Map(HeapObject* object) { } -void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) { - HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true); +void HeapEntriesMap::Pair( + HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) { + HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true); ASSERT(cache_entry->value == NULL); - cache_entry->value = new EntryInfo(entry); + cache_entry->value = new EntryInfo(entry, allocator); ++entries_count_; } -void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to, +void HeapEntriesMap::CountReference(HeapThing from, HeapThing to, int* prev_children_count, int* prev_retainers_count) { HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false); @@ -1675,7 +1600,7 @@ void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to, HeapObjectsSet::HeapObjectsSet() - : entries_(HeapEntriesMap::HeapObjectsMatch) { + : 
entries_(HeapEntriesMap::HeapThingsMatch) { } @@ -1704,206 +1629,144 @@ void HeapObjectsSet::Insert(Object* obj) { } -HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot, - v8::ActivityControl* control) +HeapObject *const V8HeapExplorer::kInternalRootObject = + reinterpret_cast<HeapObject*>(1); +HeapObject *const V8HeapExplorer::kGcRootsObject = + reinterpret_cast<HeapObject*>(2); + + +V8HeapExplorer::V8HeapExplorer( + HeapSnapshot* snapshot, + SnapshottingProgressReportingInterface* progress) : snapshot_(snapshot), - control_(control), - collection_(snapshot->collection()), + collection_(snapshot_->collection()), + progress_(progress), filler_(NULL) { } -class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface { - public: - explicit SnapshotCounter(HeapEntriesMap* entries) - : entries_(entries) { } - HeapEntry* AddEntry(HeapObject* obj) { - entries_->Pair(obj, HeapEntriesMap::kHeapEntryPlaceholder); - return HeapEntriesMap::kHeapEntryPlaceholder; - } - void SetIndexedReference(HeapGraphEdge::Type, - HeapObject* parent_obj, - HeapEntry*, - int, - Object* child_obj, - HeapEntry*) { - entries_->CountReference(parent_obj, HeapObject::cast(child_obj)); - } - void SetNamedReference(HeapGraphEdge::Type, - HeapObject* parent_obj, - HeapEntry*, - const char*, - Object* child_obj, - HeapEntry*) { - entries_->CountReference(parent_obj, HeapObject::cast(child_obj)); - } - void SetRootShortcutReference(Object* child_obj, HeapEntry*) { - entries_->CountReference( - HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj)); - } - void SetRootGcRootsReference() { - entries_->CountReference( - HeapSnapshot::kInternalRootObject, HeapSnapshot::kGcRootsObject); - } - void SetStrongRootReference(Object* child_obj, HeapEntry*) { - entries_->CountReference( - HeapSnapshot::kGcRootsObject, HeapObject::cast(child_obj)); - } - private: - HeapEntriesMap* entries_; -}; - -class SnapshotFiller : public HeapSnapshotGenerator::SnapshotFillerInterface { - public: - explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries) - : snapshot_(snapshot), - collection_(snapshot->collection()), - entries_(entries) { } - HeapEntry* AddEntry(HeapObject* obj) { - UNREACHABLE(); - return NULL; - } - void SetIndexedReference(HeapGraphEdge::Type type, - HeapObject* parent_obj, - HeapEntry* parent_entry, - int index, - Object* child_obj, - HeapEntry* child_entry) { - int child_index, retainer_index; - entries_->CountReference(parent_obj, - HeapObject::cast(child_obj), - &child_index, - &retainer_index); - parent_entry->SetIndexedReference( - type, child_index, index, child_entry, retainer_index); - } - void SetNamedReference(HeapGraphEdge::Type type, - HeapObject* parent_obj, - HeapEntry* parent_entry, - const char* reference_name, - Object* child_obj, - HeapEntry* child_entry) { - int child_index, retainer_index; - entries_->CountReference(parent_obj, HeapObject::cast(child_obj), - &child_index, &retainer_index); - parent_entry->SetNamedReference(type, - child_index, - reference_name, - child_entry, - retainer_index); - } - void SetRootGcRootsReference() { - int child_index, retainer_index; - entries_->CountReference(HeapSnapshot::kInternalRootObject, - HeapSnapshot::kGcRootsObject, - &child_index, - &retainer_index); - snapshot_->root()->SetIndexedReference(HeapGraphEdge::kElement, - child_index, - child_index + 1, - snapshot_->gc_roots(), - retainer_index); - } - void SetRootShortcutReference(Object* child_obj, - HeapEntry* child_entry) { - int child_index, retainer_index; - 
entries_->CountReference(HeapSnapshot::kInternalRootObject, - HeapObject::cast(child_obj), - &child_index, - &retainer_index); - snapshot_->root()->SetNamedReference(HeapGraphEdge::kShortcut, - child_index, - collection_->GetName(child_index + 1), - child_entry, - retainer_index); - } - void SetStrongRootReference(Object* child_obj, - HeapEntry* child_entry) { - int child_index, retainer_index; - entries_->CountReference(HeapSnapshot::kGcRootsObject, - HeapObject::cast(child_obj), - &child_index, - &retainer_index); - snapshot_->gc_roots()->SetIndexedReference(HeapGraphEdge::kElement, - child_index, - child_index + 1, - child_entry, - retainer_index); - } - private: - HeapSnapshot* snapshot_; - HeapSnapshotsCollection* collection_; - HeapEntriesMap* entries_; -}; - -class SnapshotAllocator { - public: - explicit SnapshotAllocator(HeapSnapshot* snapshot) - : snapshot_(snapshot) { } - HeapEntry* GetEntry( - HeapObject* obj, int children_count, int retainers_count) { - HeapEntry* entry = - snapshot_->AddEntry(obj, children_count, retainers_count); - ASSERT(entry != NULL); - return entry; - } - private: - HeapSnapshot* snapshot_; -}; - -class RootsReferencesExtractor : public ObjectVisitor { - public: - explicit RootsReferencesExtractor(HeapSnapshotGenerator* generator) - : generator_(generator) { - } - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) generator_->SetGcRootsReference(*p); - } - private: - HeapSnapshotGenerator* generator_; -}; +V8HeapExplorer::~V8HeapExplorer() { +} -bool HeapSnapshotGenerator::GenerateSnapshot() { - AssertNoAllocation no_alloc; +HeapEntry* V8HeapExplorer::AllocateEntry( + HeapThing ptr, int children_count, int retainers_count) { + return AddEntry( + reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count); +} - SetProgressTotal(4); // 2 passes + dominators + sizes. - // Pass 1. Iterate heap contents to count entries and references. 
- if (!CountEntriesAndReferences()) return false; +HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object, + int children_count, + int retainers_count) { + if (object == kInternalRootObject) { + ASSERT(retainers_count == 0); + return snapshot_->AddRootEntry(children_count); + } else if (object == kGcRootsObject) { + return snapshot_->AddGcRootsEntry(children_count, retainers_count); + } else if (object->IsJSFunction()) { + JSFunction* func = JSFunction::cast(object); + SharedFunctionInfo* shared = func->shared(); + return AddEntry(object, + HeapEntry::kClosure, + collection_->GetName(String::cast(shared->name())), + children_count, + retainers_count); + } else if (object->IsJSRegExp()) { + JSRegExp* re = JSRegExp::cast(object); + return AddEntry(object, + HeapEntry::kRegExp, + collection_->GetName(re->Pattern()), + children_count, + retainers_count); + } else if (object->IsJSObject()) { + return AddEntry(object, + HeapEntry::kObject, + collection_->GetName(GetConstructorNameForHeapProfile( + JSObject::cast(object))), + children_count, + retainers_count); + } else if (object->IsString()) { + return AddEntry(object, + HeapEntry::kString, + collection_->GetName(String::cast(object)), + children_count, + retainers_count); + } else if (object->IsCode()) { + return AddEntry(object, + HeapEntry::kCode, + "", + children_count, + retainers_count); + } else if (object->IsSharedFunctionInfo()) { + SharedFunctionInfo* shared = SharedFunctionInfo::cast(object); + return AddEntry(object, + HeapEntry::kCode, + collection_->GetName(String::cast(shared->name())), + children_count, + retainers_count); + } else if (object->IsScript()) { + Script* script = Script::cast(object); + return AddEntry(object, + HeapEntry::kCode, + script->name()->IsString() ? + collection_->GetName(String::cast(script->name())) : "", + children_count, + retainers_count); + } else if (object->IsFixedArray()) { + return AddEntry(object, + HeapEntry::kArray, + "", + children_count, + retainers_count); + } else if (object->IsHeapNumber()) { + return AddEntry(object, + HeapEntry::kHeapNumber, + "number", + children_count, + retainers_count); + } + return AddEntry(object, + HeapEntry::kHidden, + "system", + children_count, + retainers_count); +} - // Allocate and fill entries in the snapshot, allocate references. - snapshot_->AllocateEntries(entries_.entries_count(), - entries_.total_children_count(), - entries_.total_retainers_count()); - SnapshotAllocator allocator(snapshot_); - entries_.UpdateEntries(&allocator); - // Pass 2. Fill references. - if (!FillReferences()) return false; +HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object, + HeapEntry::Type type, + const char* name, + int children_count, + int retainers_count) { + return snapshot_->AddEntry(type, + name, + collection_->GetObjectId(object->address()), + object->Size(), + children_count, + retainers_count); +} - if (!SetEntriesDominators()) return false; - if (!ApproximateRetainedSizes()) return false; - progress_counter_ = progress_total_; - if (!ReportProgress(true)) return false; - return true; +void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) { + filler->AddEntry(kInternalRootObject); + filler->AddEntry(kGcRootsObject); } -HeapEntry* HeapSnapshotGenerator::GetEntry(Object* obj) { - if (!obj->IsHeapObject()) return NULL; - HeapObject* object = HeapObject::cast(obj); - HeapEntry* entry = entries_.Map(object); - // A new entry. 
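AddEntry above routes every name through collection_->GetName(), which behaves like a string interner: each distinct name is stored once and entries hold stable const char* pointers into the pool. A stand-in sketch (NamePool is hypothetical, not the V8 class):

#include <cassert>
#include <string>
#include <unordered_set>

// Hypothetical stand-in for the profiler's string storage.
class NamePool {
 public:
  const char* GetName(const std::string& name) {
    // unordered_set never moves its elements, so the returned pointer
    // stays valid for the lifetime of the pool.
    return pool_.insert(name).first->c_str();
  }
 private:
  std::unordered_set<std::string> pool_;
};

int main() {
  NamePool names;
  assert(names.GetName("onload") == names.GetName("onload"));
  return 0;
}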
- if (entry == NULL) entry = filler_->AddEntry(object); - return entry; +int V8HeapExplorer::EstimateObjectsCount() { + HeapIterator iterator(HeapIterator::kFilterUnreachable); + int objects_count = 0; + for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next(), ++objects_count) {} + return objects_count; } class IndexedReferencesExtractor : public ObjectVisitor { public: - IndexedReferencesExtractor(HeapSnapshotGenerator* generator, + IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject* parent_obj, HeapEntry* parent_entry, HeapObjectsSet* known_references = NULL) @@ -1921,7 +1784,7 @@ class IndexedReferencesExtractor : public ObjectVisitor { } } private: - HeapSnapshotGenerator* generator_; + V8HeapExplorer* generator_; HeapObject* parent_obj_; HeapEntry* parent_; HeapObjectsSet* known_references_; @@ -1929,7 +1792,7 @@ class IndexedReferencesExtractor : public ObjectVisitor { }; -void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) { +void V8HeapExplorer::ExtractReferences(HeapObject* obj) { HeapEntry* entry = GetEntry(obj); if (entry == NULL) return; // No interest in this object. @@ -1973,8 +1836,8 @@ void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) { } -void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj, - HeapEntry* entry) { +void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, + HeapEntry* entry) { if (js_obj->IsJSFunction()) { HandleScope hs; JSFunction* func = JSFunction::cast(js_obj); @@ -1996,8 +1859,8 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj, } -void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj, - HeapEntry* entry) { +void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, + HeapEntry* entry) { if (js_obj->HasFastProperties()) { DescriptorArray* descs = js_obj->map()->instance_descriptors(); for (int i = 0; i < descs->number_of_descriptors(); i++) { @@ -2038,8 +1901,8 @@ void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj, } -void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj, - HeapEntry* entry) { +void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, + HeapEntry* entry) { if (js_obj->HasFastElements()) { FixedArray* elements = FixedArray::cast(js_obj->elements()); int length = js_obj->IsJSArray() ? 
@@ -2065,8 +1928,8 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj, } -void HeapSnapshotGenerator::ExtractInternalReferences(JSObject* js_obj, - HeapEntry* entry) { +void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, + HeapEntry* entry) { int length = js_obj->GetInternalFieldCount(); for (int i = 0; i < length; ++i) { Object* o = js_obj->GetInternalField(i); @@ -2075,10 +1938,55 @@ void HeapSnapshotGenerator::ExtractInternalReferences(JSObject* js_obj, } -void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj, - HeapEntry* parent_entry, - String* reference_name, - Object* child_obj) { +HeapEntry* V8HeapExplorer::GetEntry(Object* obj) { + if (!obj->IsHeapObject()) return NULL; + return filler_->FindOrAddEntry(obj); +} + + +class RootsReferencesExtractor : public ObjectVisitor { + public: + explicit RootsReferencesExtractor(V8HeapExplorer* explorer) + : explorer_(explorer) { + } + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p); + } + private: + V8HeapExplorer* explorer_; +}; + + +bool V8HeapExplorer::IterateAndExtractReferences( + SnapshotFillerInterface* filler) { + filler_ = filler; + HeapIterator iterator(HeapIterator::kFilterUnreachable); + bool interrupted = false; + // Heap iteration with filtering must be finished in any case. + for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next(), progress_->ProgressStep()) { + if (!interrupted) { + ExtractReferences(obj); + if (!progress_->ProgressReport(false)) interrupted = true; + } + } + if (interrupted) { + filler_ = NULL; + return false; + } + SetRootGcRootsReference(); + RootsReferencesExtractor extractor(this); + Heap::IterateRoots(&extractor, VISIT_ALL); + filler_ = NULL; + return progress_->ProgressReport(false); +} + + +void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + String* reference_name, + Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetNamedReference(HeapGraphEdge::kContextVariable, @@ -2092,10 +2000,10 @@ void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj, } -void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj, - HeapEntry* parent_entry, - int index, - Object* child_obj) { +void V8HeapExplorer::SetElementReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + int index, + Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetIndexedReference(HeapGraphEdge::kElement, @@ -2109,10 +2017,10 @@ void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj, } -void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj, - HeapEntry* parent_entry, - const char* reference_name, - Object* child_obj) { +void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + const char* reference_name, + Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetNamedReference(HeapGraphEdge::kInternal, @@ -2126,10 +2034,10 @@ void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj, } -void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj, - HeapEntry* parent_entry, - int index, - Object* child_obj) { +void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + int index, + Object* child_obj) { HeapEntry* child_entry = 
GetEntry(child_obj); if (child_entry != NULL) { filler_->SetNamedReference(HeapGraphEdge::kInternal, @@ -2143,10 +2051,10 @@ void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj, } -void HeapSnapshotGenerator::SetHiddenReference(HeapObject* parent_obj, - HeapEntry* parent_entry, - int index, - Object* child_obj) { +void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + int index, + Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetIndexedReference(HeapGraphEdge::kHidden, @@ -2159,10 +2067,10 @@ void HeapSnapshotGenerator::SetHiddenReference(HeapObject* parent_obj, } -void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj, - HeapEntry* parent_entry, - String* reference_name, - Object* child_obj) { +void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + String* reference_name, + Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { HeapGraphEdge::Type type = reference_name->length() > 0 ? @@ -2178,7 +2086,7 @@ void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj, } -void HeapSnapshotGenerator::SetPropertyShortcutReference( +void V8HeapExplorer::SetPropertyShortcutReference( HeapObject* parent_obj, HeapEntry* parent_entry, String* reference_name, @@ -2195,52 +2103,221 @@ void HeapSnapshotGenerator::SetPropertyShortcutReference( } -void HeapSnapshotGenerator::SetRootGcRootsReference() { - filler_->SetRootGcRootsReference(); +void V8HeapExplorer::SetRootGcRootsReference() { + filler_->SetIndexedAutoIndexReference( + HeapGraphEdge::kElement, + kInternalRootObject, snapshot_->root(), + kGcRootsObject, snapshot_->gc_roots()); } -void HeapSnapshotGenerator::SetRootShortcutReference(Object* child_obj) { +void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); ASSERT(child_entry != NULL); - filler_->SetRootShortcutReference(child_obj, child_entry); + filler_->SetNamedAutoIndexReference( + HeapGraphEdge::kShortcut, + kInternalRootObject, snapshot_->root(), + child_obj, child_entry); } -void HeapSnapshotGenerator::SetGcRootsReference(Object* child_obj) { +void V8HeapExplorer::SetGcRootsReference(Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { - filler_->SetStrongRootReference(child_obj, child_entry); + filler_->SetIndexedAutoIndexReference( + HeapGraphEdge::kElement, + kGcRootsObject, snapshot_->gc_roots(), + child_obj, child_entry); } } +HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot, + v8::ActivityControl* control) + : snapshot_(snapshot), + control_(control), + v8_heap_explorer_(snapshot_, this) { +} + + +class SnapshotCounter : public SnapshotFillerInterface { + public: + SnapshotCounter(HeapEntriesAllocator* allocator, HeapEntriesMap* entries) + : allocator_(allocator), entries_(entries) { } + HeapEntry* AddEntry(HeapThing ptr) { + entries_->Pair(ptr, allocator_, HeapEntriesMap::kHeapEntryPlaceholder); + return HeapEntriesMap::kHeapEntryPlaceholder; + } + HeapEntry* FindOrAddEntry(HeapThing ptr) { + HeapEntry* entry = entries_->Map(ptr); + return entry != NULL ? 
entry : AddEntry(ptr); + } + void SetIndexedReference(HeapGraphEdge::Type, + HeapThing parent_ptr, + HeapEntry*, + int, + HeapThing child_ptr, + HeapEntry*) { + entries_->CountReference(parent_ptr, child_ptr); + } + void SetIndexedAutoIndexReference(HeapGraphEdge::Type, + HeapThing parent_ptr, + HeapEntry*, + HeapThing child_ptr, + HeapEntry*) { + entries_->CountReference(parent_ptr, child_ptr); + } + void SetNamedReference(HeapGraphEdge::Type, + HeapThing parent_ptr, + HeapEntry*, + const char*, + HeapThing child_ptr, + HeapEntry*) { + entries_->CountReference(parent_ptr, child_ptr); + } + void SetNamedAutoIndexReference(HeapGraphEdge::Type, + HeapThing parent_ptr, + HeapEntry*, + HeapThing child_ptr, + HeapEntry*) { + entries_->CountReference(parent_ptr, child_ptr); + } + private: + HeapEntriesAllocator* allocator_; + HeapEntriesMap* entries_; +}; + + +class SnapshotFiller : public SnapshotFillerInterface { + public: + explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries) + : snapshot_(snapshot), + collection_(snapshot->collection()), + entries_(entries) { } + HeapEntry* AddEntry(HeapThing ptr) { + UNREACHABLE(); + return NULL; + } + HeapEntry* FindOrAddEntry(HeapThing ptr) { + HeapEntry* entry = entries_->Map(ptr); + return entry != NULL ? entry : AddEntry(ptr); + } + void SetIndexedReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + int index, + HeapThing child_ptr, + HeapEntry* child_entry) { + int child_index, retainer_index; + entries_->CountReference( + parent_ptr, child_ptr, &child_index, &retainer_index); + parent_entry->SetIndexedReference( + type, child_index, index, child_entry, retainer_index); + } + void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + HeapThing child_ptr, + HeapEntry* child_entry) { + int child_index, retainer_index; + entries_->CountReference( + parent_ptr, child_ptr, &child_index, &retainer_index); + parent_entry->SetIndexedReference( + type, child_index, child_index + 1, child_entry, retainer_index); + } + void SetNamedReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + const char* reference_name, + HeapThing child_ptr, + HeapEntry* child_entry) { + int child_index, retainer_index; + entries_->CountReference( + parent_ptr, child_ptr, &child_index, &retainer_index); + parent_entry->SetNamedReference( + type, child_index, reference_name, child_entry, retainer_index); + } + void SetNamedAutoIndexReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + HeapThing child_ptr, + HeapEntry* child_entry) { + int child_index, retainer_index; + entries_->CountReference( + parent_ptr, child_ptr, &child_index, &retainer_index); + parent_entry->SetNamedReference(type, + child_index, + collection_->GetName(child_index + 1), + child_entry, + retainer_index); + } + private: + HeapSnapshot* snapshot_; + HeapSnapshotsCollection* collection_; + HeapEntriesMap* entries_; +}; + + +bool HeapSnapshotGenerator::GenerateSnapshot() { + AssertNoAllocation no_alloc; + + SetProgressTotal(4); // 2 passes + dominators + sizes. + + // Pass 1. Iterate heap contents to count entries and references. + if (!CountEntriesAndReferences()) return false; + + // Allocate and fill entries in the snapshot, allocate references. + snapshot_->AllocateEntries(entries_.entries_count(), + entries_.total_children_count(), + entries_.total_retainers_count()); + entries_.AllocateEntries(); + + // Pass 2. Fill references. 
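The FindOrAddEntry methods above are simple memoization over HeapThing keys: look up first, create and register only on a miss, so callers need not care whether an entry exists yet. A minimal analogue with illustrative types:

#include <cstdio>
#include <unordered_map>

struct Entry { int index; };

class EntryMap {
 public:
  // Look the key up first; create and register an entry only on a miss.
  Entry* FindOrAdd(void* key) {
    auto it = map_.find(key);
    if (it != map_.end()) return &it->second;
    return &map_.emplace(key, Entry{next_index_++}).first->second;
  }
 private:
  std::unordered_map<void*, Entry> map_;
  int next_index_ = 0;
};

int main() {
  EntryMap entries;
  int object;  // Any address works as a key.
  std::printf("%d %d\n", entries.FindOrAdd(&object)->index,
              entries.FindOrAdd(&object)->index);  // "0 0": one entry.
  return 0;
}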
+ if (!FillReferences()) return false; + + if (!SetEntriesDominators()) return false; + if (!ApproximateRetainedSizes()) return false; + + progress_counter_ = progress_total_; + if (!ProgressReport(true)) return false; + return true; +} + + +void HeapSnapshotGenerator::ProgressStep() { + ++progress_counter_; +} + + +bool HeapSnapshotGenerator::ProgressReport(bool force) { + const int kProgressReportGranularity = 10000; + if (control_ != NULL + && (force || progress_counter_ % kProgressReportGranularity == 0)) { + return + control_->ReportProgressValue(progress_counter_, progress_total_) == + v8::ActivityControl::kContinue; + } + return true; +} + + void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) { if (control_ == NULL) return; - - HeapIterator iterator(HeapIterator::kFilterUnreachable); - int objects_count = 0; - for (HeapObject* obj = iterator.next(); - obj != NULL; - obj = iterator.next(), ++objects_count) {} - progress_total_ = objects_count * iterations_count; + progress_total_ = v8_heap_explorer_.EstimateObjectsCount() * iterations_count; progress_counter_ = 0; } bool HeapSnapshotGenerator::CountEntriesAndReferences() { - SnapshotCounter counter(&entries_); - filler_ = &counter; - filler_->AddEntry(HeapSnapshot::kInternalRootObject); - filler_->AddEntry(HeapSnapshot::kGcRootsObject); - return IterateAndExtractReferences(); + SnapshotCounter counter(&v8_heap_explorer_, &entries_); + v8_heap_explorer_.AddRootEntries(&counter); + return v8_heap_explorer_.IterateAndExtractReferences(&counter); } bool HeapSnapshotGenerator::FillReferences() { SnapshotFiller filler(snapshot_, &entries_); - filler_ = &filler; - return IterateAndExtractReferences(); + return v8_heap_explorer_.IterateAndExtractReferences(&filler); } @@ -2326,7 +2403,7 @@ bool HeapSnapshotGenerator::BuildDominatorTree( int remaining = entries_length - changed; if (remaining < 0) remaining = 0; progress_counter_ = base_progress_counter + remaining; - if (!ReportProgress(true)) return false; + if (!ProgressReport(true)) return false; } return true; } @@ -2356,7 +2433,7 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() { } for (int i = 0; i < snapshot_->entries()->length(); - ++i, IncProgressCounter()) { + ++i, ProgressStep()) { HeapEntry* entry = snapshot_->entries()->at(i); int entry_size = entry->self_size(); for (HeapEntry* dominator = entry->dominator(); @@ -2364,32 +2441,12 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() { entry = dominator, dominator = entry->dominator()) { dominator->add_retained_size(entry_size); } - if (!ReportProgress()) return false; + if (!ProgressReport()) return false; } return true; } -bool HeapSnapshotGenerator::IterateAndExtractReferences() { - HeapIterator iterator(HeapIterator::kFilterUnreachable); - bool interrupted = false; - // Heap iteration with filtering must be finished in any case. 
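ProgressReport above only calls back every kProgressReportGranularity steps (or when forced) and treats the callback's return value as a cancellation signal. The same contract in miniature (Control is a stand-in for v8::ActivityControl):

#include <cstdio>

class Control {  // Hypothetical stand-in for v8::ActivityControl.
 public:
  bool ReportProgressValue(int done, int total) {
    std::printf("%d/%d\n", done, total);
    return true;  // Returning false would request cancellation.
  }
};

int main() {
  const int kProgressReportGranularity = 10000;  // As in the patch.
  Control control;
  const int total = 30000;
  for (int counter = 0; counter < total; ++counter) {
    if (counter % kProgressReportGranularity == 0 &&
        !control.ReportProgressValue(counter, total)) {
      return 1;  // Interrupted by the embedder.
    }
    // ... unit of work ...
  }
  control.ReportProgressValue(total, total);  // Forced final report.
  return 0;
}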
- for (HeapObject* obj = iterator.next(); - obj != NULL; - obj = iterator.next(), IncProgressCounter()) { - if (!interrupted) { - ExtractReferences(obj); - if (!ReportProgress()) interrupted = true; - } - } - if (interrupted) return false; - SetRootGcRootsReference(); - RootsReferencesExtractor extractor(this); - Heap::IterateRoots(&extractor, VISIT_ALL); - return ReportProgress(); -} - - void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) { raw_additions_root_ = NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0)); diff --git a/src/profile-generator.h b/src/profile-generator.h index cacd27ea..4762eb63 100644 --- a/src/profile-generator.h +++ b/src/profile-generator.h @@ -88,7 +88,6 @@ class StringsStorage { class CodeEntry { public: - explicit INLINE(CodeEntry(int security_token_id)); // CodeEntry doesn't own name strings, just references them. INLINE(CodeEntry(Logger::LogEventsAndTags tag, const char* name_prefix, @@ -103,6 +102,8 @@ class CodeEntry { INLINE(const char* name() const) { return name_; } INLINE(const char* resource_name() const) { return resource_name_; } INLINE(int line_number() const) { return line_number_; } + INLINE(int shared_id() const) { return shared_id_; } + INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; } INLINE(int security_token_id() const) { return security_token_id_; } INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag)); @@ -119,6 +120,7 @@ class CodeEntry { const char* name_; const char* resource_name_; int line_number_; + int shared_id_; int security_token_id_; DISALLOW_COPY_AND_ASSIGN(CodeEntry); @@ -234,12 +236,12 @@ class CpuProfile { class CodeMap { public: - CodeMap() { } + CodeMap() : next_sfi_tag_(1) { } INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size)); INLINE(void MoveCode(Address from, Address to)); INLINE(void DeleteCode(Address addr)); - void AddAlias(Address start, CodeEntry* entry, Address code_start); CodeEntry* FindEntry(Address addr); + int GetSFITag(Address addr); void Print(); @@ -267,7 +269,11 @@ class CodeMap { void Call(const Address& key, const CodeEntryInfo& value); }; + // Fake CodeEntry pointer to distinguish SFI entries. + static CodeEntry* const kSfiCodeEntry; + CodeTree tree_; + int next_sfi_tag_; DISALLOW_COPY_AND_ASSIGN(CodeMap); }; @@ -675,14 +681,14 @@ class HeapSnapshot { void AllocateEntries( int entries_count, int children_count, int retainers_count); - HeapEntry* AddEntry( - HeapObject* object, int children_count, int retainers_count); HeapEntry* AddEntry(HeapEntry::Type type, const char* name, uint64_t id, int size, int children_count, int retainers_count); + HeapEntry* AddRootEntry(int children_count); + HeapEntry* AddGcRootsEntry(int children_count, int retainers_count); void ClearPaint(); HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot); HeapEntry* GetEntryById(uint64_t id); @@ -695,15 +701,7 @@ class HeapSnapshot { void Print(int max_depth); void PrintEntriesSize(); - static HeapObject* const kInternalRootObject; - static HeapObject* const kGcRootsObject; - private: - HeapEntry* AddEntry(HeapObject* object, - HeapEntry::Type type, - const char* name, - int children_count, - int retainers_count); HeapEntry* GetNextEntryToInit(); HeapSnapshotsCollection* collection_; @@ -867,6 +865,20 @@ class HeapSnapshotsCollection { }; +// A typedef for referencing anything that can be snapshotted living +// in any kind of heap memory. +typedef void* HeapThing; + + +// An interface that creates HeapEntries by HeapThings. 
+class HeapEntriesAllocator { + public: + virtual ~HeapEntriesAllocator() { } + virtual HeapEntry* AllocateEntry( + HeapThing ptr, int children_count, int retainers_count) = 0; +}; + + // The HeapEntriesMap instance is used to track a mapping between // real heap objects and their representations in heap snapshots. class HeapEntriesMap { @@ -874,13 +886,12 @@ class HeapEntriesMap { HeapEntriesMap(); ~HeapEntriesMap(); - HeapEntry* Map(HeapObject* object); - void Pair(HeapObject* object, HeapEntry* entry); - void CountReference(HeapObject* from, HeapObject* to, + void AllocateEntries(); + HeapEntry* Map(HeapThing thing); + void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry); + void CountReference(HeapThing from, HeapThing to, int* prev_children_count = NULL, int* prev_retainers_count = NULL); - template<class Visitor> - void UpdateEntries(Visitor* visitor); int entries_count() { return entries_count_; } int total_children_count() { return total_children_count_; } @@ -890,18 +901,25 @@ class HeapEntriesMap { private: struct EntryInfo { - explicit EntryInfo(HeapEntry* entry) - : entry(entry), children_count(0), retainers_count(0) { } + EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator) + : entry(entry), + allocator(allocator), + children_count(0), + retainers_count(0) { + } HeapEntry* entry; + HeapEntriesAllocator* allocator; int children_count; int retainers_count; }; - static uint32_t Hash(HeapObject* object) { + static uint32_t Hash(HeapThing thing) { return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object))); + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing))); + } + static bool HeapThingsMatch(HeapThing key1, HeapThing key2) { + return key1 == key2; } - static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; } HashMap entries_; int entries_count_; @@ -928,52 +946,70 @@ class HeapObjectsSet { }; -class HeapSnapshotGenerator { +// An interface used to populate a snapshot with nodes and edges. 
+class SnapshotFillerInterface { public: - class SnapshotFillerInterface { - public: - virtual ~SnapshotFillerInterface() { } - virtual HeapEntry* AddEntry(HeapObject* obj) = 0; - virtual void SetIndexedReference(HeapGraphEdge::Type type, - HeapObject* parent_obj, - HeapEntry* parent_entry, - int index, - Object* child_obj, - HeapEntry* child_entry) = 0; - virtual void SetNamedReference(HeapGraphEdge::Type type, - HeapObject* parent_obj, + virtual ~SnapshotFillerInterface() { } + virtual HeapEntry* AddEntry(HeapThing ptr) = 0; + virtual HeapEntry* FindOrAddEntry(HeapThing ptr) = 0; + virtual void SetIndexedReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, HeapEntry* parent_entry, - const char* reference_name, - Object* child_obj, + int index, + HeapThing child_ptr, HeapEntry* child_entry) = 0; - virtual void SetRootGcRootsReference() = 0; - virtual void SetRootShortcutReference(Object* child_obj, + virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + HeapThing child_ptr, + HeapEntry* child_entry) = 0; + virtual void SetNamedReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + const char* reference_name, + HeapThing child_ptr, + HeapEntry* child_entry) = 0; + virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type, + HeapThing parent_ptr, + HeapEntry* parent_entry, + HeapThing child_ptr, HeapEntry* child_entry) = 0; - virtual void SetStrongRootReference(Object* child_obj, - HeapEntry* child_entry) = 0; - }; +}; - HeapSnapshotGenerator(HeapSnapshot* snapshot, - v8::ActivityControl* control); - bool GenerateSnapshot(); + +class SnapshottingProgressReportingInterface { + public: + virtual ~SnapshottingProgressReportingInterface() { } + virtual void ProgressStep() = 0; + virtual bool ProgressReport(bool force) = 0; +}; + + +// An implementation of V8 heap graph extractor. 
+class V8HeapExplorer : public HeapEntriesAllocator { + public: + V8HeapExplorer(HeapSnapshot* snapshot, + SnapshottingProgressReportingInterface* progress); + ~V8HeapExplorer(); + virtual HeapEntry* AllocateEntry( + HeapThing ptr, int children_count, int retainers_count); + void AddRootEntries(SnapshotFillerInterface* filler); + int EstimateObjectsCount(); + bool IterateAndExtractReferences(SnapshotFillerInterface* filler); private: - bool ApproximateRetainedSizes(); - bool BuildDominatorTree(const Vector<HeapEntry*>& entries, - Vector<HeapEntry*>* dominators); - bool CountEntriesAndReferences(); - HeapEntry* GetEntry(Object* obj); - void IncProgressCounter() { ++progress_counter_; } + HeapEntry* AddEntry( + HeapObject* object, int children_count, int retainers_count); + HeapEntry* AddEntry(HeapObject* object, + HeapEntry::Type type, + const char* name, + int children_count, + int retainers_count); void ExtractReferences(HeapObject* obj); void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry); void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry); void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry); void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry); - bool FillReferences(); - void FillReversePostorderIndexes(Vector<HeapEntry*>* entries); - bool IterateAndExtractReferences(); - inline bool ReportProgress(bool force = false); - bool SetEntriesDominators(); void SetClosureReference(HeapObject* parent_obj, HeapEntry* parent, String* reference_name, @@ -1005,24 +1041,54 @@ class HeapSnapshotGenerator { void SetRootShortcutReference(Object* child); void SetRootGcRootsReference(); void SetGcRootsReference(Object* child); - void SetProgressTotal(int iterations_count); + + HeapEntry* GetEntry(Object* obj); HeapSnapshot* snapshot_; - v8::ActivityControl* control_; HeapSnapshotsCollection* collection_; - // Mapping from HeapObject* pointers to HeapEntry* pointers. - HeapEntriesMap entries_; - SnapshotFillerInterface* filler_; + SnapshottingProgressReportingInterface* progress_; // Used during references extraction to mark heap objects that // are references via non-hidden properties. HeapObjectsSet known_references_; - // Used during snapshot generation. - int progress_counter_; - int progress_total_; + SnapshotFillerInterface* filler_; + + static HeapObject* const kInternalRootObject; + static HeapObject* const kGcRootsObject; friend class IndexedReferencesExtractor; friend class RootsReferencesExtractor; + DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer); +}; + + +class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface { + public: + HeapSnapshotGenerator(HeapSnapshot* snapshot, + v8::ActivityControl* control); + bool GenerateSnapshot(); + + private: + bool ApproximateRetainedSizes(); + bool BuildDominatorTree(const Vector<HeapEntry*>& entries, + Vector<HeapEntry*>* dominators); + bool CountEntriesAndReferences(); + bool FillReferences(); + void FillReversePostorderIndexes(Vector<HeapEntry*>* entries); + void ProgressStep(); + bool ProgressReport(bool force = false); + bool SetEntriesDominators(); + void SetProgressTotal(int iterations_count); + + HeapSnapshot* snapshot_; + v8::ActivityControl* control_; + V8HeapExplorer v8_heap_explorer_; + // Mapping from HeapThing pointers to HeapEntry* pointers. + HeapEntriesMap entries_; + // Used during snapshot generation. 
+ int progress_counter_; + int progress_total_; + DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator); }; diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc index 09797ca2..51f4015f 100644 --- a/src/regexp-macro-assembler.cc +++ b/src/regexp-macro-assembler.cc @@ -154,16 +154,12 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute( const byte* input_start, const byte* input_end, int* output) { - typedef int (*matcher)(String*, int, const byte*, - const byte*, int*, Address, int); - matcher matcher_func = FUNCTION_CAST<matcher>(code->entry()); - // Ensure that the minimum stack has been allocated. RegExpStack stack; Address stack_base = RegExpStack::stack_base(); int direct_call = 0; - int result = CALL_GENERATED_REGEXP_CODE(matcher_func, + int result = CALL_GENERATED_REGEXP_CODE(code->entry(), input, start_offset, input_start, diff --git a/src/regexp.js b/src/regexp.js index 5b7e3a9d..f68dee61 100644 --- a/src/regexp.js +++ b/src/regexp.js @@ -384,13 +384,13 @@ function RegExpMakeCaptureGetter(n) { // pairs for the match and all the captured substrings), the invariant is // that there are at least two capture indeces. The array also contains // the subject string for the last successful match. -var lastMatchInfo = [ +var lastMatchInfo = new InternalArray( 2, // REGEXP_NUMBER_OF_CAPTURES "", // Last subject. void 0, // Last input - settable with RegExpSetInput. 0, // REGEXP_FIRST_CAPTURE + 0 - 0, // REGEXP_FIRST_CAPTURE + 1 -]; + 0 // REGEXP_FIRST_CAPTURE + 1 +); // Override last match info with an array of actual substrings. // Used internally by replace regexp with function. diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc index 3406cdc2..df6471e9 100644 --- a/src/runtime-profiler.cc +++ b/src/runtime-profiler.cc @@ -35,6 +35,7 @@ #include "deoptimizer.h" #include "execution.h" #include "global-handles.h" +#include "mark-compact.h" #include "scopeinfo.h" #include "top.h" @@ -100,11 +101,6 @@ static int sampler_ticks_until_threshold_adjustment = // The ratio of ticks spent in JS code in percent. static Atomic32 js_ratio; -// The JSFunctions in the sampler window are not GC safe. Old-space -// pointers are not cleared during mark-sweep collection and therefore -// the window might contain stale pointers. The window is updated on -// scavenges and (parts of it) cleared on mark-sweep and -// mark-sweep-compact. static Object* sampler_window[kSamplerWindowSize] = { NULL, }; static int sampler_window_position = 0; static int sampler_window_weight[kSamplerWindowSize] = { 0, }; @@ -134,7 +130,6 @@ void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) { static bool IsOptimizable(JSFunction* function) { - if (Heap::InNewSpace(function)) return false; Code* code = function->code(); return code->kind() == Code::FUNCTION && code->optimizable(); } @@ -208,16 +203,6 @@ static void ClearSampleBuffer() { } -static void ClearSampleBufferNewSpaceEntries() { - for (int i = 0; i < kSamplerWindowSize; i++) { - if (Heap::InNewSpace(sampler_window[i])) { - sampler_window[i] = NULL; - sampler_window_weight[i] = 0; - } - } -} - - static int LookupSample(JSFunction* function) { int weight = 0; for (int i = 0; i < kSamplerWindowSize; i++) { @@ -372,24 +357,6 @@ void RuntimeProfiler::NotifyTick() { } -void RuntimeProfiler::MarkCompactPrologue(bool is_compacting) { - if (is_compacting) { - // Clear all samples before mark-sweep-compact because every - // function might move. 
- ClearSampleBuffer(); - } else { - // Clear only new space entries on mark-sweep since none of the - // old-space functions will move. - ClearSampleBufferNewSpaceEntries(); - } -} - - -bool IsEqual(void* first, void* second) { - return first == second; -} - - void RuntimeProfiler::Setup() { ClearSampleBuffer(); // If the ticker hasn't already started, make sure to do so to get @@ -411,13 +378,41 @@ void RuntimeProfiler::TearDown() { } -Object** RuntimeProfiler::SamplerWindowAddress() { - return sampler_window; +int RuntimeProfiler::SamplerWindowSize() { + return kSamplerWindowSize; } -int RuntimeProfiler::SamplerWindowSize() { - return kSamplerWindowSize; +// Update the pointers in the sampler window after a GC. +void RuntimeProfiler::UpdateSamplesAfterScavenge() { + for (int i = 0; i < kSamplerWindowSize; i++) { + Object* function = sampler_window[i]; + if (function != NULL && Heap::InNewSpace(function)) { + MapWord map_word = HeapObject::cast(function)->map_word(); + if (map_word.IsForwardingAddress()) { + sampler_window[i] = map_word.ToForwardingAddress(); + } else { + sampler_window[i] = NULL; + } + } + } +} + + +void RuntimeProfiler::RemoveDeadSamples() { + for (int i = 0; i < kSamplerWindowSize; i++) { + Object* function = sampler_window[i]; + if (function != NULL && !HeapObject::cast(function)->IsMarked()) { + sampler_window[i] = NULL; + } + } +} + + +void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) { + for (int i = 0; i < kSamplerWindowSize; i++) { + visitor->VisitPointer(&sampler_window[i]); + } } diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h index e041c059..02defc9b 100644 --- a/src/runtime-profiler.h +++ b/src/runtime-profiler.h @@ -47,9 +47,10 @@ class RuntimeProfiler : public AllStatic { static void Reset(); static void TearDown(); - static void MarkCompactPrologue(bool is_compacting); - static Object** SamplerWindowAddress(); static int SamplerWindowSize(); + static void UpdateSamplesAfterScavenge(); + static void RemoveDeadSamples(); + static void UpdateSamplesAfterCompact(ObjectVisitor* visitor); }; diff --git a/src/runtime.cc b/src/runtime.cc index ef7a4acc..965a083a 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -40,8 +40,10 @@ #include "debug.h" #include "deoptimizer.h" #include "execution.h" +#include "global-handles.h" #include "jsregexp.h" #include "liveedit.h" +#include "liveobjectlist-inl.h" #include "parser.h" #include "platform.h" #include "runtime.h" @@ -160,7 +162,8 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) { if (!maybe_result->ToObject(&result)) return maybe_result; } { MaybeObject* maybe_result = - copy->SetProperty(key_string, result, NONE); + // Creating object copy for literals. No strict mode needed. + copy->SetProperty(key_string, result, NONE, kNonStrictMode); if (!maybe_result->ToObject(&result)) return maybe_result; } } @@ -332,7 +335,10 @@ static Handle<Object> CreateObjectLiteralBoilerplate( if (key->IsSymbol()) { if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) { // Array index as string (uint32). - result = SetOwnElement(boilerplate, element_index, value); + result = SetOwnElement(boilerplate, + element_index, + value, + kNonStrictMode); } else { Handle<String> name(String::cast(*key)); ASSERT(!name->AsArrayIndex(&element_index)); @@ -341,7 +347,10 @@ static Handle<Object> CreateObjectLiteralBoilerplate( } } else if (key->ToArrayIndex(&element_index)) { // Array index (uint32). 
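UpdateSamplesAfterScavenge above repairs the sampler window after a copying collection: a surviving new-space object is chased through its forwarding address, while a dead one is dropped by storing NULL. An illustrative model with simplified stand-in types:

#include <cassert>

struct Obj {
  Obj* forwarding = nullptr;  // Set by the GC when the object moved.
  bool in_new_space = false;
};

// Mirror of the fix-up loop: redirect moved survivors, drop the rest.
void UpdateWindowAfterScavenge(Obj** window, int size) {
  for (int i = 0; i < size; i++) {
    Obj* obj = window[i];
    if (obj != nullptr && obj->in_new_space) {
      window[i] = obj->forwarding;  // nullptr clears a dead sample.
    }
  }
}

int main() {
  Obj old_location, new_location;
  old_location.in_new_space = true;
  old_location.forwarding = &new_location;
  Obj* window[2] = {&old_location, nullptr};
  UpdateWindowAfterScavenge(window, 2);
  assert(window[0] == &new_location);
  return 0;
}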
- result = SetOwnElement(boilerplate, element_index, value); + result = SetOwnElement(boilerplate, + element_index, + value, + kNonStrictMode); } else { // Non-uint32 number. ASSERT(key->IsNumber()); @@ -546,7 +555,9 @@ static MaybeObject* Runtime_CreateCatchExtensionObject(Arguments args) { // Assign the exception value to the catch variable and make sure // that the catch variable is DontDelete. { MaybeObject* maybe_value = - JSObject::cast(object)->SetProperty(key, value, DONT_DELETE); + // Passing non-strict per ECMA-262 5th Ed. 12.14. Catch, bullet #4. + JSObject::cast(object)->SetProperty( + key, value, DONT_DELETE, kNonStrictMode); if (!maybe_value->ToObject(&value)) return maybe_value; } return object; @@ -783,7 +794,9 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) { case JSObject::INTERCEPTED_ELEMENT: case JSObject::FAST_ELEMENT: { elms->set(IS_ACCESSOR_INDEX, Heap::false_value()); - elms->set(VALUE_INDEX, *GetElement(obj, index)); + Handle<Object> value = GetElement(obj, index); + RETURN_IF_EMPTY_HANDLE(value); + elms->set(VALUE_INDEX, *value); elms->set(WRITABLE_INDEX, Heap::true_value()); elms->set(ENUMERABLE_INDEX, Heap::true_value()); elms->set(CONFIGURABLE_INDEX, Heap::true_value()); @@ -816,12 +829,15 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) { } break; } - case NORMAL: + case NORMAL: { // This is a data property. elms->set(IS_ACCESSOR_INDEX, Heap::false_value()); - elms->set(VALUE_INDEX, *GetElement(obj, index)); + Handle<Object> value = GetElement(obj, index); + ASSERT(!value.is_null()); + elms->set(VALUE_INDEX, *value); elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly())); break; + } default: UNREACHABLE(); break; @@ -994,12 +1010,16 @@ static Failure* ThrowRedeclarationError(const char* type, Handle<String> name) { static MaybeObject* Runtime_DeclareGlobals(Arguments args) { + ASSERT(args.length() == 4); HandleScope scope; Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global()); Handle<Context> context = args.at<Context>(0); CONVERT_ARG_CHECKED(FixedArray, pairs, 1); bool is_eval = Smi::cast(args[2])->value() == 1; + StrictModeFlag strict_mode = + static_cast<StrictModeFlag>(Smi::cast(args[3])->value()); + ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode); // Compute the property attributes. According to ECMA-262, section // 13, page 71, the property must be read-only and @@ -1051,6 +1071,12 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) { // Fall-through and introduce the absent property by using // SetProperty. } else { + // For const properties, we treat a callback with this name + // even in the prototype as a conflicting declaration. + if (is_const_property && (lookup.type() == CALLBACKS)) { + return ThrowRedeclarationError("const", name); + } + // Otherwise, we check for locally conflicting declarations. if (is_local && (is_read_only || is_const_property)) { const char* type = (is_read_only) ? "const" : "var"; return ThrowRedeclarationError(type, name); @@ -1076,29 +1102,43 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) { ? static_cast<PropertyAttributes>(base | READ_ONLY) : base; - if (lookup.IsProperty()) { - // There's a local property that we need to overwrite because - // we're either declaring a function or there's an interceptor - // that claims the property is absent. - - // Check for conflicting re-declarations. We cannot have - // conflicting types in case of intercepted properties because - // they are absent. 
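Runtime_DeclareGlobals above now receives the strict-mode flag as a tagged small integer and re-validates it before casting back to the enum, a pattern repeated throughout this patch. A sketch of that decode-and-check step, with a plain int standing in for a Smi:

#include <cassert>

enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };

// Decode an untrusted integer argument into the enum, rejecting
// anything outside the two known values (cf. the RUNTIME_ASSERT above).
StrictModeFlag DecodeStrictMode(int raw) {
  assert(raw == kStrictMode || raw == kNonStrictMode);
  return static_cast<StrictModeFlag>(raw);
}

int main() {
  assert(DecodeStrictMode(1) == kStrictMode);
  assert(DecodeStrictMode(0) == kNonStrictMode);
  return 0;
}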
- if (lookup.type() != INTERCEPTOR && - (lookup.IsReadOnly() || is_const_property)) { - const char* type = (lookup.IsReadOnly()) ? "const" : "var"; - return ThrowRedeclarationError(type, name); + // There's a local property that we need to overwrite because + // we're either declaring a function or there's an interceptor + // that claims the property is absent. + // + // Check for conflicting re-declarations. We cannot have + // conflicting types in case of intercepted properties because + // they are absent. + if (lookup.IsProperty() && + (lookup.type() != INTERCEPTOR) && + (lookup.IsReadOnly() || is_const_property)) { + const char* type = (lookup.IsReadOnly()) ? "const" : "var"; + return ThrowRedeclarationError(type, name); + } + + // Safari does not allow the invocation of callback setters for + // function declarations. To mimic this behavior, we do not allow + // the invocation of setters for function values. This makes a + // difference for global functions with the same names as event + // handlers such as "function onload() {}". Firefox does call the + // onload setter in those case and Safari does not. We follow + // Safari for compatibility. + if (value->IsJSFunction()) { + // Do not change DONT_DELETE to false from true. + if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) { + attributes = static_cast<PropertyAttributes>( + attributes | (lookup.GetAttributes() & DONT_DELETE)); } - RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes)); + RETURN_IF_EMPTY_HANDLE(SetLocalPropertyIgnoreAttributes(global, + name, + value, + attributes)); } else { - // If a property with this name does not already exist on the - // global object add the property locally. We take special - // precautions to always add it as a local property even in case - // of callbacks in the prototype chain (this rules out using - // SetProperty). Also, we must use the handle-based version to - // avoid GC issues. - RETURN_IF_EMPTY_HANDLE( - SetLocalPropertyIgnoreAttributes(global, name, value, attributes)); + RETURN_IF_EMPTY_HANDLE(SetProperty(global, + name, + value, + attributes, + strict_mode)); } } @@ -1152,14 +1192,16 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) { } else { // The holder is an arguments object. Handle<JSObject> arguments(Handle<JSObject>::cast(holder)); - Handle<Object> result = SetElement(arguments, index, initial_value); + Handle<Object> result = SetElement(arguments, index, initial_value, + kNonStrictMode); if (result.is_null()) return Failure::Exception(); } } else { // Slow case: The property is not in the FixedArray part of the context. Handle<JSObject> context_ext = Handle<JSObject>::cast(holder); RETURN_IF_EMPTY_HANDLE( - SetProperty(context_ext, name, initial_value, mode)); + SetProperty(context_ext, name, initial_value, + mode, kNonStrictMode)); } } @@ -1186,7 +1228,22 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) { ASSERT(!context_ext->HasLocalProperty(*name)); Handle<Object> value(Heap::undefined_value()); if (*initial_value != NULL) value = initial_value; - RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode)); + // Declaring a const context slot is a conflicting declaration if + // there is a callback with that name in a prototype. It is + // allowed to introduce const variables in + // JSContextExtensionObjects. They are treated specially in + // SetProperty and no setters are invoked for those since they are + // not real JSObjects. 
+ if (initial_value->IsTheHole() && + !context_ext->IsJSContextExtensionObject()) { + LookupResult lookup; + context_ext->Lookup(*name, &lookup); + if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) { + return ThrowRedeclarationError("const", name); + } + } + RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode, + kNonStrictMode)); } return Heap::undefined_value(); @@ -1195,14 +1252,21 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) { static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) { NoHandleAllocation nha; + // args[0] == name + // args[1] == strict_mode + // args[2] == value (optional) // Determine if we need to assign to the variable if it already // exists (based on the number of arguments). - RUNTIME_ASSERT(args.length() == 1 || args.length() == 2); - bool assign = args.length() == 2; + RUNTIME_ASSERT(args.length() == 2 || args.length() == 3); + bool assign = args.length() == 3; CONVERT_ARG_CHECKED(String, name, 0); GlobalObject* global = Top::context()->global(); + RUNTIME_ASSERT(args[1]->IsSmi()); + StrictModeFlag strict_mode = + static_cast<StrictModeFlag>(Smi::cast(args[1])->value()); + ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode); // According to ECMA-262, section 12.2, page 62, the property must // not be deletable. @@ -1212,11 +1276,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) { // there, there is a property with this name in the prototype chain. // We follow Safari and Firefox behavior and only set the property // locally if there is an explicit initialization value that we have - // to assign to the property. When adding the property we take - // special precautions to always add it as a local property even in - // case of callbacks in the prototype chain (this rules out using - // SetProperty). We have SetLocalPropertyIgnoreAttributes for - // this. + // to assign to the property. // Note that objects can have hidden prototypes, so we need to traverse // the whole chain of hidden prototypes to do a 'local' lookup. JSObject* real_holder = global; @@ -1262,8 +1322,9 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) { } // Assign the value (or undefined) to the property. - Object* value = (assign) ? args[1] : Heap::undefined_value(); - return real_holder->SetProperty(&lookup, *name, value, attributes); + Object* value = (assign) ? args[2] : Heap::undefined_value(); + return real_holder->SetProperty( + &lookup, *name, value, attributes, strict_mode); } Object* proto = real_holder->GetPrototype(); @@ -1278,9 +1339,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) { global = Top::context()->global(); if (assign) { - return global->SetLocalPropertyIgnoreAttributes(*name, - args[1], - attributes); + return global->SetProperty(*name, args[2], attributes, strict_mode); } return Heap::undefined_value(); } @@ -1340,13 +1399,19 @@ static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) { // BUG 1213575: Handle the case where we have to set a read-only // property through an interceptor and only do it if it's // uninitialized, e.g. the hole. Nirk... - RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes)); + // Passing non-strict mode because the property is writable. + RETURN_IF_EMPTY_HANDLE(SetProperty(global, + name, + value, + attributes, + kNonStrictMode)); return *value; } // Set the value, but only we're assigning the initial value to a // constant. For now, we determine this by checking if the // current value is the hole. 
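Runtime_InitializeVarGlobal above distinguishes plain declaration from declaration-with-assignment purely by argument count and reads the trailing value only when it is present. A reduced illustration (Args is a hypothetical stand-in for the runtime's argument wrapper):

#include <cstdio>

// Hypothetical stand-in for the runtime's Arguments object.
struct Args {
  int length;
  const char* values[3];
};

void InitializeVar(const Args& args) {
  bool assign = args.length == 3;  // Trailing value argument present?
  const char* value = assign ? args.values[2] : "undefined";
  std::printf("%s = %s\n", args.values[0], value);
}

int main() {
  InitializeVar({2, {"x", "<strict flag>"}});        // x = undefined
  InitializeVar({3, {"y", "<strict flag>", "42"}});  // y = 42
  return 0;
}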
+ // Strict mode handling not needed (const disallowed in strict mode). PropertyType type = lookup.type(); if (type == FIELD) { FixedArray* properties = global->properties(); @@ -1413,7 +1478,8 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) { // The holder is an arguments object. ASSERT((attributes & READ_ONLY) == 0); Handle<JSObject> arguments(Handle<JSObject>::cast(holder)); - SetElement(arguments, index, value); + RETURN_IF_EMPTY_HANDLE( + SetElement(arguments, index, value, kNonStrictMode)); } return *value; } @@ -1422,7 +1488,9 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) { // context. if (attributes == ABSENT) { Handle<JSObject> global = Handle<JSObject>(Top::context()->global()); - RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, NONE)); + // Strict mode not needed (const disallowed in strict mode). + RETURN_IF_EMPTY_HANDLE( + SetProperty(global, name, value, NONE, kNonStrictMode)); return *value; } @@ -1459,8 +1527,9 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) { // The property was found in a different context extension object. // Set it if it is not a read-only property. if ((attributes & READ_ONLY) == 0) { + // Strict mode not needed (const disallowed in strict mode). RETURN_IF_EMPTY_HANDLE( - SetProperty(context_ext, name, value, attributes)); + SetProperty(context_ext, name, value, attributes, kNonStrictMode)); } } @@ -1626,7 +1695,7 @@ static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder, code, false); optimized->shared()->DontAdaptArguments(); - SetProperty(holder, key, optimized, NONE); + SetProperty(holder, key, optimized, NONE, kStrictMode); return optimized; } @@ -3673,6 +3742,8 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) { is_element) { // Normalize the elements to enable attributes on the property. if (js_object->IsJSGlobalProxy()) { + // We do not need to do access checks here since these have already + // been performed by the call to GetOwnProperty. Handle<Object> proto(js_object->GetPrototype()); // If proxy is detached, ignore the assignment. Alternatively, // we could throw an exception.
@@ -3713,14 +3784,15 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) { attr); } - return Runtime::SetObjectProperty(js_object, name, obj_value, attr); + return Runtime::ForceSetObjectProperty(js_object, name, obj_value, attr); } MaybeObject* Runtime::SetObjectProperty(Handle<Object> object, Handle<Object> key, Handle<Object> value, - PropertyAttributes attr) { + PropertyAttributes attr, + StrictModeFlag strict_mode) { HandleScope scope; if (object->IsUndefined() || object->IsNull()) { @@ -3750,7 +3822,7 @@ MaybeObject* Runtime::SetObjectProperty(Handle<Object> object, return *value; } - Handle<Object> result = SetElement(js_object, index, value); + Handle<Object> result = SetElement(js_object, index, value, strict_mode); if (result.is_null()) return Failure::Exception(); return *value; } @@ -3758,11 +3830,11 @@ MaybeObject* Runtime::SetObjectProperty(Handle<Object> object, if (key->IsString()) { Handle<Object> result; if (Handle<String>::cast(key)->AsArrayIndex(&index)) { - result = SetElement(js_object, index, value); + result = SetElement(js_object, index, value, strict_mode); } else { Handle<String> key_string = Handle<String>::cast(key); key_string->TryFlatten(); - result = SetProperty(js_object, key_string, value, attr); + result = SetProperty(js_object, key_string, value, attr, strict_mode); } if (result.is_null()) return Failure::Exception(); return *value; @@ -3775,9 +3847,9 @@ MaybeObject* Runtime::SetObjectProperty(Handle<Object> object, Handle<String> name = Handle<String>::cast(converted); if (name->AsArrayIndex(&index)) { - return js_object->SetElement(index, *value); + return js_object->SetElement(index, *value, strict_mode); } else { - return js_object->SetProperty(*name, *value, attr); + return js_object->SetProperty(*name, *value, attr, strict_mode); } } @@ -3802,12 +3874,12 @@ MaybeObject* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object, return *value; } - return js_object->SetElement(index, *value); + return js_object->SetElement(index, *value, kNonStrictMode); } if (key->IsString()) { if (Handle<String>::cast(key)->AsArrayIndex(&index)) { - return js_object->SetElement(index, *value); + return js_object->SetElement(index, *value, kNonStrictMode); } else { Handle<String> key_string = Handle<String>::cast(key); key_string->TryFlatten(); @@ -3824,7 +3896,7 @@ MaybeObject* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object, Handle<String> name = Handle<String>::cast(converted); if (name->AsArrayIndex(&index)) { - return js_object->SetElement(index, *value); + return js_object->SetElement(index, *value, kNonStrictMode); } else { return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr); } @@ -3869,23 +3941,31 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object, static MaybeObject* Runtime_SetProperty(Arguments args) { NoHandleAllocation ha; - RUNTIME_ASSERT(args.length() == 3 || args.length() == 4); + RUNTIME_ASSERT(args.length() == 4 || args.length() == 5); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); - + CONVERT_SMI_CHECKED(unchecked_attributes, args[3]); + RUNTIME_ASSERT( + (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); // Compute attributes. - PropertyAttributes attributes = NONE; - if (args.length() == 4) { - CONVERT_CHECKED(Smi, value_obj, args[3]); - int unchecked_value = value_obj->value(); - // Only attribute bits should be set. 
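Runtime_SetProperty above validates the attribute argument by masking away the known bits and rejecting anything left over, before casting to PropertyAttributes. The same check in isolation (the enum values mirror the names used in the patch but are illustrative):

#include <cassert>

enum PropertyAttributes {
  NONE = 0,
  READ_ONLY = 1 << 0,
  DONT_ENUM = 1 << 1,
  DONT_DELETE = 1 << 2
};

// Accept only inputs whose bits are a subset of the known attributes.
bool IsValidAttributeWord(int unchecked) {
  return (unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0;
}

int main() {
  assert(IsValidAttributeWord(READ_ONLY | DONT_DELETE));
  assert(!IsValidAttributeWord(1 << 3));  // Unknown bit: rejected.
  return 0;
}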
- RUNTIME_ASSERT( - (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); - attributes = static_cast<PropertyAttributes>(unchecked_value); + PropertyAttributes attributes = + static_cast<PropertyAttributes>(unchecked_attributes); + + StrictModeFlag strict_mode = kNonStrictMode; + if (args.length() == 5) { + CONVERT_SMI_CHECKED(strict_unchecked, args[4]); + RUNTIME_ASSERT(strict_unchecked == kStrictMode || + strict_unchecked == kNonStrictMode); + strict_mode = static_cast<StrictModeFlag>(strict_unchecked); } - return Runtime::SetObjectProperty(object, key, value, attributes); + + return Runtime::SetObjectProperty(object, + key, + value, + attributes, + strict_mode); } @@ -3914,11 +3994,14 @@ static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(Arguments args) { static MaybeObject* Runtime_DeleteProperty(Arguments args) { NoHandleAllocation ha; - ASSERT(args.length() == 2); + ASSERT(args.length() == 3); CONVERT_CHECKED(JSObject, object, args[0]); CONVERT_CHECKED(String, key, args[1]); - return object->DeleteProperty(key, JSObject::NORMAL_DELETION); + CONVERT_SMI_CHECKED(strict, args[2]); + return object->DeleteProperty(key, (strict == kStrictMode) + ? JSObject::STRICT_DELETION + : JSObject::NORMAL_DELETION); } @@ -4214,6 +4297,14 @@ static MaybeObject* Runtime_LocalKeys(Arguments args) { Handle<JSObject> object(raw_object); if (object->IsJSGlobalProxy()) { + // Do access checks before going to the global object. + if (object->IsAccessCheckNeeded() && + !Top::MayNamedAccess(*object, Heap::undefined_value(), + v8::ACCESS_KEYS)) { + Top::ReportFailedAccessCheck(*object, v8::ACCESS_KEYS); + return *Factory::NewJSArray(0); + } + Handle<Object> proto(object->GetPrototype()); // If proxy is detached we simply return an empty array. if (proto->IsNull()) return *Factory::NewJSArray(0); @@ -5803,6 +5894,89 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) { } +static MaybeObject* Runtime_StringBuilderJoin(Arguments args) { + NoHandleAllocation ha; + ASSERT(args.length() == 3); + CONVERT_CHECKED(JSArray, array, args[0]); + if (!args[1]->IsSmi()) { + Top::context()->mark_out_of_memory(); + return Failure::OutOfMemoryException(); + } + int array_length = Smi::cast(args[1])->value(); + CONVERT_CHECKED(String, separator, args[2]); + + if (!array->HasFastElements()) { + return Top::Throw(Heap::illegal_argument_symbol()); + } + FixedArray* fixed_array = FixedArray::cast(array->elements()); + if (fixed_array->length() < array_length) { + array_length = fixed_array->length(); + } + + if (array_length == 0) { + return Heap::empty_string(); + } else if (array_length == 1) { + Object* first = fixed_array->get(0); + if (first->IsString()) return first; + } + + int separator_length = separator->length(); + int max_nof_separators = + (String::kMaxLength + separator_length - 1) / separator_length; + if (max_nof_separators < (array_length - 1)) { + Top::context()->mark_out_of_memory(); + return Failure::OutOfMemoryException(); + } + int length = (array_length - 1) * separator_length; + for (int i = 0; i < array_length; i++) { + Object* element_obj = fixed_array->get(i); + if (!element_obj->IsString()) { + // TODO(1161): handle this case. 
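The join implementation starting above computes the result length before writing a single character, guarding each addition against the maximum string length so the running sum cannot overflow. A sketch of that guard (kMaxLength is an illustrative cap, not V8's exact constant):

#include <cassert>

const int kMaxLength = 1 << 28;  // Illustrative cap, not V8's constant.

// Returns -1 if the joined string would exceed kMaxLength, otherwise
// the total length. Every addition is guarded before it happens.
int JoinedLength(const int* lengths, int n, int separator_length) {
  if (n == 0) return 0;
  int length = (n - 1) * separator_length;  // Pre-checked in the patch
                                            // via max_nof_separators.
  for (int i = 0; i < n; i++) {
    if (lengths[i] > kMaxLength - length) return -1;  // Would overflow.
    length += lengths[i];
  }
  return length;
}

int main() {
  const int parts[] = {3, 4, 5};
  assert(JoinedLength(parts, 3, 1) == 14);
  const int huge[] = {kMaxLength, kMaxLength};
  assert(JoinedLength(huge, 2, 1) == -1);
  return 0;
}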
+ return Top::Throw(Heap::illegal_argument_symbol()); + } + String* element = String::cast(element_obj); + int increment = element->length(); + if (increment > String::kMaxLength - length) { + Top::context()->mark_out_of_memory(); + return Failure::OutOfMemoryException(); + } + length += increment; + } + + Object* object; + { MaybeObject* maybe_object = Heap::AllocateRawTwoByteString(length); + if (!maybe_object->ToObject(&object)) return maybe_object; + } + SeqTwoByteString* answer = SeqTwoByteString::cast(object); + + uc16* sink = answer->GetChars(); +#ifdef DEBUG + uc16* end = sink + length; +#endif + + String* first = String::cast(fixed_array->get(0)); + int first_length = first->length(); + String::WriteToFlat(first, sink, 0, first_length); + sink += first_length; + + for (int i = 1; i < array_length; i++) { + ASSERT(sink + separator_length <= end); + String::WriteToFlat(separator, sink, 0, separator_length); + sink += separator_length; + + String* element = String::cast(fixed_array->get(i)); + int element_length = element->length(); + ASSERT(sink + element_length <= end); + String::WriteToFlat(element, sink, 0, element_length); + sink += element_length; + } + ASSERT(sink == end); + + ASSERT(!answer->HasOnlyAsciiChars()); // Use %_FastAsciiArrayJoin instead. + return answer; +} + + static MaybeObject* Runtime_NumberOr(Arguments args) { NoHandleAllocation ha; ASSERT(args.length() == 2); @@ -6833,6 +7007,7 @@ static MaybeObject* Runtime_NewObject(Arguments args) { bool first_allocation = !shared->live_objects_may_exist(); Handle<JSObject> result = Factory::NewJSObject(function); + RETURN_IF_EMPTY_HANDLE(result); // Delay setting the stub if inobject slack tracking is in progress. if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) { TrySettingInlineConstructStub(function); @@ -6906,7 +7081,7 @@ static MaybeObject* Runtime_LazyRecompile(Arguments args) { function->ReplaceCode(function->shared()->code()); return function->code(); } - if (CompileOptimized(function, AstNode::kNoNumber)) { + if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) { return function->code(); } if (FLAG_trace_opt) { @@ -6915,7 +7090,7 @@ static MaybeObject* Runtime_LazyRecompile(Arguments args) { PrintF(": optimized compilation failed]\n"); } function->ReplaceCode(function->shared()->code()); - return Failure::Exception(); + return function->code(); } @@ -7075,7 +7250,8 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) { // Try to compile the optimized code. A true return value from // CompileOptimized means that compilation succeeded, not necessarily // that optimization succeeded. 
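The WriteToFlat loop above is a classic write cursor: every piece is copied into an exactly sized buffer through an advancing sink pointer, with debug assertions that the cursor never passes the end and lands precisely on it. The pattern in miniature:

#include <cassert>
#include <cstring>

int main() {
  const char* parts[] = {"a", "bb", "ccc"};
  char buffer[8];  // Exact size: 6 chars + 2 separators, computed first.
  char* sink = buffer;
  char* const end = buffer + sizeof(buffer);
  for (int i = 0; i < 3; i++) {
    if (i > 0) *sink++ = ',';
    const size_t len = std::strlen(parts[i]);
    assert(sink + len <= end);  // Never write past the end...
    std::memcpy(sink, parts[i], len);
    sink += len;
  }
  assert(sink == end);  // ...and land exactly on it when done.
  return 0;
}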
- if (CompileOptimized(function, ast_id) && function->IsOptimized()) { + if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) && + function->IsOptimized()) { DeoptimizationInputData* data = DeoptimizationInputData::cast( function->code()->deoptimization_data()); if (data->OsrPcOffset()->value() >= 0) { @@ -7118,6 +7294,9 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) { ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION); return Smi::FromInt(ast_id); } else { + if (function->IsMarkedForLazyRecompilation()) { + function->ReplaceCode(function->shared()->code()); + } return Smi::FromInt(-1); } } @@ -7368,11 +7547,15 @@ static ObjectPair Runtime_LoadContextSlotNoReferenceError(Arguments args) { static MaybeObject* Runtime_StoreContextSlot(Arguments args) { HandleScope scope; - ASSERT(args.length() == 3); + ASSERT(args.length() == 4); Handle<Object> value(args[0]); CONVERT_ARG_CHECKED(Context, context, 1); CONVERT_ARG_CHECKED(String, name, 2); + CONVERT_SMI_CHECKED(strict_unchecked, args[3]); + RUNTIME_ASSERT(strict_unchecked == kStrictMode || + strict_unchecked == kNonStrictMode); + StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked); int index; PropertyAttributes attributes; @@ -7385,11 +7568,17 @@ static MaybeObject* Runtime_StoreContextSlot(Arguments args) { if ((attributes & READ_ONLY) == 0) { // Context is a fixed array and set cannot fail. Context::cast(*holder)->set(index, *value); + } else if (strict_mode == kStrictMode) { + // Setting read only property in strict mode. + Handle<Object> error = + Factory::NewTypeError("strict_cannot_assign", + HandleVector(&name, 1)); + return Top::Throw(*error); } } else { ASSERT((attributes & READ_ONLY) == 0); Handle<Object> result = - SetElement(Handle<JSObject>::cast(holder), index, value); + SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode); if (result.is_null()) { ASSERT(Top::has_pending_exception()); return Failure::Exception(); @@ -7416,7 +7605,13 @@ static MaybeObject* Runtime_StoreContextSlot(Arguments args) { // extension object itself. if ((attributes & READ_ONLY) == 0 || (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) { - RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, NONE)); + RETURN_IF_EMPTY_HANDLE( + SetProperty(context_ext, name, value, NONE, strict_mode)); + } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) { + // Setting read only property in strict mode. + Handle<Object> error = + Factory::NewTypeError("strict_cannot_assign", HandleVector(&name, 1)); + return Top::Throw(*error); } return *value; } @@ -7745,12 +7940,9 @@ static ObjectPair CompileGlobalEval(Handle<String> source, static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) { ASSERT(args.length() == 4); - if (!args[0]->IsJSFunction()) { - return MakePair(Top::ThrowIllegalOperation(), NULL); - } HandleScope scope; - Handle<JSFunction> callee = args.at<JSFunction>(0); + Handle<Object> callee = args.at<Object>(0); Handle<Object> receiver; // Will be overwritten. // Compute the calling context. 
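The read-only branches added to Runtime_StoreContextSlot above encode the ES5 strict-mode assignment rule: the same write that is silently dropped in sloppy mode becomes a TypeError under kStrictMode. A minimal standalone sketch of that decision (the enum mirrors V8's StrictModeFlag; the message text is illustrative):

  #include <stdexcept>
  #include <string>

  enum StrictModeFlag { kNonStrictMode, kStrictMode };

  // Assigning to a read-only binding: silently ignored in sloppy mode,
  // a TypeError ("strict_cannot_assign") under strict mode.
  void StoreToReadOnly(StrictModeFlag strict_mode, const std::string& name) {
    if (strict_mode == kStrictMode) {
      throw std::runtime_error(
          "TypeError: Cannot assign to read only property '" + name + "'");
    }
    // kNonStrictMode: the write is dropped and execution continues.
  }
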
@@ -7818,12 +8010,9 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) { static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) { ASSERT(args.length() == 4); - if (!args[0]->IsJSFunction()) { - return MakePair(Top::ThrowIllegalOperation(), NULL); - } HandleScope scope; - Handle<JSFunction> callee = args.at<JSFunction>(0); + Handle<Object> callee = args.at<Object>(0); // 'eval' is bound in the global context, but it may have been overwritten. // Compare it to the builtin 'GlobalEval' function to make sure. @@ -7893,7 +8082,9 @@ static MaybeObject* Runtime_PushIfAbsent(Arguments args) { if (elements->get(i) == element) return Heap::false_value(); } Object* obj; - { MaybeObject* maybe_obj = array->SetFastElement(length, element); + // Strict not needed. Used for cycle detection in Array join implementation. + { MaybeObject* maybe_obj = array->SetFastElement(length, element, + kNonStrictMode); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } return Heap::true_value(); @@ -7914,398 +8105,494 @@ static MaybeObject* Runtime_PushIfAbsent(Arguments args) { class ArrayConcatVisitor { public: ArrayConcatVisitor(Handle<FixedArray> storage, - uint32_t index_limit, bool fast_elements) : - storage_(storage), index_limit_(index_limit), - index_offset_(0), fast_elements_(fast_elements) { } + storage_(Handle<FixedArray>::cast(GlobalHandles::Create(*storage))), + index_offset_(0u), + fast_elements_(fast_elements) { } + + ~ArrayConcatVisitor() { + clear_storage(); + } void visit(uint32_t i, Handle<Object> elm) { - if (i >= index_limit_ - index_offset_) return; + if (i >= JSObject::kMaxElementCount - index_offset_) return; uint32_t index = index_offset_ + i; if (fast_elements_) { - ASSERT(index < static_cast<uint32_t>(storage_->length())); - storage_->set(index, *elm); - - } else { - Handle<NumberDictionary> dict = Handle<NumberDictionary>::cast(storage_); - Handle<NumberDictionary> result = - Factory::DictionaryAtNumberPut(dict, index, elm); - if (!result.is_identical_to(dict)) - storage_ = result; + if (index < static_cast<uint32_t>(storage_->length())) { + storage_->set(index, *elm); + return; + } + // Our initial estimate of length was foiled, possibly by + // getters on the arrays increasing the length of later arrays + // during iteration. + // This shouldn't happen in anything but pathological cases. + SetDictionaryMode(index); + // Fall-through to dictionary mode. } - } + ASSERT(!fast_elements_); + Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_)); + Handle<NumberDictionary> result = + Factory::DictionaryAtNumberPut(dict, index, elm); + if (!result.is_identical_to(dict)) { + // Dictionary needed to grow. 
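For orientation while reading the rewritten ArrayConcatVisitor (its handle bookkeeping continues just below): the visitor starts out writing into a pre-sized flat store and migrates, once, to sparse storage when an index outruns the estimate. A simplified model with standard containers (std::vector for the FixedArray store, std::map for NumberDictionary; holes and global-handle management omitted):

  #include <cstddef>
  #include <cstdint>
  #include <map>
  #include <vector>

  class ConcatVisitorSketch {
   public:
    explicit ConcatVisitorSketch(size_t estimated_length)
        : fast_(estimated_length), fast_elements_(true) {}

    void visit(uint32_t index, double value) {
      if (fast_elements_) {
        if (index < fast_.size()) {
          fast_[index] = value;
          return;
        }
        // The length estimate was foiled (e.g. by a getter growing a later
        // argument): migrate everything into sparse storage once.
        SetDictionaryMode();
      }
      dict_[index] = value;
    }

   private:
    void SetDictionaryMode() {
      for (size_t i = 0; i < fast_.size(); i++) {
        dict_[static_cast<uint32_t>(i)] = fast_[i];
      }
      fast_elements_ = false;
    }

    std::vector<double> fast_;
    std::map<uint32_t, double> dict_;
    bool fast_elements_;
  };
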
+ clear_storage(); + set_storage(*result); + } +} void increase_index_offset(uint32_t delta) { - if (index_limit_ - index_offset_ < delta) { - index_offset_ = index_limit_; + if (JSObject::kMaxElementCount - index_offset_ < delta) { + index_offset_ = JSObject::kMaxElementCount; } else { index_offset_ += delta; } } - Handle<FixedArray> storage() { return storage_; } + Handle<JSArray> ToArray() { + Handle<JSArray> array = Factory::NewJSArray(0); + Handle<Object> length = + Factory::NewNumber(static_cast<double>(index_offset_)); + Handle<Map> map; + if (fast_elements_) { + map = Factory::GetFastElementsMap(Handle<Map>(array->map())); + } else { + map = Factory::GetSlowElementsMap(Handle<Map>(array->map())); + } + array->set_map(*map); + array->set_length(*length); + array->set_elements(*storage_); + return array; + } private: - Handle<FixedArray> storage_; - // Limit on the accepted indices. Elements with indices larger than the - // limit are ignored by the visitor. - uint32_t index_limit_; - // Index after last seen index. Always less than or equal to index_limit_. + // Convert storage to dictionary mode. + void SetDictionaryMode(uint32_t index) { + ASSERT(fast_elements_); + Handle<FixedArray> current_storage(*storage_); + Handle<NumberDictionary> slow_storage( + Factory::NewNumberDictionary(current_storage->length())); + uint32_t current_length = static_cast<uint32_t>(current_storage->length()); + for (uint32_t i = 0; i < current_length; i++) { + HandleScope loop_scope; + Handle<Object> element(current_storage->get(i)); + if (!element->IsTheHole()) { + Handle<NumberDictionary> new_storage = + Factory::DictionaryAtNumberPut(slow_storage, i, element); + if (!new_storage.is_identical_to(slow_storage)) { + slow_storage = loop_scope.CloseAndEscape(new_storage); + } + } + } + clear_storage(); + set_storage(*slow_storage); + fast_elements_ = false; + } + + inline void clear_storage() { + GlobalHandles::Destroy(Handle<Object>::cast(storage_).location()); + } + + inline void set_storage(FixedArray* storage) { + storage_ = Handle<FixedArray>::cast(GlobalHandles::Create(storage)); + } + + Handle<FixedArray> storage_; // Always a global handle. + // Index after last seen index. Always less than or equal to + // JSObject::kMaxElementCount. uint32_t index_offset_; - const bool fast_elements_; + bool fast_elements_; }; +static uint32_t EstimateElementCount(Handle<JSArray> array) { + uint32_t length = static_cast<uint32_t>(array->length()->Number()); + int element_count = 0; + switch (array->GetElementsKind()) { + case JSObject::FAST_ELEMENTS: { + // Fast elements can't have lengths that are not representable by + // a 32-bit signed integer. + ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0); + int fast_length = static_cast<int>(length); + Handle<FixedArray> elements(FixedArray::cast(array->elements())); + for (int i = 0; i < fast_length; i++) { + if (!elements->get(i)->IsTheHole()) element_count++; + } + break; + } + case JSObject::DICTIONARY_ELEMENTS: { + Handle<NumberDictionary> dictionary( + NumberDictionary::cast(array->elements())); + int capacity = dictionary->Capacity(); + for (int i = 0; i < capacity; i++) { + Handle<Object> key(dictionary->KeyAt(i)); + if (dictionary->IsKey(*key)) { + element_count++; + } + } + break; + } + default: + // External arrays are always dense. + return length; + } + // As an estimate, we assume that the prototype doesn't contain any + // inherited elements. 
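Restated with standard containers, the estimate computed above is simply a count of occupied slots per backing-store kind (dense external arrays, handled by the default case, are exactly `length`). Illustrative stand-ins: holes become std::nullopt, a dictionary becomes a sparse map:

  #include <cstddef>
  #include <cstdint>
  #include <map>
  #include <optional>
  #include <vector>

  // Fast backing store: a vector whose holes are std::nullopt.
  size_t EstimateElementCount(const std::vector<std::optional<double>>& fast) {
    size_t count = 0;
    for (const auto& slot : fast) {
      if (slot.has_value()) count++;
    }
    return count;  // inherited (prototype) elements deliberately ignored
  }

  // Dictionary backing store: every stored key is a real element.
  size_t EstimateElementCount(const std::map<uint32_t, double>& dict) {
    return dict.size();
  }
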
+ return element_count; +} + + + template<class ExternalArrayClass, class ElementType> -static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver, - bool elements_are_ints, - bool elements_are_guaranteed_smis, - uint32_t range, - ArrayConcatVisitor* visitor) { +static void IterateExternalArrayElements(Handle<JSObject> receiver, + bool elements_are_ints, + bool elements_are_guaranteed_smis, + ArrayConcatVisitor* visitor) { Handle<ExternalArrayClass> array( ExternalArrayClass::cast(receiver->elements())); - uint32_t len = Min(static_cast<uint32_t>(array->length()), range); + uint32_t len = static_cast<uint32_t>(array->length()); - if (visitor != NULL) { - if (elements_are_ints) { - if (elements_are_guaranteed_smis) { - for (uint32_t j = 0; j < len; j++) { - Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j)))); + ASSERT(visitor != NULL); + if (elements_are_ints) { + if (elements_are_guaranteed_smis) { + for (uint32_t j = 0; j < len; j++) { + HandleScope loop_scope; + Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j)))); + visitor->visit(j, e); + } + } else { + for (uint32_t j = 0; j < len; j++) { + HandleScope loop_scope; + int64_t val = static_cast<int64_t>(array->get(j)); + if (Smi::IsValid(static_cast<intptr_t>(val))) { + Handle<Smi> e(Smi::FromInt(static_cast<int>(val))); + visitor->visit(j, e); + } else { + Handle<Object> e = + Factory::NewNumber(static_cast<ElementType>(val)); visitor->visit(j, e); } - } else { - for (uint32_t j = 0; j < len; j++) { - int64_t val = static_cast<int64_t>(array->get(j)); - if (Smi::IsValid(static_cast<intptr_t>(val))) { - Handle<Smi> e(Smi::FromInt(static_cast<int>(val))); - visitor->visit(j, e); - } else { - Handle<Object> e = - Factory::NewNumber(static_cast<ElementType>(val)); - visitor->visit(j, e); + } + } + } else { + for (uint32_t j = 0; j < len; j++) { + HandleScope loop_scope; + Handle<Object> e = Factory::NewNumber(array->get(j)); + visitor->visit(j, e); + } + } +} + + +// Used for sorting indices in a List<uint32_t>. +static int compareUInt32(const uint32_t* ap, const uint32_t* bp) { + uint32_t a = *ap; + uint32_t b = *bp; + return (a == b) ? 0 : (a < b) ? 
-1 : 1; +} + + +static void CollectElementIndices(Handle<JSObject> object, + uint32_t range, + List<uint32_t>* indices) { + JSObject::ElementsKind kind = object->GetElementsKind(); + switch (kind) { + case JSObject::FAST_ELEMENTS: { + Handle<FixedArray> elements(FixedArray::cast(object->elements())); + uint32_t length = static_cast<uint32_t>(elements->length()); + if (range < length) length = range; + for (uint32_t i = 0; i < length; i++) { + if (!elements->get(i)->IsTheHole()) { + indices->Add(i); + } + } + break; + } + case JSObject::DICTIONARY_ELEMENTS: { + Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements())); + uint32_t capacity = dict->Capacity(); + for (uint32_t j = 0; j < capacity; j++) { + HandleScope loop_scope; + Handle<Object> k(dict->KeyAt(j)); + if (dict->IsKey(*k)) { + ASSERT(k->IsNumber()); + uint32_t index = static_cast<uint32_t>(k->Number()); + if (index < range) { + indices->Add(index); } } } - } else { - for (uint32_t j = 0; j < len; j++) { - Handle<Object> e = Factory::NewNumber(array->get(j)); - visitor->visit(j, e); + break; + } + default: { + int dense_elements_length; + switch (kind) { + case JSObject::PIXEL_ELEMENTS: { + dense_elements_length = + PixelArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_BYTE_ELEMENTS: { + dense_elements_length = + ExternalByteArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: { + dense_elements_length = + ExternalUnsignedByteArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_SHORT_ELEMENTS: { + dense_elements_length = + ExternalShortArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: { + dense_elements_length = + ExternalUnsignedShortArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_INT_ELEMENTS: { + dense_elements_length = + ExternalIntArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: { + dense_elements_length = + ExternalUnsignedIntArray::cast(object->elements())->length(); + break; + } + case JSObject::EXTERNAL_FLOAT_ELEMENTS: { + dense_elements_length = + ExternalFloatArray::cast(object->elements())->length(); + break; + } + default: + UNREACHABLE(); + dense_elements_length = 0; + break; + } + uint32_t length = static_cast<uint32_t>(dense_elements_length); + if (range <= length) { + length = range; + // We will add all indices, so we might as well clear it first + // and avoid duplicates. + indices->Clear(); } + for (uint32_t i = 0; i < length; i++) { + indices->Add(i); + } + if (length == range) return; // All indices accounted for already. + break; } } - return len; + Handle<Object> prototype(object->GetPrototype()); + if (prototype->IsJSObject()) { + // The prototype will usually have no inherited element indices, + // but we have to check. + CollectElementIndices(Handle<JSObject>::cast(prototype), range, indices); + } } + /** - * A helper function that visits elements of a JSObject. Only elements - * whose index between 0 and range (exclusive) are visited. - * - * If the third parameter, visitor, is not NULL, the visitor is called - * with parameters, 'visitor_index_offset + element index' and the element. + * A helper function that visits elements of a JSArray in numerical + * order. * - * It returns the number of visisted elements. 
+ * The visitor argument called for each existing element in the array + * with the element index and the element's value. + * Afterwards it increments the base-index of the visitor by the array + * length. + * Returns false if any access threw an exception, otherwise true. */ -static uint32_t IterateElements(Handle<JSObject> receiver, - uint32_t range, - ArrayConcatVisitor* visitor) { - uint32_t num_of_elements = 0; - +static bool IterateElements(Handle<JSArray> receiver, + ArrayConcatVisitor* visitor) { + uint32_t length = static_cast<uint32_t>(receiver->length()->Number()); switch (receiver->GetElementsKind()) { case JSObject::FAST_ELEMENTS: { + // Run through the elements FixedArray and use HasElement and GetElement + // to check the prototype for missing elements. Handle<FixedArray> elements(FixedArray::cast(receiver->elements())); - uint32_t len = elements->length(); - if (range < len) { - len = range; - } - - for (uint32_t j = 0; j < len; j++) { - Handle<Object> e(elements->get(j)); - if (!e->IsTheHole()) { - num_of_elements++; - if (visitor) { - visitor->visit(j, e); - } + int fast_length = static_cast<int>(length); + ASSERT(fast_length <= elements->length()); + for (int j = 0; j < fast_length; j++) { + HandleScope loop_scope; + Handle<Object> element_value(elements->get(j)); + if (!element_value->IsTheHole()) { + visitor->visit(j, element_value); + } else if (receiver->HasElement(j)) { + // Call GetElement on receiver, not its prototype, or getters won't + // have the correct receiver. + element_value = GetElement(receiver, j); + if (element_value.is_null()) return false; + visitor->visit(j, element_value); } } break; } + case JSObject::DICTIONARY_ELEMENTS: { + Handle<NumberDictionary> dict(receiver->element_dictionary()); + List<uint32_t> indices(dict->Capacity() / 2); + // Collect all indices in the object and the prototypes less + // than length. This might introduce duplicates in the indices list. + CollectElementIndices(receiver, length, &indices); + indices.Sort(&compareUInt32); + int j = 0; + int n = indices.length(); + while (j < n) { + HandleScope loop_scope; + uint32_t index = indices[j]; + Handle<Object> element = GetElement(receiver, index); + if (element.is_null()) return false; + visitor->visit(index, element); + // Skip to next different index (i.e., omit duplicates). 
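In isolation, the sort-and-deduplicate walk being built in this hunk looks like the sketch below (std::sort standing in for the runtime's index sort; duplicate indices arise when the same index is collected from both the receiver and a prototype):

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Visit each collected index exactly once, in ascending order; the inner
  // do/while skips duplicates, just as in the hunk.
  void VisitSortedOnce(std::vector<uint32_t> indices,
                       void (*visit)(uint32_t index)) {
    std::sort(indices.begin(), indices.end());
    size_t j = 0;
    const size_t n = indices.size();
    while (j < n) {
      const uint32_t index = indices[j];
      visit(index);
      do {
        j++;
      } while (j < n && indices[j] == index);
    }
  }
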
+ do { + j++; + } while (j < n && indices[j] == index); + } + break; + } case JSObject::PIXEL_ELEMENTS: { Handle<PixelArray> pixels(PixelArray::cast(receiver->elements())); - uint32_t len = pixels->length(); - if (range < len) { - len = range; - } - - for (uint32_t j = 0; j < len; j++) { - num_of_elements++; - if (visitor != NULL) { - Handle<Smi> e(Smi::FromInt(pixels->get(j))); - visitor->visit(j, e); - } + for (uint32_t j = 0; j < length; j++) { + Handle<Smi> e(Smi::FromInt(pixels->get(j))); + visitor->visit(j, e); } break; } case JSObject::EXTERNAL_BYTE_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalByteArray, int8_t>( - receiver, true, true, range, visitor); + IterateExternalArrayElements<ExternalByteArray, int8_t>( + receiver, true, true, visitor); break; } case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>( - receiver, true, true, range, visitor); + IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>( + receiver, true, true, visitor); break; } case JSObject::EXTERNAL_SHORT_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalShortArray, int16_t>( - receiver, true, true, range, visitor); + IterateExternalArrayElements<ExternalShortArray, int16_t>( + receiver, true, true, visitor); break; } case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>( - receiver, true, true, range, visitor); + IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>( + receiver, true, true, visitor); break; } case JSObject::EXTERNAL_INT_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalIntArray, int32_t>( - receiver, true, false, range, visitor); + IterateExternalArrayElements<ExternalIntArray, int32_t>( + receiver, true, false, visitor); break; } case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>( - receiver, true, false, range, visitor); + IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>( + receiver, true, false, visitor); break; } case JSObject::EXTERNAL_FLOAT_ELEMENTS: { - num_of_elements = - IterateExternalArrayElements<ExternalFloatArray, float>( - receiver, false, false, range, visitor); - break; - } - case JSObject::DICTIONARY_ELEMENTS: { - Handle<NumberDictionary> dict(receiver->element_dictionary()); - uint32_t capacity = dict->Capacity(); - for (uint32_t j = 0; j < capacity; j++) { - Handle<Object> k(dict->KeyAt(j)); - if (dict->IsKey(*k)) { - ASSERT(k->IsNumber()); - uint32_t index = static_cast<uint32_t>(k->Number()); - if (index < range) { - num_of_elements++; - if (visitor) { - visitor->visit(index, Handle<Object>(dict->ValueAt(j))); - } - } - } - } + IterateExternalArrayElements<ExternalFloatArray, float>( + receiver, false, false, visitor); break; } default: UNREACHABLE(); break; } - - return num_of_elements; -} - - -/** - * A helper function that visits elements of an Array object, and elements - * on its prototypes. - * - * Elements on prototypes are visited first, and only elements whose indices - * less than Array length are visited. - * - * If a ArrayConcatVisitor object is given, the visitor is called with - * parameters, element's index + visitor_index_offset and the element. - * - * The returned number of elements is an upper bound on the actual number - * of elements added. 
If the same element occurs in more than one object - * in the array's prototype chain, it will be counted more than once, but - * will only occur once in the result. - */ -static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array, - ArrayConcatVisitor* visitor) { - uint32_t range = static_cast<uint32_t>(array->length()->Number()); - Handle<Object> obj = array; - - static const int kEstimatedPrototypes = 3; - List< Handle<JSObject> > objects(kEstimatedPrototypes); - - // Visit prototype first. If an element on the prototype is shadowed by - // the inheritor using the same index, the ArrayConcatVisitor visits - // the prototype element before the shadowing element. - // The visitor can simply overwrite the old value by new value using - // the same index. This follows Array::concat semantics. - while (!obj->IsNull()) { - objects.Add(Handle<JSObject>::cast(obj)); - obj = Handle<Object>(obj->GetPrototype()); - } - - uint32_t nof_elements = 0; - for (int i = objects.length() - 1; i >= 0; i--) { - Handle<JSObject> obj = objects[i]; - uint32_t encountered_elements = - IterateElements(Handle<JSObject>::cast(obj), range, visitor); - - if (encountered_elements > JSObject::kMaxElementCount - nof_elements) { - nof_elements = JSObject::kMaxElementCount; - } else { - nof_elements += encountered_elements; - } - } - - return nof_elements; -} - - -/** - * A helper function of Runtime_ArrayConcat. - * - * The first argument is an Array of arrays and objects. It is the - * same as the arguments array of Array::concat JS function. - * - * If an argument is an Array object, the function visits array - * elements. If an argument is not an Array object, the function - * visits the object as if it is an one-element array. - * - * If the result array index overflows 32-bit unsigned integer, the rounded - * non-negative number is used as new length. For example, if one - * array length is 2^32 - 1, second array length is 1, the - * concatenated array length is 0. - * TODO(lrn) Change length behavior to ECMAScript 5 specification (length - * is one more than the last array index to get a value assigned). - */ -static uint32_t IterateArguments(Handle<JSArray> arguments, - ArrayConcatVisitor* visitor) { - uint32_t visited_elements = 0; - uint32_t num_of_args = static_cast<uint32_t>(arguments->length()->Number()); - - for (uint32_t i = 0; i < num_of_args; i++) { - Object *element; - MaybeObject* maybe_element = arguments->GetElement(i); - // This if() is not expected to fail, but we have the check in the - // interest of hardening the runtime calls. - if (maybe_element->ToObject(&element)) { - Handle<Object> obj(element); - if (obj->IsJSArray()) { - Handle<JSArray> array = Handle<JSArray>::cast(obj); - uint32_t len = static_cast<uint32_t>(array->length()->Number()); - uint32_t nof_elements = - IterateArrayAndPrototypeElements(array, visitor); - // Total elements of array and its prototype chain can be more than - // the array length, but ArrayConcat can only concatenate at most - // the array length number of elements. We use the length as an estimate - // for the actual number of elements added. - uint32_t added_elements = (nof_elements > len) ? 
len : nof_elements; - if (JSArray::kMaxElementCount - visited_elements < added_elements) { - visited_elements = JSArray::kMaxElementCount; - } else { - visited_elements += added_elements; - } - if (visitor) visitor->increase_index_offset(len); - } else { - if (visitor) { - visitor->visit(0, obj); - visitor->increase_index_offset(1); - } - if (visited_elements < JSArray::kMaxElementCount) { - visited_elements++; - } - } - } - } - return visited_elements; + visitor->increase_index_offset(length); + return true; } /** * Array::concat implementation. * See ECMAScript 262, 15.4.4.4. - * TODO(lrn): Fix non-compliance for very large concatenations and update to + * TODO(581): Fix non-compliance for very large concatenations and update to * following the ECMAScript 5 specification. */ static MaybeObject* Runtime_ArrayConcat(Arguments args) { ASSERT(args.length() == 1); HandleScope handle_scope; - CONVERT_CHECKED(JSArray, arg_arrays, args[0]); - Handle<JSArray> arguments(arg_arrays); - - // Pass 1: estimate the number of elements of the result - // (it could be more than real numbers if prototype has elements). - uint32_t result_length = 0; - uint32_t num_of_args = static_cast<uint32_t>(arguments->length()->Number()); - - { AssertNoAllocation nogc; - for (uint32_t i = 0; i < num_of_args; i++) { - Object* obj; - MaybeObject* maybe_object = arguments->GetElement(i); - // This if() is not expected to fail, but we have the check in the - // interest of hardening the runtime calls. - if (maybe_object->ToObject(&obj)) { - uint32_t length_estimate; - if (obj->IsJSArray()) { - length_estimate = - static_cast<uint32_t>(JSArray::cast(obj)->length()->Number()); - } else { - length_estimate = 1; - } - if (JSObject::kMaxElementCount - result_length < length_estimate) { - result_length = JSObject::kMaxElementCount; - break; - } - result_length += length_estimate; + CONVERT_ARG_CHECKED(JSArray, arguments, 0); + int argument_count = static_cast<int>(arguments->length()->Number()); + RUNTIME_ASSERT(arguments->HasFastElements()); + Handle<FixedArray> elements(FixedArray::cast(arguments->elements())); + + // Pass 1: estimate the length and number of elements of the result. + // The actual length can be larger if any of the arguments have getters + // that mutate other arguments (but will otherwise be precise). + // The number of elements is precise if there are no inherited elements. + + uint32_t estimate_result_length = 0; + uint32_t estimate_nof_elements = 0; + { + for (int i = 0; i < argument_count; i++) { + HandleScope loop_scope; + Handle<Object> obj(elements->get(i)); + uint32_t length_estimate; + uint32_t element_estimate; + if (obj->IsJSArray()) { + Handle<JSArray> array(Handle<JSArray>::cast(obj)); + length_estimate = + static_cast<uint32_t>(array->length()->Number()); + element_estimate = + EstimateElementCount(array); + } else { + length_estimate = 1; + element_estimate = 1; + } + // Avoid overflows by capping at kMaxElementCount. + if (JSObject::kMaxElementCount - estimate_result_length < + length_estimate) { + estimate_result_length = JSObject::kMaxElementCount; + } else { + estimate_result_length += length_estimate; + } + if (JSObject::kMaxElementCount - estimate_nof_elements < + element_estimate) { + estimate_nof_elements = JSObject::kMaxElementCount; + } else { + estimate_nof_elements += element_estimate; } } } - // Allocate an empty array, will set length and content later. 
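The pass-1 estimates just computed feed the storage heuristic applied below: flat storage pays off only when at least half of the estimated result length is expected to hold real elements, otherwise a pre-sized dictionary wins on both time and space. A sketch of both rules (the 64-bit widening is an illustrative guard; the real code relies on both estimates being capped at kMaxElementCount):

  #include <cstdint>

  bool UseFastStorage(uint32_t estimate_nof_elements,
                      uint32_t estimate_result_length) {
    return static_cast<uint64_t>(estimate_nof_elements) * 2 >=
           estimate_result_length;
  }

  // Dictionary pre-sizing: the estimate plus 25% headroom (cf. the TODO(126)
  // about pushing this into Dictionary::Allocate).
  uint32_t DictionaryCapacityFor(uint32_t estimate_nof_elements) {
    return estimate_nof_elements + (estimate_nof_elements >> 2);
  }
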
- Handle<JSArray> result = Factory::NewJSArray(0); - - uint32_t estimate_nof_elements = IterateArguments(arguments, NULL); // If estimated number of elements is more than half of length, a // fixed array (fast case) is more time and space-efficient than a // dictionary. - bool fast_case = (estimate_nof_elements * 2) >= result_length; + bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length; Handle<FixedArray> storage; if (fast_case) { // The backing storage array must have non-existing elements to // preserve holes across concat operations. - storage = Factory::NewFixedArrayWithHoles(result_length); - Handle<Map> fast_map = - Factory::GetFastElementsMap(Handle<Map>(result->map())); - result->set_map(*fast_map); + storage = Factory::NewFixedArrayWithHoles(estimate_result_length); } else { // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate uint32_t at_least_space_for = estimate_nof_elements + (estimate_nof_elements >> 2); storage = Handle<FixedArray>::cast( - Factory::NewNumberDictionary(at_least_space_for)); - Handle<Map> slow_map = - Factory::GetSlowElementsMap(Handle<Map>(result->map())); - result->set_map(*slow_map); + Factory::NewNumberDictionary(at_least_space_for)); } - Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length)); - - ArrayConcatVisitor visitor(storage, result_length, fast_case); - - IterateArguments(arguments, &visitor); + ArrayConcatVisitor visitor(storage, fast_case); - result->set_length(*len); - // Please note the storage might have changed in the visitor. - result->set_elements(*visitor.storage()); + for (int i = 0; i < argument_count; i++) { + Handle<Object> obj(elements->get(i)); + if (obj->IsJSArray()) { + Handle<JSArray> array = Handle<JSArray>::cast(obj); + if (!IterateElements(array, &visitor)) { + return Failure::Exception(); + } + } else { + visitor.visit(0, obj); + visitor.increase_index_offset(1); + } + } - return *result; + return *visitor.ToArray(); } @@ -8396,10 +8683,12 @@ static MaybeObject* Runtime_SwapElements(Arguments args) { Handle<JSObject> jsobject = Handle<JSObject>::cast(object); Handle<Object> tmp1 = GetElement(jsobject, index1); + RETURN_IF_EMPTY_HANDLE(tmp1); Handle<Object> tmp2 = GetElement(jsobject, index2); + RETURN_IF_EMPTY_HANDLE(tmp2); - SetElement(jsobject, index1, tmp2); - SetElement(jsobject, index2, tmp1); + RETURN_IF_EMPTY_HANDLE(SetElement(jsobject, index1, tmp2, kStrictMode)); + RETURN_IF_EMPTY_HANDLE(SetElement(jsobject, index2, tmp1, kStrictMode)); return Heap::undefined_value(); } @@ -9077,7 +9366,9 @@ static bool CopyContextLocalsToScopeObject( RETURN_IF_EMPTY_HANDLE_VALUE( SetProperty(scope_object, scope_info.context_slot_name(i), - Handle<Object>(context->get(context_index)), NONE), + Handle<Object>(context->get(context_index)), + NONE, + kNonStrictMode), false); } } @@ -9103,7 +9394,9 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) { RETURN_IF_EMPTY_HANDLE_VALUE( SetProperty(local_scope, scope_info.parameter_name(i), - Handle<Object>(frame->GetParameter(i)), NONE), + Handle<Object>(frame->GetParameter(i)), + NONE, + kNonStrictMode), Handle<JSObject>()); } @@ -9112,7 +9405,9 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) { RETURN_IF_EMPTY_HANDLE_VALUE( SetProperty(local_scope, scope_info.stack_slot_name(i), - Handle<Object>(frame->GetExpression(i)), NONE), + Handle<Object>(frame->GetExpression(i)), + NONE, + kNonStrictMode), Handle<JSObject>()); } @@ -9136,7 +9431,11 @@ static Handle<JSObject> 
MaterializeLocalScope(JavaScriptFrame* frame) { ASSERT(keys->get(i)->IsString()); Handle<String> key(String::cast(keys->get(i))); RETURN_IF_EMPTY_HANDLE_VALUE( - SetProperty(local_scope, key, GetProperty(ext, key), NONE), + SetProperty(local_scope, + key, + GetProperty(ext, key), + NONE, + kNonStrictMode), Handle<JSObject>()); } } @@ -9174,7 +9473,8 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) { SetProperty(closure_scope, scope_info.parameter_name(i), Handle<Object>(element), - NONE), + NONE, + kNonStrictMode), Handle<JSObject>()); } } @@ -9195,7 +9495,11 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) { ASSERT(keys->get(i)->IsString()); Handle<String> key(String::cast(keys->get(i))); RETURN_IF_EMPTY_HANDLE_VALUE( - SetProperty(closure_scope, key, GetProperty(ext, key), NONE), + SetProperty(closure_scope, + key, + GetProperty(ext, key), + NONE, + kNonStrictMode), Handle<JSObject>()); } } @@ -10673,6 +10977,207 @@ static MaybeObject* Runtime_GetHeapUsage(Arguments args) { } return Smi::FromInt(usage); } + + +// Captures a live object list from the present heap. +static MaybeObject* Runtime_HasLOLEnabled(Arguments args) { +#ifdef LIVE_OBJECT_LIST + return Heap::true_value(); +#else + return Heap::false_value(); +#endif +} + + +// Captures a live object list from the present heap. +static MaybeObject* Runtime_CaptureLOL(Arguments args) { +#ifdef LIVE_OBJECT_LIST + return LiveObjectList::Capture(); +#else + return Heap::undefined_value(); +#endif +} + + +// Deletes the specified live object list. +static MaybeObject* Runtime_DeleteLOL(Arguments args) { +#ifdef LIVE_OBJECT_LIST + CONVERT_SMI_CHECKED(id, args[0]); + bool success = LiveObjectList::Delete(id); + return success ? Heap::true_value() : Heap::false_value(); +#else + return Heap::undefined_value(); +#endif +} + + +// Generates the response to a debugger request for a dump of the objects +// contained in the difference between the captured live object lists +// specified by id1 and id2. +// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be +// dumped. +static MaybeObject* Runtime_DumpLOL(Arguments args) { +#ifdef LIVE_OBJECT_LIST + HandleScope scope; + CONVERT_SMI_CHECKED(id1, args[0]); + CONVERT_SMI_CHECKED(id2, args[1]); + CONVERT_SMI_CHECKED(start, args[2]); + CONVERT_SMI_CHECKED(count, args[3]); + CONVERT_ARG_CHECKED(JSObject, filter_obj, 4); + EnterDebugger enter_debugger; + return LiveObjectList::Dump(id1, id2, start, count, filter_obj); +#else + return Heap::undefined_value(); +#endif +} + + +// Gets the specified object as requested by the debugger. +// This is only used for obj ids shown in live object lists. +static MaybeObject* Runtime_GetLOLObj(Arguments args) { +#ifdef LIVE_OBJECT_LIST + CONVERT_SMI_CHECKED(obj_id, args[0]); + Object* result = LiveObjectList::GetObj(obj_id); + return result; +#else + return Heap::undefined_value(); +#endif +} + + +// Gets the obj id for the specified address if valid. +// This is only used for obj ids shown in live object lists. +static MaybeObject* Runtime_GetLOLObjId(Arguments args) { +#ifdef LIVE_OBJECT_LIST + HandleScope scope; + CONVERT_ARG_CHECKED(String, address, 0); + Object* result = LiveObjectList::GetObjId(address); + return result; +#else + return Heap::undefined_value(); +#endif +} + + +// Gets the retainers that references the specified object alive. 
+static MaybeObject* Runtime_GetLOLObjRetainers(Arguments args) { +#ifdef LIVE_OBJECT_LIST + HandleScope scope; + CONVERT_SMI_CHECKED(obj_id, args[0]); + RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject()); + RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean()); + RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi()); + RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi()); + CONVERT_ARG_CHECKED(JSObject, filter_obj, 5); + + Handle<JSObject> instance_filter; + if (args[1]->IsJSObject()) { + instance_filter = args.at<JSObject>(1); + } + bool verbose = false; + if (args[2]->IsBoolean()) { + verbose = args[2]->IsTrue(); + } + int start = 0; + if (args[3]->IsSmi()) { + start = Smi::cast(args[3])->value(); + } + int limit = Smi::kMaxValue; + if (args[4]->IsSmi()) { + limit = Smi::cast(args[4])->value(); + } + + return LiveObjectList::GetObjRetainers(obj_id, + instance_filter, + verbose, + start, + limit, + filter_obj); +#else + return Heap::undefined_value(); +#endif +} + + +// Gets the reference path between 2 objects. +static MaybeObject* Runtime_GetLOLPath(Arguments args) { +#ifdef LIVE_OBJECT_LIST + HandleScope scope; + CONVERT_SMI_CHECKED(obj_id1, args[0]); + CONVERT_SMI_CHECKED(obj_id2, args[1]); + RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject()); + + Handle<JSObject> instance_filter; + if (args[2]->IsJSObject()) { + instance_filter = args.at<JSObject>(2); + } + + Object* result = + LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter); + return result; +#else + return Heap::undefined_value(); +#endif +} + + +// Generates the response to a debugger request for a list of all +// previously captured live object lists. +static MaybeObject* Runtime_InfoLOL(Arguments args) { +#ifdef LIVE_OBJECT_LIST + CONVERT_SMI_CHECKED(start, args[0]); + CONVERT_SMI_CHECKED(count, args[1]); + return LiveObjectList::Info(start, count); +#else + return Heap::undefined_value(); +#endif +} + + +// Gets a dump of the specified object as requested by the debugger. +// This is only used for obj ids shown in live object lists. +static MaybeObject* Runtime_PrintLOLObj(Arguments args) { +#ifdef LIVE_OBJECT_LIST + HandleScope scope; + CONVERT_SMI_CHECKED(obj_id, args[0]); + Object* result = LiveObjectList::PrintObj(obj_id); + return result; +#else + return Heap::undefined_value(); +#endif +} + + +// Resets and releases all previously captured live object lists. +static MaybeObject* Runtime_ResetLOL(Arguments args) { +#ifdef LIVE_OBJECT_LIST + LiveObjectList::Reset(); + return Heap::undefined_value(); +#else + return Heap::undefined_value(); +#endif +} + + +// Generates the response to a debugger request for a summary of the types +// of objects in the difference between the captured live object lists +// specified by id1 and id2. +// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be +// summarized. +static MaybeObject* Runtime_SummarizeLOL(Arguments args) { +#ifdef LIVE_OBJECT_LIST + HandleScope scope; + CONVERT_SMI_CHECKED(id1, args[0]); + CONVERT_SMI_CHECKED(id2, args[1]); + CONVERT_ARG_CHECKED(JSObject, filter_obj, 2); + + EnterDebugger enter_debugger; + return LiveObjectList::Summarize(id1, id2, filter_obj); +#else + return Heap::undefined_value(); +#endif +} + #endif // ENABLE_DEBUGGER_SUPPORT @@ -10789,7 +11294,8 @@ static MaybeObject* Runtime_CollectStackTrace(Arguments args) { limit = Max(limit, 0); // Ensure that limit is not negative. 
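The stack-trace hunk continues below by growing the elements buffer whenever the next 4-slot frame record would not fit, replacing the old per-element SetElement fallback. Since a FixedArray cannot grow in place, a larger store is allocated and the used prefix copied across; a sketch with std::vector as the stand-in (the doubling-plus-slack rule here is assumed, the real rule is JSObject::NewElementsCapacity):

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  std::vector<int> GrowBackingStore(const std::vector<int>& old,
                                    size_t cursor) {
    const size_t new_capacity = old.size() * 2 + 16;
    std::vector<int> grown(new_capacity, /*hole=*/0);
    std::copy(old.begin(), old.begin() + cursor, grown.begin());
    return grown;
  }
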
int initial_size = Min(limit, 10); - Handle<JSArray> result = Factory::NewJSArray(initial_size * 4); + Handle<FixedArray> elements = + Factory::NewFixedArrayWithHoles(initial_size * 4); StackFrameIterator iter; // If the caller parameter is a function we skip frames until we're @@ -10805,27 +11311,30 @@ static MaybeObject* Runtime_CollectStackTrace(Arguments args) { List<FrameSummary> frames(3); // Max 2 levels of inlining. frame->Summarize(&frames); for (int i = frames.length() - 1; i >= 0; i--) { + if (cursor + 4 > elements->length()) { + int new_capacity = JSObject::NewElementsCapacity(elements->length()); + Handle<FixedArray> new_elements = + Factory::NewFixedArrayWithHoles(new_capacity); + for (int i = 0; i < cursor; i++) { + new_elements->set(i, elements->get(i)); + } + elements = new_elements; + } + ASSERT(cursor + 4 <= elements->length()); + Handle<Object> recv = frames[i].receiver(); Handle<JSFunction> fun = frames[i].function(); Handle<Code> code = frames[i].code(); Handle<Smi> offset(Smi::FromInt(frames[i].offset())); - FixedArray* elements = FixedArray::cast(result->elements()); - if (cursor + 3 < elements->length()) { - elements->set(cursor++, *recv); - elements->set(cursor++, *fun); - elements->set(cursor++, *code); - elements->set(cursor++, *offset); - } else { - SetElement(result, cursor++, recv); - SetElement(result, cursor++, fun); - SetElement(result, cursor++, code); - SetElement(result, cursor++, offset); - } + elements->set(cursor++, *recv); + elements->set(cursor++, *fun); + elements->set(cursor++, *code); + elements->set(cursor++, *offset); } } iter.Advance(); } - + Handle<JSArray> result = Factory::NewJSArrayWithElements(elements); result->set_length(Smi::FromInt(cursor)); return *result; } @@ -10990,7 +11499,13 @@ static MaybeObject* Runtime_MessageGetScript(Arguments args) { static MaybeObject* Runtime_ListNatives(Arguments args) { ASSERT(args.length() == 0); HandleScope scope; - Handle<JSArray> result = Factory::NewJSArray(0); +#define COUNT_ENTRY(Name, argc, ressize) + 1 + int entry_count = 0 + RUNTIME_FUNCTION_LIST(COUNT_ENTRY) + INLINE_FUNCTION_LIST(COUNT_ENTRY) + INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY); +#undef COUNT_ENTRY + Handle<FixedArray> elements = Factory::NewFixedArray(entry_count); int index = 0; bool inline_runtime_functions = false; #define ADD_ENTRY(Name, argc, ressize) \ @@ -11005,10 +11520,11 @@ static MaybeObject* Runtime_ListNatives(Arguments args) { name = Factory::NewStringFromAscii( \ Vector<const char>(#Name, StrLength(#Name))); \ } \ - Handle<JSArray> pair = Factory::NewJSArray(0); \ - SetElement(pair, 0, name); \ - SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \ - SetElement(result, index++, pair); \ + Handle<FixedArray> pair_elements = Factory::NewFixedArray(2); \ + pair_elements->set(0, *name); \ + pair_elements->set(1, Smi::FromInt(argc)); \ + Handle<JSArray> pair = Factory::NewJSArrayWithElements(pair_elements); \ + elements->set(index++, *pair); \ } inline_runtime_functions = false; RUNTIME_FUNCTION_LIST(ADD_ENTRY) @@ -11016,6 +11532,8 @@ static MaybeObject* Runtime_ListNatives(Arguments args) { INLINE_FUNCTION_LIST(ADD_ENTRY) INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY) #undef ADD_ENTRY + ASSERT_EQ(index, entry_count); + Handle<JSArray> result = Factory::NewJSArrayWithElements(elements); return *result; } #endif diff --git a/src/runtime.h b/src/runtime.h index fb2ff93c..8e73d5c4 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -45,7 +45,7 @@ namespace internal { /* Property access */ \ F(GetProperty, 2, 1) \ 
F(KeyedGetProperty, 2, 1) \ - F(DeleteProperty, 2, 1) \ + F(DeleteProperty, 3, 1) \ F(HasLocalProperty, 2, 1) \ F(HasProperty, 2, 1) \ F(HasElement, 2, 1) \ @@ -128,6 +128,7 @@ namespace internal { \ F(StringAdd, 2, 1) \ F(StringBuilderConcat, 3, 1) \ + F(StringBuilderJoin, 3, 1) \ \ /* Bit operations */ \ F(NumberOr, 2, 1) \ @@ -240,7 +241,7 @@ namespace internal { F(ResolvePossiblyDirectEval, 4, 2) \ F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \ \ - F(SetProperty, -1 /* 3 or 4 */, 1) \ + F(SetProperty, -1 /* 4 or 5 */, 1) \ F(DefineOrRedefineDataProperty, 4, 1) \ F(DefineOrRedefineAccessorProperty, 5, 1) \ F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \ @@ -287,12 +288,12 @@ namespace internal { F(DeleteContextSlot, 2, 1) \ F(LoadContextSlot, 2, 2) \ F(LoadContextSlotNoReferenceError, 2, 2) \ - F(StoreContextSlot, 3, 1) \ + F(StoreContextSlot, 4, 1) \ \ /* Declarations and initialization */ \ - F(DeclareGlobals, 3, 1) \ + F(DeclareGlobals, 4, 1) \ F(DeclareContextSlot, 4, 1) \ - F(InitializeVarGlobal, -1 /* 1 or 2 */, 1) \ + F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \ F(InitializeConstGlobal, 2, 1) \ F(InitializeConstContextSlot, 3, 1) \ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \ @@ -375,7 +376,21 @@ namespace internal { \ F(SetFlags, 1, 1) \ F(CollectGarbage, 1, 1) \ - F(GetHeapUsage, 0, 1) + F(GetHeapUsage, 0, 1) \ + \ + /* LiveObjectList support*/ \ + F(HasLOLEnabled, 0, 1) \ + F(CaptureLOL, 0, 1) \ + F(DeleteLOL, 1, 1) \ + F(DumpLOL, 5, 1) \ + F(GetLOLObj, 1, 1) \ + F(GetLOLObjId, 1, 1) \ + F(GetLOLObjRetainers, 6, 1) \ + F(GetLOLPath, 3, 1) \ + F(InfoLOL, 2, 1) \ + F(PrintLOLObj, 1, 1) \ + F(ResetLOL, 0, 1) \ + F(SummarizeLOL, 3, 1) #else #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) @@ -537,7 +552,8 @@ class Runtime : public AllStatic { Handle<Object> object, Handle<Object> key, Handle<Object> value, - PropertyAttributes attr); + PropertyAttributes attr, + StrictModeFlag strict_mode); MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty( Handle<JSObject> object, diff --git a/src/runtime.js b/src/runtime.js index 2cdbbdee..66d839be 100644 --- a/src/runtime.js +++ b/src/runtime.js @@ -338,8 +338,8 @@ function SHR(y) { */ // ECMA-262, section 11.4.1, page 46. -function DELETE(key) { - return %DeleteProperty(%ToObject(this), %ToString(key)); +function DELETE(key, strict) { + return %DeleteProperty(%ToObject(this), %ToString(key), strict); } diff --git a/src/spaces.h b/src/spaces.h index 4f2d07b0..6165255f 100644 --- a/src/spaces.h +++ b/src/spaces.h @@ -2121,6 +2121,12 @@ class MapSpace : public FixedSpace { accounting_stats_.DeallocateBytes(accounting_stats_.Size()); accounting_stats_.AllocateBytes(new_size); + // Flush allocation watermarks. + for (Page* p = first_page_; p != top_page; p = p->next_page()) { + p->SetAllocationWatermark(p->AllocationTop()); + } + top_page->SetAllocationWatermark(new_top); + #ifdef DEBUG if (FLAG_enable_slow_asserts) { intptr_t actual_size = 0; diff --git a/src/string.js b/src/string.js index 2b73e0f6..d8d402c4 100644 --- a/src/string.js +++ b/src/string.js @@ -87,7 +87,7 @@ function StringConcat() { if (len === 1) { return this_as_string + %_Arguments(0); } - var parts = new $Array(len + 1); + var parts = new InternalArray(len + 1); parts[0] = this_as_string; for (var i = 0; i < len; i++) { var part = %_Arguments(i); @@ -357,7 +357,7 @@ function addCaptureString(builder, matchInfo, index) { // TODO(lrn): This array will survive indefinitely if replace is never // called again. 
However, it will be empty, since the contents are cleared // in the finally block. -var reusableReplaceArray = $Array(16); +var reusableReplaceArray = new InternalArray(16); // Helper function for replacing regular expressions with the result of a // function application in String.prototype.replace. @@ -370,7 +370,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) { // of another replace) or we have failed to set the reusable array // back due to an exception in a replacement function. Create a new // array to use in the future, or until the original is written back. - resultArray = $Array(16); + resultArray = new InternalArray(16); } var res = %RegExpExecMultiple(regexp, subject, @@ -386,7 +386,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) { var i = 0; if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) { var match_start = 0; - var override = [null, 0, subject]; + var override = new InternalArray(null, 0, subject); var receiver = %GetGlobalReceiver(); while (i < len) { var elem = res[i]; @@ -447,7 +447,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) { replacement = %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace); } else { - var parameters = $Array(m + 2); + var parameters = new InternalArray(m + 2); for (var j = 0; j < m; j++) { parameters[j] = CaptureString(subject, matchInfo, j); } @@ -720,7 +720,7 @@ function StringTrimRight() { return %StringTrim(TO_STRING_INLINE(this), false, true); } -var static_charcode_array = new $Array(4); +var static_charcode_array = new InternalArray(4); // ECMA-262, section 15.5.3.2 function StringFromCharCode(code) { @@ -825,7 +825,7 @@ function ReplaceResultBuilder(str) { if (%_ArgumentsLength() > 1) { this.elements = %_Arguments(1); } else { - this.elements = new $Array(); + this.elements = new InternalArray(); } this.special_string = str; } diff --git a/src/stub-cache.cc b/src/stub-cache.cc index f87728b7..f23f3825 100644 --- a/src/stub-cache.cc +++ b/src/stub-cache.cc @@ -498,13 +498,13 @@ MaybeObject* StubCache::ComputeStoreField(String* name, JSObject* receiver, int field_index, Map* transition, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { PropertyType type = (transition == NULL) ? 
FIELD : MAP_TRANSITION; Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, type, extra_ic_state); + Code::STORE_IC, type, strict_mode); Object* code = receiver->map()->FindInCodeCache(name, flags); if (code->IsUndefined()) { - StoreStubCompiler compiler(extra_ic_state); + StoreStubCompiler compiler(strict_mode); { MaybeObject* maybe_code = compiler.CompileStoreField(receiver, field_index, transition, name); if (!maybe_code->ToObject(&code)) return maybe_code; @@ -521,13 +521,15 @@ MaybeObject* StubCache::ComputeStoreField(String* name, } -MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) { +MaybeObject* StubCache::ComputeKeyedStoreSpecialized( + JSObject* receiver, + StrictModeFlag strict_mode) { Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL); + Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode); String* name = Heap::KeyedStoreSpecialized_symbol(); Object* code = receiver->map()->FindInCodeCache(name, flags); if (code->IsUndefined()) { - KeyedStoreStubCompiler compiler; + KeyedStoreStubCompiler compiler(strict_mode); { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver); if (!maybe_code->ToObject(&code)) return maybe_code; } @@ -542,6 +544,35 @@ MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) { } +MaybeObject* StubCache::ComputeKeyedStorePixelArray( + JSObject* receiver, + StrictModeFlag strict_mode) { + // Using NORMAL as the PropertyType for array element stores is a misuse. The + // generated stub always accesses fast elements, not slow-mode fields, but + // some property type is required for the stub lookup. Note that overloading + // the NORMAL PropertyType is only safe as long as no stubs are generated for + // other keyed field stores. This is guaranteed to be the case since all field + // keyed stores that are not array elements go through a generic builtin stub. + Code::Flags flags = + Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode); + String* name = Heap::KeyedStorePixelArray_symbol(); + Object* code = receiver->map()->FindInCodeCache(name, flags); + if (code->IsUndefined()) { + KeyedStoreStubCompiler compiler(strict_mode); + { MaybeObject* maybe_code = compiler.CompileStorePixelArray(receiver); + if (!maybe_code->ToObject(&code)) return maybe_code; + } + PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0)); + Object* result; + { MaybeObject* maybe_result = + receiver->UpdateMapCodeCache(name, Code::cast(code)); + if (!maybe_result->ToObject(&result)) return maybe_result; + } + } + return code; +} + + namespace { ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) { @@ -571,11 +602,13 @@ ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) { MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray( JSObject* receiver, - bool is_store) { + bool is_store, + StrictModeFlag strict_mode) { Code::Flags flags = Code::ComputeMonomorphicFlags( is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC, - NORMAL); + NORMAL, + strict_mode); ExternalArrayType array_type = ElementsKindToExternalArrayType(receiver->GetElementsKind()); String* name = @@ -588,9 +621,9 @@ MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray( Object* code = map->FindInCodeCache(name, flags); if (code->IsUndefined()) { ExternalArrayStubCompiler compiler; - { MaybeObject* maybe_code = - is_store ? 
compiler.CompileKeyedStoreStub(array_type, flags) : - compiler.CompileKeyedLoadStub(array_type, flags); + { MaybeObject* maybe_code = is_store + ? compiler.CompileKeyedStoreStub(array_type, flags) + : compiler.CompileKeyedLoadStub(array_type, flags); if (!maybe_code->ToObject(&code)) return maybe_code; } if (is_store) { @@ -610,8 +643,8 @@ MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray( } -MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) { - return Builtins::builtin(extra_ic_state == StoreIC::kStoreICStrict +MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) { + return Builtins::builtin((strict_mode == kStrictMode) ? Builtins::StoreIC_Normal_Strict : Builtins::StoreIC_Normal); } @@ -620,12 +653,12 @@ MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) { MaybeObject* StubCache::ComputeStoreGlobal(String* name, GlobalObject* receiver, JSGlobalPropertyCell* cell, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, NORMAL, extra_ic_state); + Code::STORE_IC, NORMAL, strict_mode); Object* code = receiver->map()->FindInCodeCache(name, flags); if (code->IsUndefined()) { - StoreStubCompiler compiler(extra_ic_state); + StoreStubCompiler compiler(strict_mode); { MaybeObject* maybe_code = compiler.CompileStoreGlobal(receiver, cell, name); if (!maybe_code->ToObject(&code)) return maybe_code; @@ -646,13 +679,13 @@ MaybeObject* StubCache::ComputeStoreCallback( String* name, JSObject* receiver, AccessorInfo* callback, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { ASSERT(v8::ToCData<Address>(callback->setter()) != 0); Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, CALLBACKS, extra_ic_state); + Code::STORE_IC, CALLBACKS, strict_mode); Object* code = receiver->map()->FindInCodeCache(name, flags); if (code->IsUndefined()) { - StoreStubCompiler compiler(extra_ic_state); + StoreStubCompiler compiler(strict_mode); { MaybeObject* maybe_code = compiler.CompileStoreCallback(receiver, callback, name); if (!maybe_code->ToObject(&code)) return maybe_code; @@ -672,12 +705,12 @@ MaybeObject* StubCache::ComputeStoreCallback( MaybeObject* StubCache::ComputeStoreInterceptor( String* name, JSObject* receiver, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, INTERCEPTOR, extra_ic_state); + Code::STORE_IC, INTERCEPTOR, strict_mode); Object* code = receiver->map()->FindInCodeCache(name, flags); if (code->IsUndefined()) { - StoreStubCompiler compiler(extra_ic_state); + StoreStubCompiler compiler(strict_mode); { MaybeObject* maybe_code = compiler.CompileStoreInterceptor(receiver, name); if (!maybe_code->ToObject(&code)) return maybe_code; @@ -697,12 +730,14 @@ MaybeObject* StubCache::ComputeStoreInterceptor( MaybeObject* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver, int field_index, - Map* transition) { + Map* transition, + StrictModeFlag strict_mode) { PropertyType type = (transition == NULL) ? 
FIELD : MAP_TRANSITION; - Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type); + Code::Flags flags = Code::ComputeMonomorphicFlags( + Code::KEYED_STORE_IC, type, strict_mode); Object* code = receiver->map()->FindInCodeCache(name, flags); if (code->IsUndefined()) { - KeyedStoreStubCompiler compiler; + KeyedStoreStubCompiler compiler(strict_mode); { MaybeObject* maybe_code = compiler.CompileStoreField(receiver, field_index, transition, name); if (!maybe_code->ToObject(&code)) return maybe_code; @@ -1390,12 +1425,17 @@ MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args) { MaybeObject* StoreInterceptorProperty(Arguments args) { + ASSERT(args.length() == 4); JSObject* recv = JSObject::cast(args[0]); String* name = String::cast(args[1]); Object* value = args[2]; + StrictModeFlag strict_mode = + static_cast<StrictModeFlag>(Smi::cast(args[3])->value()); + ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode); ASSERT(recv->HasNamedInterceptor()); PropertyAttributes attr = NONE; - MaybeObject* result = recv->SetPropertyWithInterceptor(name, value, attr); + MaybeObject* result = recv->SetPropertyWithInterceptor( + name, value, attr, strict_mode); return result; } @@ -1648,8 +1688,8 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) { MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) { - Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type, - extra_ic_state_); + Code::Flags flags = Code::ComputeMonomorphicFlags( + Code::STORE_IC, type, strict_mode_); MaybeObject* result = GetCodeWithFlags(flags, name); if (!result->IsFailure()) { PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, @@ -1664,7 +1704,8 @@ MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) { MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) { - Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type); + Code::Flags flags = Code::ComputeMonomorphicFlags( + Code::KEYED_STORE_IC, type, strict_mode_); MaybeObject* result = GetCodeWithFlags(flags, name); if (!result->IsFailure()) { PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, diff --git a/src/stub-cache.h b/src/stub-cache.h index 307939dd..6927076c 100644 --- a/src/stub-cache.h +++ b/src/stub-cache.h @@ -143,27 +143,27 @@ class StubCache : public AllStatic { JSObject* receiver, int field_index, Map* transition, - Code::ExtraICState extra_ic_state); + StrictModeFlag strict_mode); MUST_USE_RESULT static MaybeObject* ComputeStoreNormal( - Code::ExtraICState extra_ic_state); + StrictModeFlag strict_mode); MUST_USE_RESULT static MaybeObject* ComputeStoreGlobal( String* name, GlobalObject* receiver, JSGlobalPropertyCell* cell, - Code::ExtraICState extra_ic_state); + StrictModeFlag strict_mode); MUST_USE_RESULT static MaybeObject* ComputeStoreCallback( String* name, JSObject* receiver, AccessorInfo* callback, - Code::ExtraICState extra_ic_state); + StrictModeFlag strict_mode); MUST_USE_RESULT static MaybeObject* ComputeStoreInterceptor( String* name, JSObject* receiver, - Code::ExtraICState extra_ic_state); + StrictModeFlag strict_mode); // --- @@ -171,14 +171,21 @@ class StubCache : public AllStatic { String* name, JSObject* receiver, int field_index, - Map* transition = NULL); + Map* transition, + StrictModeFlag strict_mode); MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized( - JSObject* receiver); + JSObject* receiver, + StrictModeFlag strict_mode); + + MUST_USE_RESULT static MaybeObject* 
      ComputeKeyedStorePixelArray(
+      JSObject* receiver,
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
       JSObject* receiver,
-      bool is_store);
+      bool is_store,
+      StrictModeFlag strict_mode);

   // ---

@@ -625,8 +632,8 @@ class KeyedLoadStubCompiler: public StubCompiler {

 class StoreStubCompiler: public StubCompiler {
  public:
-  explicit StoreStubCompiler(Code::ExtraICState extra_ic_state)
-      : extra_ic_state_(extra_ic_state) { }
+  explicit StoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) { }

   MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
                                                  int index,
@@ -646,12 +653,15 @@ class StoreStubCompiler: public StubCompiler {
  private:
   MaybeObject* GetCode(PropertyType type, String* name);

-  Code::ExtraICState extra_ic_state_;
+  StrictModeFlag strict_mode_;
 };


 class KeyedStoreStubCompiler: public StubCompiler {
  public:
+  explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) { }
+
   MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
                                                  int index,
                                                  Map* transition,
@@ -659,8 +669,12 @@ class KeyedStoreStubCompiler: public StubCompiler {

   MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);

+  MUST_USE_RESULT MaybeObject* CompileStorePixelArray(JSObject* receiver);
+
  private:
   MaybeObject* GetCode(PropertyType type, String* name);
+
+  StrictModeFlag strict_mode_;
 };

@@ -333,7 +333,7 @@ void Top::RegisterTryCatchHandler(v8::TryCatch* that) {

 void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
-  ASSERT(thread_local_.TryCatchHandler() == that);
+  ASSERT(try_catch_handler() == that);
   thread_local_.set_try_catch_handler_address(
       reinterpret_cast<Address>(that->next_));
   thread_local_.catcher_ = NULL;
@@ -732,6 +732,12 @@ Failure* Top::Throw(Object* exception, MessageLocation* location) {

 Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) {
+  bool can_be_caught_externally = false;
+  ShouldReportException(&can_be_caught_externally,
+                        is_catchable_by_javascript(exception));
+  thread_local_.catcher_ = can_be_caught_externally ?
+      try_catch_handler() : NULL;
+
   // Set the exception being re-thrown.
   set_pending_exception(exception);
   return Failure::Exception();
@@ -807,7 +813,7 @@ void Top::ComputeLocation(MessageLocation* target) {
 }


-bool Top::ShouldReportException(bool* is_caught_externally,
+bool Top::ShouldReportException(bool* can_be_caught_externally,
                                 bool catchable_by_javascript) {
   // Find the top-most try-catch handler.
   StackHandler* handler =
@@ -823,13 +829,13 @@ bool Top::ShouldReportException(bool* is_caught_externally,
   // The exception has been externally caught if and only if there is
   // an external handler which is on top of the top-most try-catch
   // handler.
-  *is_caught_externally = external_handler_address != NULL &&
+  *can_be_caught_externally = external_handler_address != NULL &&
       (handler == NULL || handler->address() > external_handler_address ||
        !catchable_by_javascript);

-  if (*is_caught_externally) {
+  if (*can_be_caught_externally) {
     // Only report the exception if the external handler is verbose.
-    return thread_local_.TryCatchHandler()->is_verbose_;
+    return try_catch_handler()->is_verbose_;
   } else {
     // Report the exception if it isn't caught by JavaScript code.
     return handler == NULL;
@@ -848,14 +854,12 @@ void Top::DoThrow(MaybeObject* exception,
   Handle<Object> exception_handle(exception_object);

   // Determine reporting and whether the exception is caught externally.
-  bool is_out_of_memory = exception == Failure::OutOfMemoryException();
-  bool is_termination_exception = exception == Heap::termination_exception();
-  bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
+  bool catchable_by_javascript = is_catchable_by_javascript(exception);
   // Only real objects can be caught by JS.
   ASSERT(!catchable_by_javascript || is_object);
-  bool is_caught_externally = false;
+  bool can_be_caught_externally = false;
   bool should_report_exception =
-      ShouldReportException(&is_caught_externally, catchable_by_javascript);
+      ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
   bool report_exception = catchable_by_javascript && should_report_exception;

 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -869,8 +873,8 @@ void Top::DoThrow(MaybeObject* exception,
   Handle<Object> message_obj;
   MessageLocation potential_computed_location;
   bool try_catch_needs_message =
-      is_caught_externally &&
-      thread_local_.TryCatchHandler()->capture_message_;
+      can_be_caught_externally &&
+      try_catch_handler()->capture_message_;
   if (report_exception || try_catch_needs_message) {
     if (location == NULL) {
       // If no location was specified we use a computed one instead
@@ -908,9 +912,10 @@ void Top::DoThrow(MaybeObject* exception,
     }
   }

-  if (is_caught_externally) {
-    thread_local_.catcher_ = thread_local_.TryCatchHandler();
-  }
+  // Do not forget to clean catcher_ if the currently thrown exception cannot
+  // be caught. If necessary, ReThrow will update the catcher.
+  thread_local_.catcher_ = can_be_caught_externally ?
+      try_catch_handler() : NULL;

   // NOTE: Notifying the debugger or generating the message
   // may have caused new exceptions. For now, we just ignore
@@ -925,22 +930,63 @@ void Top::DoThrow(MaybeObject* exception,
 }


+bool Top::IsExternallyCaught() {
+  ASSERT(has_pending_exception());
+
+  if ((thread_local_.catcher_ == NULL) ||
+      (try_catch_handler() != thread_local_.catcher_)) {
+    // When throwing the exception, we found no v8::TryCatch
+    // which should care about this exception.
+    return false;
+  }
+
+  if (!is_catchable_by_javascript(pending_exception())) {
+    return true;
+  }
+
+  // Get the address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  Address external_handler_address = thread_local_.try_catch_handler_address();
+  ASSERT(external_handler_address != NULL);
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is on top of the top-most try-finally
+  // handler.
+  // There should be no try-catch blocks as they would prohibit us from
+  // finding the external catcher in the first place (see the catcher_ check
+  // above).
+  //
+  // Note that a finally clause would rethrow an exception unless it's
+  // aborted by jumps in control flow like return, break, etc., in which
+  // case we'll have another chance to set a proper v8::TryCatch.
+  StackHandler* handler =
+      StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
+  while (handler != NULL && handler->address() < external_handler_address) {
+    ASSERT(!handler->is_try_catch());
+    if (handler->is_try_finally()) return false;
+
+    handler = handler->next();
+  }
+
+  return true;
+}
+
+
 void Top::ReportPendingMessages() {
   ASSERT(has_pending_exception());
-  setup_external_caught();
   // If the pending exception is OutOfMemoryException set out_of_memory in
   // the global context.  Note: We have to mark the global context here
   // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
   // set it.
-  bool external_caught = thread_local_.external_caught_exception_;
+  bool external_caught = IsExternallyCaught();
+  thread_local_.external_caught_exception_ = external_caught;
   HandleScope scope;
   if (thread_local_.pending_exception_ == Failure::OutOfMemoryException()) {
     context()->mark_out_of_memory();
   } else if (thread_local_.pending_exception_ ==
              Heap::termination_exception()) {
     if (external_caught) {
-      thread_local_.TryCatchHandler()->can_continue_ = false;
-      thread_local_.TryCatchHandler()->exception_ = Heap::null_value();
+      try_catch_handler()->can_continue_ = false;
+      try_catch_handler()->exception_ = Heap::null_value();
     }
   } else {
     // At this point all non-object (failure) exceptions have
@@ -949,9 +995,8 @@ void Top::ReportPendingMessages() {
     Handle<Object> exception(pending_exception_object);
     thread_local_.external_caught_exception_ = false;
     if (external_caught) {
-      thread_local_.TryCatchHandler()->can_continue_ = true;
-      thread_local_.TryCatchHandler()->exception_ =
-          thread_local_.pending_exception_;
+      try_catch_handler()->can_continue_ = true;
+      try_catch_handler()->exception_ = thread_local_.pending_exception_;
       if (!thread_local_.pending_message_obj_->IsTheHole()) {
         try_catch_handler()->message_ = thread_local_.pending_message_obj_;
       }
@@ -249,12 +249,7 @@ class Top {
     thread_local_.scheduled_exception_ = Heap::the_hole_value();
   }

-  static void setup_external_caught() {
-    thread_local_.external_caught_exception_ =
-        has_pending_exception() &&
-        (thread_local_.catcher_ != NULL) &&
-        (try_catch_handler() == thread_local_.catcher_);
-  }
+  static bool IsExternallyCaught();

   static void SetCaptureStackTraceForUncaughtExceptions(
       bool capture,
@@ -265,6 +260,11 @@ class Top {
   // exception.
   static bool is_out_of_memory();

+  static bool is_catchable_by_javascript(MaybeObject* exception) {
+    return (exception != Failure::OutOfMemoryException()) &&
+        (exception != Heap::termination_exception());
+  }
+
   // JS execution stack (see frames.h).
   static Address c_entry_fp(ThreadLocalTop* thread) {
     return thread->c_entry_fp_;
@@ -397,7 +397,7 @@ class Top {
                                  const char* message);
   // Checks if exception should be reported and finds out if it's
   // caught externally.
-  static bool ShouldReportException(bool* is_caught_externally,
+  static bool ShouldReportException(bool* can_be_caught_externally,
                                     bool catchable_by_javascript);

   // Attempts to compute the current source location, storing the
diff --git a/src/type-info.cc b/src/type-info.cc
index 0bb72621..3438ff8f 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -337,26 +337,27 @@ void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
         // position by making sure that we have position information
         // recorded for all binary ICs.
         if (GetElement(map_, position)->IsUndefined()) {
-          SetElement(map_, position, target);
+          SetElement(map_, position, target, kNonStrictMode);
         }
       } else if (state == MONOMORPHIC) {
         if (target->kind() != Code::CALL_IC ||
             target->check_type() == RECEIVER_MAP_CHECK) {
           Handle<Map> map = Handle<Map>(target->FindFirstMap());
           if (*map == NULL) {
-            SetElement(map_, position, target);
+            SetElement(map_, position, target, kNonStrictMode);
           } else {
-            SetElement(map_, position, map);
+            SetElement(map_, position, map, kNonStrictMode);
           }
         } else {
           ASSERT(target->kind() == Code::CALL_IC);
           CheckType check = target->check_type();
           ASSERT(check != RECEIVER_MAP_CHECK);
-          SetElement(map_, position, Handle<Object>(Smi::FromInt(check)));
+          SetElement(map_, position,
+                     Handle<Object>(Smi::FromInt(check)), kNonStrictMode);
           ASSERT(Smi::cast(*GetElement(map_, position))->value() == check);
         }
       } else if (state == MEGAMORPHIC) {
-        SetElement(map_, position, target);
+        SetElement(map_, position, target, kNonStrictMode);
       }
     }
   }
@@ -54,7 +54,12 @@ bool V8::Initialize(Deserializer* des) {
   if (has_been_disposed_ || has_fatal_error_) return false;
   if (IsRunning()) return true;

+#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
+  use_crankshaft_ = false;
+#else
   use_crankshaft_ = FLAG_crankshaft;
+#endif
+
   // Peephole optimization might interfere with deoptimization.
   FLAG_peephole_optimization = !use_crankshaft_;
   is_running_ = true;
diff --git a/src/v8natives.js b/src/v8natives.js
index 83b00b0f..563de732 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -92,7 +92,7 @@ function GlobalIsFinite(number) {

 // ECMA-262 - 15.1.2.2
 function GlobalParseInt(string, radix) {
-  if (IS_UNDEFINED(radix)) {
+  if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
     // Some people use parseInt instead of Math.floor.  This
     // optimization makes parseInt on a Smi 12 times faster (60ns
     // vs 800ns).  The following optimization makes parseInt on a
@@ -105,7 +105,7 @@ function GlobalParseInt(string, radix) {
       // Truncate number.
       return string | 0;
     }
-    radix = 0;
+    if (IS_UNDEFINED(radix)) radix = 0;
   } else {
     radix = TO_INT32(radix);
     if (!(radix == 0 || (2 <= radix && radix <= 36)))
@@ -143,7 +143,7 @@ function GlobalEval(x) {

   var f = %CompileString(x);
   if (!IS_FUNCTION(f)) return f;

-  return f.call(this);
+  return %_CallFunction(this, f);
 }


@@ -152,7 +152,7 @@ function GlobalExecScript(expr, lang) {
   // NOTE: We don't care about the character casing.
   if (!lang || /javascript/i.test(lang)) {
     var f = %CompileString(ToString(expr));
-    f.call(%GlobalReceiver(global));
+    %_CallFunction(%GlobalReceiver(global), f);
   }
   return null;
 }
@@ -586,17 +586,20 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
       // Step 7
       if (desc.isConfigurable() ||
           (desc.hasEnumerable() &&
-           desc.isEnumerable() != current.isEnumerable()))
+           desc.isEnumerable() != current.isEnumerable())) {
         throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      }
       // Step 8
       if (!IsGenericDescriptor(desc)) {
         // Step 9a
-        if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+        if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
           throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        }
         // Step 10a
         if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
-          if (!current.isWritable() && desc.isWritable())
+          if (!current.isWritable() && desc.isWritable()) {
             throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          }
           if (!current.isWritable() && desc.hasValue() &&
               !SameValue(desc.getValue(), current.getValue())) {
             throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
@@ -604,11 +607,12 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
         }
         // Step 11
         if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
-          if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
+          if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
           }
-          if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
+          if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
             throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          }
         }
       }
     }
@@ -1166,7 +1170,7 @@ function FunctionBind(this_arg) { // Length is 1.
       return fn.apply(this_arg, arguments);
     };
   } else {
-    var bound_args = new $Array(argc_bound);
+    var bound_args = new InternalArray(argc_bound);
     for(var i = 0; i < argc_bound; i++) {
       bound_args[i] = %_Arguments(i+1);
     }
@@ -1184,7 +1188,7 @@ function FunctionBind(this_arg) { // Length is 1.
       // Combine the args we got from the bind call with the args
       // given as argument to the invocation.
       var argc = %_ArgumentsLength();
-      var args = new $Array(argc + argc_bound);
+      var args = new InternalArray(argc + argc_bound);
       // Add bound arguments.
       for (var i = 0; i < argc_bound; i++) {
         args[i] = bound_args[i];
@@ -1216,7 +1220,7 @@ function NewFunction(arg1) {  // length == 1
   var n = %_ArgumentsLength();
   var p = '';
   if (n > 1) {
-    p = new $Array(n - 1);
+    p = new InternalArray(n - 1);
     for (var i = 0; i < n - 1; i++) p[i] = %_Arguments(i);
     p = Join(p, n - 1, ',', NonStringToString);
     // If the formal parameters string include ) - an illegal
diff --git a/src/version.cc b/src/version.cc
index 2471bc92..ac4ab4a4 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     1
-#define BUILD_NUMBER      4
-#define PATCH_LEVEL       0
+#define MINOR_VERSION     2
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       1
 #define CANDIDATE_VERSION false

 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/virtual-frame-heavy-inl.h b/src/virtual-frame-heavy-inl.h
index 2755eee6..cf12eca6 100644
--- a/src/virtual-frame-heavy-inl.h
+++ b/src/virtual-frame-heavy-inl.h
@@ -82,10 +82,8 @@ void VirtualFrame::Push(Register reg, TypeInfo info) {
 }


-void VirtualFrame::Push(Handle<Object> value) {
-  FrameElement element =
-      FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-  elements_.Add(element);
+bool VirtualFrame::ConstantPoolOverflowed() {
+  return FrameElement::ConstantPoolOverflowed();
 }

diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 285c0781..b082624f 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 697f6cd4..41111a77 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -190,13 +190,13 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
 // -----------------------------------------------------------------------------
 // Register constants.
-const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = {
-  // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
-  0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+  // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
+  0, 3, 2, 1, 7, 8, 9, 11, 14, 12
 };

-const int Register::allocationIndexByRegisterCode[kNumRegisters] = {
-  0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
+  0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
 };

@@ -2995,6 +2995,28 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
 }


+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x56);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -3114,8 +3136,8 @@ void Assembler::RecordDebugBreakSlot() {
 }


-void Assembler::RecordComment(const char* msg) {
-  if (FLAG_code_comments) {
+void Assembler::RecordComment(const char* msg, bool force) {
+  if (FLAG_code_comments || force) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 91e7e6cc..f6cd5709 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.

 // A lightweight X64 Assembler.

@@ -99,12 +99,12 @@ struct Register {
   static const int kNumAllocatableRegisters = 10;

   static int ToAllocationIndex(Register reg) {
-    return allocationIndexByRegisterCode[reg.code()];
+    return kAllocationIndexByRegisterCode[reg.code()];
   }

   static Register FromAllocationIndex(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
-    Register result = { registerCodeByAllocationIndex[index] };
+    Register result = { kRegisterCodeByAllocationIndex[index] };
     return result;
   }

@@ -155,8 +155,8 @@ struct Register {
   int code_;

  private:
-  static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
-  static const int allocationIndexByRegisterCode[kNumRegisters];
+  static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+  static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };

 const Register rax = { 0 };
@@ -1284,6 +1284,8 @@ class Assembler : public Malloced {
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);

+  void andpd(XMMRegister dst, XMMRegister src);
+  void orpd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);

@@ -1312,7 +1314,7 @@ class Assembler : public Malloced {

   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
-  void RecordComment(const char* msg);
+  void RecordComment(const char* msg, bool force = false);

   // Writes a single word of data in the code stream.
   // Used for inline tables, e.g., jump-tables.
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 08cd21d4..b545876e 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -601,7 +601,16 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {


 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  __ int3();
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  __ Pushad();
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
+  __ Popad();
+  __ ret(0);
 }


@@ -642,6 +651,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     // Change context eagerly in case we need the global receiver.
     __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

+    // Do not transform the receiver for strict mode functions.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ j(not_equal, &shift_arguments);
+
+    // Compute the receiver in non-strict mode.
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
     __ JumpIfSmi(rbx, &convert_to_object);

@@ -798,6 +814,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // Compute the receiver.
   Label call_to_object, use_global_receiver, push_receiver;
   __ movq(rbx, Operand(rbp, kReceiverOffset));
+
+  // Do not transform the receiver for strict mode functions.
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+  __ j(not_equal, &push_receiver);
+
+  // Compute the receiver in non-strict mode.
   __ JumpIfSmi(rbx, &call_to_object);
   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   __ j(equal, &use_global_receiver);
@@ -1224,7 +1248,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);

   if (FLAG_debug_code) {
-    // Initial map for the builtin Array function shoud be a map.
+    // Initial map for the builtin Array functions should be maps.
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
     ASSERT(kSmiTag == 0);
@@ -1256,11 +1280,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   Label generic_constructor;

   if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin Array function which
-    // does always have a map.
-    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rbx);
-    __ cmpq(rdi, rbx);
-    __ Check(equal, "Unexpected Array function");
+    // The array construct code is only set for the builtin and internal
+    // Array functions which always have a map.
     // Initial map for the builtin Array function should be a map.
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
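A note on the two receiver hunks above: they implement the ES5 rule (section 10.4.3) that a strict-mode callee sees its receiver unmodified, while a sloppy-mode callee gets null/undefined replaced by the global object and primitives boxed. A minimal C++ sketch of that decision, with all helper names (ComputeCallReceiver, GlobalReceiver, ToObject) purely illustrative rather than V8's actual API:

    // Illustrative sketch only, not V8 source: the receiver rule that the
    // SharedFunctionInfo strict-mode bit test above short-circuits.
    Object* ComputeCallReceiver(bool callee_is_strict, Object* receiver) {
      if (callee_is_strict) return receiver;  // strict mode: pass through as-is
      if (receiver->IsNull() || receiver->IsUndefined())
        return GlobalReceiver();              // sloppy mode: substitute global
      if (!receiver->IsJSObject())
        return ToObject(receiver);            // sloppy mode: box primitives
      return receiver;
    }

In the generated code this is exactly the `j(not_equal, ...)` branch: when the strict-mode bit is set, the slow receiver-transformation path is skipped entirely.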
@@ -1406,7 +1427,58 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {


 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  __ int3();
+  // Get the loop depth of the stack guard check. This is recorded in
+  // a test(rax, depth) instruction right after the call.
+  Label stack_check;
+  __ movq(rbx, Operand(rsp, 0));  // return address
+  __ movzxbq(rbx, Operand(rbx, 1));  // depth
+
+  // Get the loop nesting level at which we allow OSR from the
+  // unoptimized code and check if we want to do OSR yet. If not we
+  // should perform a stack guard check so we can get interrupts while
+  // waiting for on-stack replacement.
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+  __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
+  __ j(greater, &stack_check);
+
+  // Pass the function to optimize as the argument to the on-stack
+  // replacement runtime function.
+  __ EnterInternalFrame();
+  __ push(rax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  NearLabel skip;
+  __ SmiCompare(rax, Smi::FromInt(-1));
+  __ j(not_equal, &skip);
+  __ ret(0);
+
+  // If we decide not to perform on-stack replacement we perform a
+  // stack guard check to enable interrupts.
+  __ bind(&stack_check);
+  NearLabel ok;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &ok);
+
+  StackCheckStub stub;
+  __ TailCallStub(&stub);
+  __ Abort("Unreachable code: returned from tail call.");
+  __ bind(&ok);
+  __ ret(0);
+
+  __ bind(&skip);
+  // Untag the AST id and push it on the stack.
+  __ SmiToInteger32(rax, rax);
+  __ push(rax);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
 }
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 4b4531eb..eb929782 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1336,54 +1336,33 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(


 void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
-  GenerateRegisterArgsPush(masm);
-  // Registers containing left and right operands respectively.
-  Register lhs = rdx;
-  Register rhs = rax;
-
-  // Test for string arguments before calling runtime.
-  Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
-  __ JumpIfNotString(lhs, r8, &not_string1);
-
-  // First argument is a a string, test second.
-  __ JumpIfSmi(rhs, &string1_smi2);
-  __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
-  __ j(above_equal, &string1);
-
-  // First and second argument are strings.
-  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-  __ TailCallStub(&string_add_stub);
-
-  __ bind(&string1_smi2);
-  // First argument is a string, second is a smi. Try to lookup the number
-  // string for the smi in the number string cache.
-  NumberToStringStub::GenerateLookupNumberStringCache(
-      masm, rhs, rbx, rcx, r8, true, &string1);
+  ASSERT(op_ == Token::ADD);
+  NearLabel left_not_string, call_runtime;

-  // Replace second argument on stack and tailcall string add stub to make
-  // the result.
-  __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-  __ TailCallStub(&string_add_stub);
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;

-  // Only first argument is a string.
-  __ bind(&string1);
-  __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &left_not_string);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &left_not_string);
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);

-  // First argument was not a string, test second.
-  __ bind(&not_string1);
-  __ JumpIfNotString(rhs, rhs, &not_strings);
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ JumpIfSmi(right, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);

-  // Only second argument is a string.
-  __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);

-  __ bind(&not_strings);
   // Neither argument is a string.
-  // Pop arguments, because CallRuntimeCode wants to push them again.
-  __ pop(rcx);
-  __ pop(rax);
-  __ pop(rdx);
-  __ push(rcx);
+  __ bind(&call_runtime);
 }

@@ -1440,9 +1419,11 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {


 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   GenerateStringAddCode(masm);
-
+  // Try to add arguments as strings, otherwise, transition to the generic
+  // TRBinaryOpIC type.
   GenerateTypeTransition(masm);
 }

@@ -1525,40 +1506,59 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {


 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  // Input on stack:
-  //   rsp[8]: argument (should be number).
-  //   rsp[0]: return address.
+  // TAGGED case:
+  //   Input:
+  //     rsp[8]: argument (should be number).
+  //     rsp[0]: return address.
+  //   Output:
+  //     rax: tagged double result.
+  // UNTAGGED case:
+  //   Input:
+  //     rsp[0]: return address.
+  //     xmm1: untagged double input argument
+  //   Output:
+  //     xmm1: untagged double result.
   Label runtime_call;
   Label runtime_call_clear_stack;
-  Label input_not_smi;
-  NearLabel loaded;
-  // Test that rax is a number.
-  __ movq(rax, Operand(rsp, kPointerSize));
-  __ JumpIfNotSmi(rax, &input_not_smi);
-  // Input is a smi. Untag and load it onto the FPU stack.
-  // Then load the bits of the double into rbx.
-  __ SmiToInteger32(rax, rax);
-  __ subq(rsp, Immediate(kPointerSize));
-  __ cvtlsi2sd(xmm1, rax);
-  __ movsd(Operand(rsp, 0), xmm1);
-  __ movq(rbx, xmm1);
-  __ movq(rdx, xmm1);
-  __ fld_d(Operand(rsp, 0));
-  __ addq(rsp, Immediate(kPointerSize));
-  __ jmp(&loaded);
-
-  __ bind(&input_not_smi);
-  // Check if input is a HeapNumber.
-  __ Move(rbx, Factory::heap_number_map());
-  __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ j(not_equal, &runtime_call);
-  // Input is a HeapNumber. Push it on the FPU stack and load its
-  // bits into rbx.
-  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ movq(rdx, rbx);
-  __ bind(&loaded);
-  // ST[0] == double value
+  Label skip_cache;
+  const bool tagged = (argument_type_ == TAGGED);
+  if (tagged) {
+    NearLabel input_not_smi;
+    NearLabel loaded;
+    // Test that rax is a number.
+    __ movq(rax, Operand(rsp, kPointerSize));
+    __ JumpIfNotSmi(rax, &input_not_smi);
+    // Input is a smi. Untag and load it onto the FPU stack.
+    // Then load the bits of the double into rbx.
+    __ SmiToInteger32(rax, rax);
+    __ subq(rsp, Immediate(kDoubleSize));
+    __ cvtlsi2sd(xmm1, rax);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ movq(rbx, xmm1);
+    __ movq(rdx, xmm1);
+    __ fld_d(Operand(rsp, 0));
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ jmp(&loaded);
+
+    __ bind(&input_not_smi);
+    // Check if input is a HeapNumber.
+    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
+    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+    __ j(not_equal, &runtime_call);
+    // Input is a HeapNumber. Push it on the FPU stack and load its
+    // bits into rbx.
+    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ movq(rdx, rbx);
+
+    __ bind(&loaded);
+  } else {  // UNTAGGED.
+    __ movq(rbx, xmm1);
+    __ movq(rdx, xmm1);
+  }
+
+  // ST[0] == double value, if TAGGED.
   // rbx = bits of double value.
   // rdx = also bits of double value.
   // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
@@ -1590,7 +1590,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // rax points to the cache for the type type_.
   // If NULL, the cache hasn't been initialized yet, so go through runtime.
   __ testq(rax, rax);
-  __ j(zero, &runtime_call_clear_stack);
+  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
 #ifdef DEBUG
   // Check that the layout of cache elements match expectations.
   { // NOLINT - doesn't like a single brace on a line.
@@ -1616,30 +1616,70 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   __ j(not_equal, &cache_miss);
   // Cache hit!
   __ movq(rax, Operand(rcx, 2 * kIntSize));
-  __ fstp(0);  // Clear FPU stack.
-  __ ret(kPointerSize);
+  if (tagged) {
+    __ fstp(0);  // Clear FPU stack.
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Ret();
+  }

   __ bind(&cache_miss);
   // Update cache with new value.
-  Label nan_result;
-  GenerateOperation(masm, &nan_result);
+  if (tagged) {
   __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+  } else {  // UNTAGGED.
+    __ AllocateHeapNumber(rax, rdi, &skip_cache);
+    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  }
+  GenerateOperation(masm);
   __ movq(Operand(rcx, 0), rbx);
   __ movq(Operand(rcx, 2 * kIntSize), rax);
   __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  __ ret(kPointerSize);
-
-  __ bind(&runtime_call_clear_stack);
-  __ fstp(0);
-  __ bind(&runtime_call);
-  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+  if (tagged) {
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Ret();
+
+    // Skip cache and return answer directly, only in untagged case.
+    __ bind(&skip_cache);
+    __ subq(rsp, Immediate(kDoubleSize));
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ fld_d(Operand(rsp, 0));
+    GenerateOperation(masm);
+    __ fstp_d(Operand(rsp, 0));
+    __ movsd(xmm1, Operand(rsp, 0));
+    __ addq(rsp, Immediate(kDoubleSize));
+    // We return the value in xmm1 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+    // Allocate an unused object bigger than a HeapNumber.
+    __ Push(Smi::FromInt(2 * kDoubleSize));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }

-  __ bind(&nan_result);
-  __ fstp(0);  // Remove argument from FPU stack.
-  __ LoadRoot(rax, Heap::kNanValueRootIndex);
-  __ movq(Operand(rcx, 0), rbx);
-  __ movq(Operand(rcx, 2 * kIntSize), rax);
-  __ ret(kPointerSize);
+  // Call runtime, doing whatever allocation and cleanup is necessary.
+  if (tagged) {
+    __ bind(&runtime_call_clear_stack);
+    __ fstp(0);
+    __ bind(&runtime_call);
+    __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+  } else {  // UNTAGGED.
+    __ bind(&runtime_call_clear_stack);
+    __ bind(&runtime_call);
+    __ AllocateHeapNumber(rax, rdi, &skip_cache);
+    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+    __ EnterInternalFrame();
+    __ push(rax);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Ret();
+  }
 }


@@ -1656,9 +1696,9 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
 }


-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
-                                                Label* on_nan_result) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
   // Registers:
+  //   rax: Newly allocated HeapNumber, which must be preserved.
   //   rbx: Bits of input double. Must be preserved.
   //   rcx: Pointer to cache entry. Must be preserved.
   //   st(0): Input double
@@ -1680,9 +1720,18 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
   __ j(below, &in_range);
   // Check for infinity and NaN. Both return NaN for sin.
   __ cmpl(rdi, Immediate(0x7ff));
-  __ j(equal, on_nan_result);
+  NearLabel non_nan_result;
+  __ j(not_equal, &non_nan_result);
+  // Input is +/-Infinity or NaN. Result is NaN.
+  __ fstp(0);
+  __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
+  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&non_nan_result);

   // Use fpmod to restrict argument to the range +/-2*PI.
+  __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
   __ fldpi();
   __ fadd(0);
   __ fld(1);
@@ -1715,6 +1764,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
   // FPU Stack: input % 2*pi, 2*pi,
   __ fstp(0);
   // FPU Stack: input % 2*pi
+  __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
   __ bind(&in_range);
   switch (type_) {
     case TranscendentalCache::SIN:
@@ -1967,8 +2017,8 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
       __ AbortIfSmi(rax);
     }

-    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
-    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
     __ j(not_equal, &slow);

     // Operand is a float, negate its value by flipping sign bit.
     __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
@@ -1997,8 +2047,8 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
     }

     // Check if the operand is a heap number.
-    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
-    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
     __ j(not_equal, &slow);

     // Convert the heap number in rax to an untagged integer in rcx.
@@ -2031,6 +2081,157 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
 }


+void MathPowStub::Generate(MacroAssembler* masm) {
+  // Registers are used as follows:
+  //   rdx = base
+  //   rax = exponent
+  //   rcx = temporary, result
+
+  Label allocate_return, call_runtime;
+
+  // Load input parameters.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+  // Save 1 in xmm3 - we need this several times later on.
+  __ movl(rcx, Immediate(1));
+  __ cvtlsi2sd(xmm3, rcx);
+
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number go to that specific case.
+  __ JumpIfNotSmi(rax, &exponent_nonsmi);
+  __ JumpIfNotSmi(rdx, &base_nonsmi);
+
+  // Optimized version when both exponent and base are smis.
+  Label powi;
+  __ SmiToInteger32(rdx, rdx);
+  __ cvtlsi2sd(xmm0, rdx);
+  __ jmp(&powi);
+  // Exponent is a smi and base is a heapnumber.
+  __ bind(&base_nonsmi);
+  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &call_runtime);
+
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+
+  // Optimized version of pow if exponent is a smi.
+  // xmm0 contains the base.
+  __ bind(&powi);
+  __ SmiToInteger32(rax, rax);
+
+  // Save exponent in base as we need to check if exponent is negative later.
+  // We know that base and exponent are in different registers.
+  __ movq(rdx, rax);
+
+  // Get absolute value of exponent.
+  NearLabel no_neg;
+  __ cmpl(rax, Immediate(0));
+  __ j(greater_equal, &no_neg);
+  __ negl(rax);
+  __ bind(&no_neg);
+
+  // Load xmm1 with 1.
+  __ movsd(xmm1, xmm3);
+  NearLabel while_true;
+  NearLabel no_multiply;
+
+  __ bind(&while_true);
+  __ shrl(rax, Immediate(1));
+  __ j(not_carry, &no_multiply);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ mulsd(xmm0, xmm0);
+  __ j(not_zero, &while_true);
+
+  // Base has the original value of the exponent - if the exponent is
+  // negative return 1/result.
+  __ testl(rdx, rdx);
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ xorpd(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  __ j(equal, &call_runtime);
+
+  __ jmp(&allocate_return);
+
+  // Exponent (or both) is a heapnumber - no matter what we should now work
+  // on doubles.
+  __ bind(&exponent_nonsmi);
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &call_runtime);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  // Test if exponent is NaN.
+  __ ucomisd(xmm1, xmm1);
+  __ j(parity_even, &call_runtime);
+
+  NearLabel base_not_smi;
+  NearLabel handle_special_cases;
+  __ JumpIfNotSmi(rdx, &base_not_smi);
+  __ SmiToInteger32(rdx, rdx);
+  __ cvtlsi2sd(xmm0, rdx);
+  __ jmp(&handle_special_cases);
+
+  __ bind(&base_not_smi);
+  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &call_runtime);
+  __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+  __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
+  // base is NaN or +/-Infinity
+  __ j(greater_equal, &call_runtime);
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  NearLabel not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
+  __ movq(xmm2, rcx);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half);
+
+  // Calculates reciprocal of square root.
+  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+  __ xorpd(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &call_runtime);
+  // Calculates square root.
+  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+  __ xorpd(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+
+  __ bind(&allocate_return);
+  __ AllocateHeapNumber(rcx, rax, &call_runtime);
+  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
+  __ movq(rax, rcx);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in rdx and the parameter count is in rax.

@@ -2268,46 +2469,46 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // rcx: RegExp data (FixedArray)
   // rdx: Number of capture registers
   // Check that the second argument is a string.
-  __ movq(rax, Operand(rsp, kSubjectOffset));
-  __ JumpIfSmi(rax, &runtime);
-  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+  __ movq(rdi, Operand(rsp, kSubjectOffset));
+  __ JumpIfSmi(rdi, &runtime);
+  Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);

-  // rax: Subject string.
-  // rcx: RegExp data (FixedArray).
+  // rdi: Subject string.
+  // rax: RegExp data (FixedArray).
   // rdx: Number of capture registers.
   // Check that the third argument is a positive smi less than the string
   // length. A negative value will be greater (unsigned comparison).
   __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
   __ JumpIfNotSmi(rbx, &runtime);
-  __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
   __ j(above_equal, &runtime);

-  // rcx: RegExp data (FixedArray)
+  // rax: RegExp data (FixedArray)
   // rdx: Number of capture registers
   // Check that the fourth object is a JSArray object.
-  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
-  __ JumpIfSmi(rax, &runtime);
-  __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+  __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
+  __ JumpIfSmi(rdi, &runtime);
+  __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
   __ j(not_equal, &runtime);
   // Check that the JSArray is in fast case.
-  __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
-  __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ Cmp(rax, Factory::fixed_array_map());
+  __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ Cmp(rdi, Factory::fixed_array_map());
   __ j(not_equal, &runtime);
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
   STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
-  __ cmpl(rdx, rax);
+  __ cmpl(rdx, rdi);
   __ j(greater, &runtime);

-  // rcx: RegExp data (FixedArray)
+  // rax: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
   NearLabel seq_ascii_string, seq_two_byte_string, check_code;
-  __ movq(rax, Operand(rsp, kSubjectOffset));
-  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movq(rdi, Operand(rsp, kSubjectOffset));
+  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
   __ andb(rbx, Immediate(
@@ -2328,13 +2529,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
   __ j(not_zero, &runtime);
   // String is a cons string.
-  __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+  __ movq(rdx, FieldOperand(rdi, ConsString::kSecondOffset));
   __ Cmp(rdx, Factory::empty_string());
   __ j(not_equal, &runtime);
-  __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
-  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   // String is a cons string with empty second part.
-  // rax: first part of cons string.
+  // rdi: first part of cons string.
   // rbx: map of first part of cons string.
   // Is first part a flat two byte string?
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
@@ -2347,17 +2548,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ j(not_zero, &runtime);

   __ bind(&seq_ascii_string);
-  // rax: subject string (sequential ascii)
-  // rcx: RegExp data (FixedArray)
-  __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(rdi, 1);  // Type is ascii.
+  // rdi: subject string (sequential ascii)
+  // rax: RegExp data (FixedArray)
+  __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+  __ Set(rcx, 1);  // Type is ascii.
   __ jmp(&check_code);

   __ bind(&seq_two_byte_string);
-  // rax: subject string (flat two-byte)
-  // rcx: RegExp data (FixedArray)
-  __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
-  __ Set(rdi, 0);  // Type is two byte.
+  // rdi: subject string (flat two-byte)
+  // rax: RegExp data (FixedArray)
+  __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+  __ Set(rcx, 0);  // Type is two byte.
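A note on the register reshuffle in this stub: the subject string moves from rax to rdi (and the encoding flag from rdi to rcx) so that, on the System V AMD64 ABI, the subject is already sitting in the register used for a C function's first integer argument, which the calling sequence below exploits. A sketch of the convention only, with a hypothetical native entry point (not V8's real signature):

    // Illustrative only: argument registers for the native RegExp call.
    // SysV AMD64 passes args 1-4 in rdi, rsi, rdx, rcx; Win64 uses
    // rcx, rdx, r8, r9 - hence the #ifdef _WIN64 move further below.
    extern "C" int RegExpExecuteNative(const char* subject,   // rdi / rcx
                                       int previous_index,    // rsi / rdx
                                       const char* start,     // rdx / r8
                                       const char* end);      // rcx / r9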
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
@@ -2366,27 +2567,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
   __ j(not_equal, &runtime);

-  // rax: subject string
-  // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+  // rdi: subject string
+  // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
   // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));

-  // rax: subject string
+  // rdi: subject string
   // rbx: previous index
-  // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
   // r11: code
   // All checks done. Now push arguments for native regexp code.
   __ IncrementCounter(&Counters::regexp_entry_native, 1);

-  // rsi is caller save on Windows and used to pass parameter on Linux.
-  __ push(rsi);
-
   static const int kRegExpExecuteArguments = 7;
-  __ PrepareCallCFunction(kRegExpExecuteArguments);
   int argument_slots_on_stack =
       masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+  __ EnterApiExitFrame(argument_slots_on_stack);  // Clobbers rax!

   // Argument 7: Indicate that this is a direct call from JavaScript.
   __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
@@ -2423,60 +2621,57 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 #endif

   // Keep track on aliasing between argX defined above and the registers used.
-  // rax: subject string
+  // rdi: subject string
   // rbx: previous index
-  // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
   // r11: code

   // Argument 4: End of string data
   // Argument 3: Start of string data
   NearLabel setup_two_byte, setup_rest;
-  __ testb(rdi, rdi);
+  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
   __ j(zero, &setup_two_byte);
-  __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
-  __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
-  __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+  __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
+  __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
   __ jmp(&setup_rest);
   __ bind(&setup_two_byte);
-  __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
-  __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
-  __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+  __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
+  __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
+  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
   __ bind(&setup_rest);

   // Argument 2: Previous index.
   __ movq(arg2, rbx);

   // Argument 1: Subject string.
-  __ movq(arg1, rax);
+#ifdef _WIN64
+  __ movq(arg1, rdi);
+#else
+  // Already there in AMD64 calling convention.
+  ASSERT(arg1.is(rdi));
+#endif

   // Locate the code entry and call it.
   __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ CallCFunction(r11, kRegExpExecuteArguments);
+  __ call(r11);

-  // rsi is caller save, as it is used to pass parameter.
-  __ pop(rsi);
+  __ LeaveApiExitFrame();

   // Check the result.
   NearLabel success;
+  Label exception;
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
   __ j(equal, &success);
-  NearLabel failure;
-  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
-  __ j(equal, &failure);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
-  // If not exception it can only be retry. Handle that in the runtime system.
+  __ j(equal, &exception);
+  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+  // If none of the above, it can only be retry.
+  // Handle that in the runtime system.
   __ j(not_equal, &runtime);
-  // Result must now be exception. If there is no pending exception already a
-  // stack overflow (on the backtrack stack) was detected in RegExp code but
-  // haven't created the exception yet. Handle that in the runtime system.
-  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  ExternalReference pending_exception_address(Top::k_pending_exception_address);
-  __ movq(kScratchRegister, pending_exception_address);
-  __ Cmp(kScratchRegister, Factory::the_hole_value());
-  __ j(equal, &runtime);
-  __ bind(&failure);
-  // For failure and exception return null.
-  __ Move(rax, Factory::null_value());
+
+  // For failure return null.
+  __ LoadRoot(rax, Heap::kNullValueRootIndex);
   __ ret(4 * kPointerSize);

   // Load RegExp data.
@@ -2537,6 +2732,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
   __ ret(4 * kPointerSize);

+  __ bind(&exception);
+  // Result must now be exception. If there is no pending exception already a
+  // stack overflow (on the backtrack stack) was detected in RegExp code but
+  // haven't created the exception yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ movq(rbx, pending_exception_address);
+  __ movq(rax, Operand(rbx, 0));
+  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+  __ cmpq(rax, rdx);
+  __ j(equal, &runtime);
+  __ movq(Operand(rbx, 0), rdx);
+
+  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+  NearLabel termination_exception;
+  __ j(equal, &termination_exception);
+  __ Throw(rax);
+
+  __ bind(&termination_exception);
+  __ ThrowUncatchable(TERMINATION, rax);
+
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -3085,31 +3301,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {


 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  // Check that stack should contain next handler, frame pointer, state and
-  // return address in that order.
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
-                StackHandlerConstants::kStateOffset);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
-                StackHandlerConstants::kPCOffset);
-
-  ExternalReference handler_address(Top::k_handler_address);
-  __ movq(kScratchRegister, handler_address);
-  __ movq(rsp, Operand(kScratchRegister, 0));
-  // get next in chain
-  __ pop(rcx);
-  __ movq(Operand(kScratchRegister, 0), rcx);
-  __ pop(rbp);  // pop frame pointer
-  __ pop(rdx);  // remove state
-
-  // Before returning we restore the context from the frame pointer if not NULL.
-  // The frame pointer is NULL in the exception handler of a JS entry frame.
-  __ Set(rsi, 0);  // Tentatively set context pointer to NULL
-  NearLabel skip;
-  __ cmpq(rbp, Immediate(0));
-  __ j(equal, &skip);
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ bind(&skip);
-  __ ret(0);
+  // Throw exception in rax.
+  __ Throw(rax);
 }


@@ -3251,54 +3444,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,

 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
-  // Fetch top stack handler.
-  ExternalReference handler_address(Top::k_handler_address);
-  __ movq(kScratchRegister, handler_address);
-  __ movq(rsp, Operand(kScratchRegister, 0));
-
-  // Unwind the handlers until the ENTRY handler is found.
-  NearLabel loop, done;
-  __ bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
-  __ j(equal, &done);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  __ movq(rsp, Operand(rsp, kNextOffset));
-  __ jmp(&loop);
-  __ bind(&done);
-
-  // Set the top handler address to next handler past the current ENTRY handler.
-  __ movq(kScratchRegister, handler_address);
-  __ pop(Operand(kScratchRegister, 0));
-
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(Top::k_external_caught_exception_address);
-    __ movq(rax, Immediate(false));
-    __ store_rax(external_caught);
-
-    // Set pending exception and rax to out of memory exception.
-    ExternalReference pending_exception(Top::k_pending_exception_address);
-    __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
-    __ store_rax(pending_exception);
-  }
-
-  // Clear the context pointer.
-  __ Set(rsi, 0);
-
-  // Restore registers from handler.
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
-                StackHandlerConstants::kFPOffset);
-  __ pop(rbp);  // FP
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
-                StackHandlerConstants::kStateOffset);
-  __ pop(rdx);  // State
-
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
-                StackHandlerConstants::kPCOffset);
-  __ ret(0);
+  __ ThrowUncatchable(type, rax);
 }


@@ -3415,8 +3561,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {

   // Set up the roots and smi constant registers.
   // Needs to be done before any further smi loads.
-  ExternalReference roots_address = ExternalReference::roots_address();
-  __ movq(kRootRegister, roots_address);
+  __ InitializeRootRegister();
   __ InitializeSmiConstantRegister();

 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -3516,6 +3661,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // is and instance of the function and anything else to
   // indicate that the value is not an instance.

+  // None of the flags are supported on X64.
+  ASSERT(flags_ == kNoFlags);
+
   // Get the object - go slow case if it's a smi.
   Label slow;
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
@@ -3591,10 +3739,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 }


-Register InstanceofStub::left() { return rax; }
+// Passing arguments in registers is not supported.
+Register InstanceofStub::left() { return no_reg; } -Register InstanceofStub::right() { return rdx; } +Register InstanceofStub::right() { return no_reg; } int CompareStub::MinorKey() { @@ -3853,14 +4002,15 @@ void StringCharAtGenerator::GenerateSlow( void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; // Load the two arguments. - __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument. - __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument. + __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left). + __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right). // Make sure that both arguments are strings if not known in advance. - if (string_check_) { + if (flags_ == NO_STRING_ADD_FLAGS) { Condition is_smi; is_smi = masm->CheckSmi(rax); __ j(is_smi, &string_add_runtime); @@ -3872,6 +4022,20 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ j(is_smi, &string_add_runtime); __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9); __ j(above_equal, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. + if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi, + &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi, + &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } } // Both arguments are strings. @@ -3899,14 +4063,14 @@ void StringAddStub::Generate(MacroAssembler* masm) { // rbx: length of first string // rcx: length of second string // rdx: second string - // r8: map of first string if string check was performed above - // r9: map of second string if string check was performed above + // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS) + // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS) Label string_add_flat_result, longer_than_two; __ bind(&both_not_zero_length); // If arguments where known to be strings, maps are not loaded to r8 and r9 // by the code above. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); } @@ -4092,6 +4256,54 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&string_add_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* slow) { + // First check if the argument is already a string. + Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1); + __ j(below, &done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. 
+ NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + false, + ¬_cached); + __ movq(arg, scratch1); + __ movq(Operand(rsp, stack_offset), arg); + __ jmp(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1. + __ j(not_equal, slow); + __ testb(FieldOperand(scratch1, Map::kBitField2Offset), + Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ j(zero, slow); + __ movq(arg, FieldOperand(arg, JSValue::kValueOffset)); + __ movq(Operand(rsp, stack_offset), arg); + + __ bind(&done); } @@ -4620,6 +4832,61 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } + +void StringCharAtStub::Generate(MacroAssembler* masm) { + // Expects two arguments (object, index) on the stack: + + // Stack frame on entry. + // rsp[0]: return address + // rsp[8]: index + // rsp[16]: object + + Register object = rbx; + Register index = rax; + Register scratch1 = rcx; + Register scratch2 = rdx; + Register result = rax; + + __ pop(scratch1); // Return address. + __ pop(index); + __ pop(object); + __ push(scratch1); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharAtGenerator generator(object, + index, + scratch1, + scratch2, + result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ jmp(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // the empty string. + __ Move(result, Factory::empty_string()); + __ jmp(&done); + + __ bind(&need_conversion); + // Move smi zero into the result register, which will trigger + // conversion. + __ Move(result, Smi::FromInt(0)); + __ jmp(&done); + + StubRuntimeCallHelper call_helper; + generator.GenerateSlow(masm, call_helper); + + __ bind(&done); + __ ret(0); +} + + void ICCompareStub::GenerateSmis(MacroAssembler* masm) { ASSERT(state_ == CompareIC::SMIS); NearLabel miss; @@ -4767,9 +5034,19 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, } __ SmiToInteger32(untagged_key, key); - // Verify that the receiver has pixel array elements. __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset)); - __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true); + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. + __ Cmp(FieldOperand(elements, HeapObject::kMapOffset), + Factory::pixel_array_map()); + __ Assert(equal, "Elements isn't a pixel array"); + } + } // Check that the smi is in range. __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset)); @@ -4783,6 +5060,88 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, } +// Stores an indexed element into a pixel array, clamping the stored value. 
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ bool key_is_untagged,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key (must be a smi) and is unchanged.
+ // value - holds the value (must be a smi) and is unchanged.
+ // elements - holds the element object of the receiver on entry if
+ // load_elements_from_receiver is false, otherwise used
+ // internally to store the pixel array's elements and
+ // external array pointer.
+ //
+ Register external_pointer = elements;
+ Register untagged_key = scratch1;
+ Register untagged_value = receiver; // Only set once success is guaranteed.
+
+ // Fetch the receiver's elements if the caller hasn't already done so.
+ if (load_elements_from_receiver) {
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ }
+
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
+
+ // Key must be a smi and it must be in range.
+ if (key_is_untagged) {
+ untagged_key = key;
+ } else {
+ // Some callers already have verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, add an explicit
+ // check to ensure the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiToInteger32(untagged_key, key);
+ }
+ __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Value must be a smi.
+ __ JumpIfNotSmi(value, value_not_smi);
+ __ SmiToInteger32(untagged_value, value);
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ testl(untagged_value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
+ __ decb(untagged_value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ movq(external_pointer,
+ FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
+ __ ret(0); // Return value in rax.
+}
+

#undef __

} } // namespace v8::internal

diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 8051d4bd..32a37b21 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,15 +39,23 @@ namespace internal {

// TranscendentalCache runtime function.
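The stub class just below gains an argument-type dimension; because UNTAGGED is shifted past kTranscendentalTypeBits, the cache type and the argument kind occupy disjoint bits of the minor key. A sketch of constructing both variants (illustrative; only the TAGGED call sites appear later in this diff):

    TranscendentalCacheStub tagged_stub(TranscendentalCache::LOG,
                                        TranscendentalCacheStub::TAGGED);
    TranscendentalCacheStub untagged_stub(TranscendentalCache::LOG,
                                          TranscendentalCacheStub::UNTAGGED);
    // tagged_stub.MinorKey() != untagged_stub.MinorKey(): the UNTAGGED bit
    // sits above the type field, so no two (type, kind) pairs collide.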
class TranscendentalCacheStub: public CodeStub { public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} + enum ArgumentType { + TAGGED = 0, + UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits + }; + + explicit TranscendentalCacheStub(TranscendentalCache::Type type, + ArgumentType argument_type) + : type_(type), argument_type_(argument_type) {} void Generate(MacroAssembler* masm); private: TranscendentalCache::Type type_; + ArgumentType argument_type_; + Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } + int MinorKey() { return type_ | argument_type_; } Runtime::FunctionId RuntimeFunction(); - void GenerateOperation(MacroAssembler* masm, Label* on_nan_result); + void GenerateOperation(MacroAssembler* masm); }; @@ -360,24 +368,35 @@ class StringHelper : public AllStatic { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. + NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB }; class StringAddStub: public CodeStub { public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} private: Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } + int MinorKey() { return flags_; } void Generate(MacroAssembler* masm); - // Should the stub check whether arguments are strings? - bool string_check_; + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* slow); + + const StringAddFlags flags_; }; @@ -452,14 +471,14 @@ class NumberToStringStub: public CodeStub { }; -// Generate code the to load an element from a pixel array. The receiver is -// assumed to not be a smi and to have elements, the caller must guarantee this -// precondition. If the receiver does not have elements that are pixel arrays, -// the generated code jumps to not_pixel_array. If key is not a smi, then the -// generated code branches to key_not_smi. Callers can specify NULL for -// key_not_smi to signal that a smi check has already been performed on key so -// that the smi check is not generated . If key is not a valid index within the -// bounds of the pixel array, the generated code jumps to out_of_range. +// Generate code to load an element from a pixel array. The receiver is assumed +// to not be a smi and to have elements, the caller must guarantee this +// precondition. If key is not a smi, then the generated code branches to +// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi +// check has already been performed on key so that the smi check is not +// generated. If key is not a valid index within the bounds of the pixel array, +// the generated code jumps to out_of_range. receiver, key and elements are +// unchanged throughout the generated code sequence. 
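Both pixel-array helpers share the NULL-sentinel convention described above; in ordinary C++ the generated check reduces to the following shape (an illustration of the convention, not additional emitted code):

    if (not_pixel_array != NULL) {
      // emit the map check: jump to not_pixel_array unless elements
      // carries the pixel-array map
    } else if (FLAG_debug_code) {
      // caller vouched for the map; emit only a debug-mode assert
    }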
void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -470,6 +489,30 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, Label* key_not_smi, Label* out_of_range); +// Generate code to store an element into a pixel array, clamping values between +// [0..255]. The receiver is assumed to not be a smi and to have elements, the +// caller must guarantee this precondition. If key is not a smi, then the +// generated code branches to key_not_smi. Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If the value is not a smi, the +// generated code will branch to value_not_smi. If the receiver +// doesn't have pixel array elements, the generated code will branch to +// not_pixel_array, unless not_pixel_array is NULL, in which case the caller +// must ensure that the receiver has pixel array elements. If key is not a +// valid index within the bounds of the pixel array, the generated code jumps to +// out_of_range. +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register scratch1, + bool load_elements_from_receiver, + bool key_is_untagged, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range); } } // namespace v8::internal diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h index 60e9ab03..53caf919 100644 --- a/src/x64/codegen-x64-inl.h +++ b/src/x64/codegen-x64-inl.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index fe905670..fc4bc04e 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -2747,7 +2747,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { frame_->EmitPush(rsi); // The context is the first argument. frame_->EmitPush(kScratchRegister); frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0)); - Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); + frame_->EmitPush(Smi::FromInt(strict_mode_flag())); + Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4); // Return value is ignored. } @@ -4605,7 +4606,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // by initialization. value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); } else { - value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); + frame_->Push(Smi::FromInt(strict_mode_flag())); + value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4); } // Storing a variable must keep the (new) value on the expression // stack. This is necessary for compiling chained assignment @@ -4914,8 +4916,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { Load(property->key()); Load(property->value()); if (property->emit_store()) { + frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes // Ignore the result. 
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3); + Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4); } else { frame_->Drop(3); } @@ -7030,7 +7033,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); - TranscendentalCacheStub stub(TranscendentalCache::SIN); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::TAGGED); Result result = frame_->CallStub(&stub, 1); frame_->Push(&result); } @@ -7039,7 +7043,8 @@ void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); - TranscendentalCacheStub stub(TranscendentalCache::COS); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::TAGGED); Result result = frame_->CallStub(&stub, 1); frame_->Push(&result); } @@ -7048,7 +7053,8 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 1); Load(args->at(0)); - TranscendentalCacheStub stub(TranscendentalCache::LOG); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::TAGGED); Result result = frame_->CallStub(&stub, 1); frame_->Push(&result); } @@ -7230,21 +7236,25 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { if (property != NULL) { Load(property->obj()); Load(property->key()); - Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2); + frame_->Push(Smi::FromInt(strict_mode_flag())); + Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3); frame_->Push(&answer); return; } Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); if (variable != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); frame_->Push(variable->name()); + frame_->Push(Smi::FromInt(kNonStrictMode)); Result answer = frame_->InvokeBuiltin(Builtins::DELETE, - CALL_FUNCTION, 2); + CALL_FUNCTION, 3); frame_->Push(&answer); - return; } else if (slot != NULL && slot->type() == Slot::LOOKUP) { // Call the runtime to delete from the context holding the named @@ -7255,13 +7265,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { frame_->EmitPush(variable->name()); Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); frame_->Push(&answer); - return; + } else { + // Default: Result of deleting non-global, not dynamically + // introduced variables is false. + frame_->Push(Factory::false_value()); } - - // Default: Result of deleting non-global, not dynamically - // introduced variables is false. - frame_->Push(Factory::false_value()); - } else { // Default: Result of deleting expressions is true. 
Load(node->expression()); // may have side-effects @@ -8070,8 +8078,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { public: DeferredReferenceSetKeyedValue(Register value, Register key, - Register receiver) - : value_(value), key_(key), receiver_(receiver) { + Register receiver, + StrictModeFlag strict_mode) + : value_(value), + key_(key), + receiver_(receiver), + strict_mode_(strict_mode) { set_comment("[ DeferredReferenceSetKeyedValue"); } @@ -8084,6 +8096,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode { Register key_; Register receiver_; Label patch_site_; + StrictModeFlag strict_mode_; }; @@ -8135,7 +8148,9 @@ void DeferredReferenceSetKeyedValue::Generate() { } // Call the IC stub. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The delta from the start of the map-compare instructions (initial movq) // to the test instruction. We use masm_-> directly here instead of the @@ -8202,7 +8217,8 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { // This is the map check instruction that will be patched (so we can't // use the double underscore macro that may insert instructions). // Initially use an invalid map to force a failure. - masm()->Move(kScratchRegister, Factory::null_value()); + masm()->movq(kScratchRegister, Factory::null_value(), + RelocInfo::EMBEDDED_OBJECT); masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), kScratchRegister); // This branch is always a forwards branch so it's always a fixed @@ -8278,7 +8294,8 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) { // the __ macro for the following two instructions because it // might introduce extra instructions. __ bind(&patch_site); - masm()->Move(kScratchRegister, Factory::null_value()); + masm()->movq(kScratchRegister, Factory::null_value(), + RelocInfo::EMBEDDED_OBJECT); masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), kScratchRegister); // This branch is always a forwards branch so it's always a fixed size @@ -8476,7 +8493,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { DeferredReferenceSetKeyedValue* deferred = new DeferredReferenceSetKeyedValue(result.reg(), key.reg(), - receiver.reg()); + receiver.reg(), + strict_mode_flag()); // Check that the receiver is not a smi. __ JumpIfSmi(receiver.reg(), deferred->entry_label()); @@ -8538,7 +8556,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { deferred->BindExit(); } else { - result = frame()->CallKeyedStoreIC(); + result = frame()->CallKeyedStoreIC(strict_mode_flag()); // Make sure that we do not have a test instruction after the // call. A test instruction after the call is used to // indicate that we have generated an inline version of the diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h index c283db3a..43928291 100644 --- a/src/x64/codegen-x64.h +++ b/src/x64/codegen-x64.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 513c5228..3ff292e8 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 4218647f..2c50ddd1 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index ed6c47bf..daa91280 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -203,19 +203,196 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {

void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
 Code* check_code,
 Code* replacement_code) {
- UNIMPLEMENTED();
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(check_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // The stack check code matches the pattern:
+ //
+ // cmp rsp, <limit>
+ // jae ok
+ // call <stack guard>
+ // test rax, <loop nesting depth>
+ // ok: ...
+ //
+ // We will patch away the branch so the code is:
+ //
+ // cmp rsp, <limit> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // test rax, <loop nesting depth>
+ // ok:
+ //
+ ASSERT(*(call_target_address - 3) == 0x73 && // jae
+ *(call_target_address - 2) == 0x07 && // offset
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x90; // nop
+ *(call_target_address - 2) = 0x90; // nop
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
}


void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
 Code* check_code,
 Code* replacement_code) {
- UNIMPLEMENTED();
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(replacement_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
+ // restore the conditional branch.
+ ASSERT(*(call_target_address - 3) == 0x90 && // nop
+ *(call_target_address - 2) == 0x90 && // nop
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x73; // jae
+ *(call_target_address - 2) = 0x07; // offset
+ Assembler::set_target_address_at(call_target_address,
+ check_code->entry());
+}
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+ ByteArray* translations = data->TranslationByteArray();
+ int length = data->DeoptCount();
+ for (int i = 0; i < length; i++) {
+ if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ TranslationIterator it(translations, data->TranslationIndex(i)->value());
+ int value = it.Next();
+ ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+ // Read the number of frames.
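For reference, the concrete encoding behind the asserts in PatchStackCheckCodeAt above: 0x73 0x07 is jae +7, 0xe8 followed by a 32-bit displacement is the call (which is why call_target_address is pc_after - kIntSize), and 0x90 is nop. The patch is a two-byte flip plus a retargeted call (a byte-level sketch under the layout asserted above):

    // before: 73 07 e8 xx xx xx xx   ; jae ok / call <stack guard>
    // after:  90 90 e8 yy yy yy yy   ; nop nop / call <on-stack replacement>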
+ value = it.Next(); + if (value == 1) return i; + } + } + UNREACHABLE(); + return -1; } void Deoptimizer::DoComputeOsrOutputFrame() { - UNIMPLEMENTED(); + DeoptimizationInputData* data = DeoptimizationInputData::cast( + optimized_code_->deoptimization_data()); + unsigned ast_id = data->OsrAstId()->value(); + // TODO(kasperl): This should not be the bailout_id_. It should be + // the ast id. Confusing. + ASSERT(bailout_id_ == ast_id); + + int bailout_id = LookupBailoutId(data, ast_id); + unsigned translation_index = data->TranslationIndex(bailout_id)->value(); + ByteArray* translations = data->TranslationByteArray(); + + TranslationIterator iterator(translations, translation_index); + Translation::Opcode opcode = + static_cast<Translation::Opcode>(iterator.Next()); + ASSERT(Translation::BEGIN == opcode); + USE(opcode); + int count = iterator.Next(); + ASSERT(count == 1); + USE(count); + + opcode = static_cast<Translation::Opcode>(iterator.Next()); + USE(opcode); + ASSERT(Translation::FRAME == opcode); + unsigned node_id = iterator.Next(); + USE(node_id); + ASSERT(node_id == ast_id); + JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next())); + USE(function); + ASSERT(function == function_); + unsigned height = iterator.Next(); + unsigned height_in_bytes = height * kPointerSize; + USE(height_in_bytes); + + unsigned fixed_size = ComputeFixedSize(function_); + unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize()); + ASSERT(fixed_size + height_in_bytes == input_frame_size); + + unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize; + unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value(); + unsigned outgoing_size = outgoing_height * kPointerSize; + unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size; + ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call. + + if (FLAG_trace_osr) { + PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ", + reinterpret_cast<intptr_t>(function_)); + function_->PrintName(); + PrintF(" => node=%u, frame=%d->%d]\n", + ast_id, + input_frame_size, + output_frame_size); + } + + // There's only one output frame in the OSR case. + output_count_ = 1; + output_ = new FrameDescription*[1]; + output_[0] = new(output_frame_size) FrameDescription( + output_frame_size, function_); + + // Clear the incoming parameters in the optimized frame to avoid + // confusing the garbage collector. + unsigned output_offset = output_frame_size - kPointerSize; + int parameter_count = function_->shared()->formal_parameter_count() + 1; + for (int i = 0; i < parameter_count; ++i) { + output_[0]->SetFrameSlot(output_offset, 0); + output_offset -= kPointerSize; + } + + // Translate the incoming parameters. This may overwrite some of the + // incoming argument slots we've just cleared. + int input_offset = input_frame_size - kPointerSize; + bool ok = true; + int limit = input_offset - (parameter_count * kPointerSize); + while (ok && input_offset > limit) { + ok = DoOsrTranslateCommand(&iterator, &input_offset); + } + + // There are no translation commands for the caller's pc and fp, the + // context, and the function. Set them up explicitly. 
+ for (int i = 0; ok && i < 4; i++) {
+ intptr_t input_value = input_->GetFrameSlot(input_offset);
+ if (FLAG_trace_osr) {
+ PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
+ output_offset,
+ input_value,
+ input_offset);
+ }
+ output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+ input_offset -= kPointerSize;
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the rest of the frame.
+ while (ok && input_offset >= 0) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // If translation of any command failed, continue using the input frame.
+ if (!ok) {
+ delete output_[0];
+ output_[0] = input_;
+ output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
+ } else {
+ // Set up the frame pointer and the context pointer.
+ output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
+ output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
+
+ unsigned pc_offset = data->OsrPcOffset()->value();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ optimized_code_->entry() + pc_offset);
+ output_[0]->SetPc(pc);
+ }
+ Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ output_[0]->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+ ok ? "finished" : "aborted",
+ reinterpret_cast<intptr_t>(function));
+ function->PrintName();
+ PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
+ }
}


@@ -321,14 +498,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
 fp_value, output_offset, value);
}

- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
 output_offset -= kPointerSize;
 input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
 output_frame->SetFrameSlot(output_offset, value);
 if (is_topmost) output_frame->SetRegister(rsi.code(), value);
 if (FLAG_trace_deopt) {
@@ -461,7 +640,7 @@ void Deoptimizer::EntryGenerator::Generate() {

// On Windows, put the argument on the stack (PrepareCallCFunction has
// created space for this). On Linux, pass the argument in r8.
#ifdef _WIN64
- __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kPointerSize), arg5);
#else
__ movq(r8, arg5);
#endif
@@ -570,11 +749,8 @@ void Deoptimizer::EntryGenerator::Generate() {

// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(r13, roots_address);
-
- __ movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ __ InitializeRootRegister();
+ __ InitializeSmiConstantRegister();

// Return to the continuation point.
__ ret(0);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index f73f9484..21a100f5 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1040,14 +1040,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { AppendToBuffer(", %s", NameOfXMMRegister(regop)); } else { const char* mnemonic = "?"; - if (opcode == 0x57) { + if (opcode == 0x50) { + mnemonic = "movmskpd"; + } else if (opcode == 0x54) { + mnemonic = "andpd"; + } else if (opcode == 0x56) { + mnemonic = "orpd"; + } else if (opcode == 0x57) { mnemonic = "xorpd"; } else if (opcode == 0x2E) { mnemonic = "ucomisd"; } else if (opcode == 0x2F) { mnemonic = "comisd"; - } else if (opcode == 0x50) { - mnemonic = "movmskpd"; } else { UnimplementedInstruction(); } diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc index 9c960478..6c58bc9e 100644 --- a/src/x64/frames-x64.cc +++ b/src/x64/frames-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h index 998b3e9f..81be8191 100644 --- a/src/x64/frames-x64.h +++ b/src/x64/frames-x64.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index 556ec852..780f4b02 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -207,43 +207,45 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { Move(dot_arguments_slot, rcx, rbx, rdx); } - { Comment cmnt(masm_, "[ Declarations"); - // For named function expressions, declare the function name as a - // constant. - if (scope()->is_function_scope() && scope()->function() != NULL) { - EmitDeclaration(scope()->function(), Variable::CONST, NULL); - } - // Visit all the explicit declarations unless there is an illegal - // redeclaration. - if (scope()->HasIllegalRedeclaration()) { - scope()->VisitIllegalRedeclaration(this); - } else { - VisitDeclarations(scope()->declarations()); - } - } - if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); } - { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailout(info->function(), NO_REGISTERS); - NearLabel ok; - __ CompareRoot(rsp, Heap::kStackLimitRootIndex); - __ j(above_equal, &ok); - StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); - } + // Visit the declarations and body unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); + } else { + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. 
+ if (scope()->is_function_scope() && scope()->function() != NULL) { + EmitDeclaration(scope()->function(), Variable::CONST, NULL); + } + VisitDeclarations(scope()->declarations()); + } - { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); - VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailout(info->function(), NO_REGISTERS); + NearLabel ok; + __ CompareRoot(rsp, Heap::kStackLimitRootIndex); + __ j(above_equal, &ok); + StackCheckStub stub; + __ CallStub(&stub); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); + VisitStatements(function()->body()); + ASSERT(loop_depth() == 0); + } } + // Always emit a 'return undefined' in case control fell off the end of + // the body. { Comment cmnt(masm_, "[ return <undefined>;"); - // Emit a 'return undefined' in case control fell off the end of the body. __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); EmitReturnSequence(); } @@ -267,6 +269,13 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { // the deoptimization input data found in the optimized code. RecordStackCheck(stmt->OsrEntryId()); + // Loop stack checks can be patched to perform on-stack replacement. In + // order to decide whether or not to perform OSR we embed the loop depth + // in a test instruction after the call so we can extract it from the OSR + // builtin. + ASSERT(loop_depth() > 0); + __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); + __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); // Record a mapping of the OSR id to this PC. This is used if the OSR @@ -318,13 +327,6 @@ void FullCodeGenerator::EmitReturnSequence() { } -FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( - Token::Value op, Expression* left, Expression* right) { - ASSERT(ShouldInlineSmiCase(op)); - return kNoConstants; -} - - void FullCodeGenerator::EffectContext::Plug(Slot* slot) const { } @@ -543,7 +545,7 @@ void FullCodeGenerator::DoTest(Label* if_true, __ j(equal, if_true); __ CompareRoot(result_register(), Heap::kFalseValueRootIndex); __ j(equal, if_false); - ASSERT_EQ(0, kSmiTag); + STATIC_ASSERT(kSmiTag == 0); __ SmiCompare(result_register(), Smi::FromInt(0)); __ j(equal, if_false); Condition is_smi = masm_->CheckSmi(result_register()); @@ -733,7 +735,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, prop->key()->AsLiteral()->handle()->IsSmi()); __ Move(rcx, prop->key()->AsLiteral()->handle()); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin(is_strict() + ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); } } @@ -750,7 +754,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { __ push(rsi); // The context is the first argument. __ Push(pairs); __ Push(Smi::FromInt(is_eval() ? 1 : 0)); - __ CallRuntime(Runtime::kDeclareGlobals, 3); + __ Push(Smi::FromInt(strict_mode_flag())); + __ CallRuntime(Runtime::kDeclareGlobals, 4); // Return value is ignored. 
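Both code generators in this patch now pass the same four arguments to Runtime::kDeclareGlobals; the stack at the call site, top of stack last (annotations are descriptive, not the runtime's parameter names):

    // rsi                               ; current context
    // pairs                             ; FixedArray of declaration pairs
    // Smi::FromInt(is_eval() ? 1 : 0)   ; eval flag
    // Smi::FromInt(strict_mode_flag())  ; new: strict-mode flag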
} @@ -851,7 +856,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { VisitForAccumulatorValue(stmt->enumerable()); __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); __ j(equal, &exit); - __ CompareRoot(rax, Heap::kNullValueRootIndex); + Register null_value = rdi; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ cmpq(rax, null_value); __ j(equal, &exit); // Convert the object to a JS object. @@ -865,12 +872,61 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(&done_convert); __ push(rax); - // BUG(867): Check cache validity in generated code. This is a fast - // case for the JSObject::IsSimpleEnum cache validity checks. If we - // cannot guarantee cache validity, call the runtime system to check - // cache validity or get the property names in a fixed array. + // Check cache validity in generated code. This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. + Label next, call_runtime; + Register empty_fixed_array_value = r8; + __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); + Register empty_descriptor_array_value = r9; + __ LoadRoot(empty_descriptor_array_value, + Heap::kEmptyDescriptorArrayRootIndex); + __ movq(rcx, rax); + __ bind(&next); + + // Check that there are no elements. Register rcx contains the + // current JS object we've reached through the prototype chain. + __ cmpq(empty_fixed_array_value, + FieldOperand(rcx, JSObject::kElementsOffset)); + __ j(not_equal, &call_runtime); + + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in rbx for the subsequent + // prototype load. + __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset)); + __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset)); + __ cmpq(rdx, empty_descriptor_array_value); + __ j(equal, &call_runtime); + + // Check that there is an enum cache in the non-empty instance + // descriptors (rdx). This is the case if the next enumeration + // index field does not contain a smi. + __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset)); + __ JumpIfSmi(rdx, &call_runtime); + + // For all objects but the receiver, check that the cache is empty. + NearLabel check_prototype; + __ cmpq(rcx, rax); + __ j(equal, &check_prototype); + __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset)); + __ cmpq(rdx, empty_fixed_array_value); + __ j(not_equal, &call_runtime); + + // Load the prototype from the map and loop if non-null. + __ bind(&check_prototype); + __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset)); + __ cmpq(rcx, null_value); + __ j(not_equal, &next); + + // The enum cache is valid. Load the map of the object being + // iterated over and use the cache for the iteration. + NearLabel use_cache; + __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset)); + __ jmp(&use_cache); // Get the set of properties to enumerate. + __ bind(&call_runtime); __ push(rax); // Duplicate the enumerable object on the stack. __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); @@ -883,6 +939,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ j(not_equal, &fixed_array); // We got a map in register rax. Get the enumeration cache from it. 
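The prototype-chain walk emitted above is the generated-code mirror of the C++ fast-path test for simple enumeration (JSObject::IsSimpleEnum); roughly, in pseudocode over the same fields the checks read (accessor names here are descriptive):

    for (Object* o = receiver; o != null_value; o = o->map()->prototype()) {
      if (o->elements() != empty_fixed_array) goto call_runtime;
      DescriptorArray* d = o->map()->instance_descriptors();
      if (d == empty_descriptor_array) goto call_runtime;
      if (d->enumeration_index_is_smi()) goto call_runtime;  // no enum cache
      if (o != receiver && d->enum_cache() != empty_fixed_array)
        goto call_runtime;  // non-receiver caches must be empty
    }
    // All checks passed: iterate using the receiver map's enum cache.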
+ __ bind(&use_cache); __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset)); __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); @@ -971,8 +1028,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure) { // Use the fast case closure allocation code that allocates in new - // space for nested functions that don't need literals cloning. - if (scope()->is_function_scope() && + // space for nested functions that don't need literals cloning. If + // we're running with the --always-opt or the --prepare-always-opt + // flag, we need to use the runtime function so that the new function + // we are creating here gets a chance to have its code optimized and + // doesn't just get a copy of the existing unoptimized code. + if (!FLAG_always_opt && + !FLAG_prepare_always_opt && + scope()->is_function_scope() && info->num_literals() == 0 && !pretenure) { FastNewClosureStub stub; @@ -1082,8 +1145,11 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( // Check that last extension is NULL. __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); __ j(not_equal, slow); - __ movq(temp, ContextOperand(context, Context::FCONTEXT_INDEX)); - return ContextOperand(temp, slot->index()); + + // This function is used only for loads, not stores, so it's safe to + // return an rsi-based operand (the write barrier cannot be allowed to + // destroy the rsi register). + return ContextOperand(context, slot->index()); } @@ -1333,7 +1399,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ CallRuntime(Runtime::kSetProperty, 3); + __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ CallRuntime(Runtime::kSetProperty, 4); } else { __ Drop(3); } @@ -1509,14 +1576,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } Token::Value op = expr->binary_op(); - ConstantOperand constant = ShouldInlineSmiCase(op) - ? GetConstantOperand(op, expr->target(), expr->value()) - : kNoConstants; - ASSERT(constant == kRightConstant || constant == kNoConstants); - if (constant == kNoConstants) { - __ push(rax); // Left operand goes on the stack. - VisitForAccumulatorValue(expr->value()); - } + __ push(rax); // Left operand goes on the stack. + VisitForAccumulatorValue(expr->value()); OverwriteMode mode = expr->value()->ResultOverwriteAllowed() ? OVERWRITE_RIGHT @@ -1528,8 +1589,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { op, mode, expr->target(), - expr->value(), - constant); + expr->value()); } else { EmitBinaryOp(op, mode); } @@ -1580,10 +1640,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, Token::Value op, OverwriteMode mode, Expression* left, - Expression* right, - ConstantOperand constant) { - ASSERT(constant == kNoConstants); // Only handled case. - + Expression* right) { // Do combined smi check of the operands. Left operand is on the // stack (popped into rdx). Right operand is in rax but moved into // rcx to make the shifts easier. @@ -1680,18 +1737,32 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { __ movq(rdx, rax); __ pop(rax); // Restore value. 
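In the INIT_CONST cases a few hunks below, every store is guarded by a hole check; the intent, reduced to pseudocode (the hole is the marker a const declaration stores before its initializer runs):

    if (slot == the_hole) slot = value;  // first initialization wins
    // otherwise skip: assigning to an already-initialized const is a no-op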
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
 EmitCallIC(ic, RelocInfo::CODE_TARGET);
 break;
}
case KEYED_PROPERTY: {
 __ push(rax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ if (prop->is_synthetic()) {
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+ __ movq(rdx, rax);
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rcx, rax);
+ __ pop(rdx);
+ }
+ __ pop(rax); // Restore value.
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
 EmitCallIC(ic, RelocInfo::CODE_TARGET);
 break;
}
@@ -1720,57 +1791,76 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
 : Builtins::StoreIC_Initialize));
 EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);

- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ __ movq(rdx, Operand(rbp, SlotOffset(slot)));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ __ movq(Operand(rbp, SlotOffset(slot)), rax);
+ break;
+ case Slot::CONTEXT: {
+ __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ movq(rdx, ContextOperand(rcx, slot->index()));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ __ movq(ContextOperand(rcx, slot->index()), rax);
+ int offset = Context::SlotOffset(slot->index());
+ __ movq(rdx, rax); // Preserve the stored value in rax.
+ __ RecordWrite(rcx, offset, rdx, rbx);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(rax);
+ __ push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
 Slot* slot = var->AsSlot();
 switch (slot->type()) {
 case Slot::PARAMETER:
 case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ movq(rdx, Operand(rbp, SlotOffset(slot)));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- }
 // Perform the assignment.
__ movq(Operand(rbp, SlotOffset(slot)), rax); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, rcx); - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ movq(rdx, target); - __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex); - __ j(not_equal, &done); - } // Perform the assignment and issue the write barrier. __ movq(target, rax); // The value of the assignment is in rax. RecordWrite clobbers its // register arguments. __ movq(rdx, rax); - int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + int offset = Context::SlotOffset(slot->index()); __ RecordWrite(rcx, offset, rdx, rbx); break; } case Slot::LOOKUP: - // Call the runtime for the assignment. The runtime will ignore - // const reinitialization. + // Call the runtime for the assignment. __ push(rax); // Value. __ push(rsi); // Context. __ Push(var->name()); - if (op == Token::INIT_CONST) { - // The runtime will ignore const redeclaration. - __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); - } else { - __ CallRuntime(Runtime::kStoreContextSlot, 3); - } + __ Push(Smi::FromInt(strict_mode_flag())); + __ CallRuntime(Runtime::kStoreContextSlot, 4); break; } - __ bind(&done); } } @@ -1799,7 +1889,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { } else { __ pop(rdx); } - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -1837,7 +1929,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { } // Record source code position before IC call. SetSourcePosition(expr->position()); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. @@ -1953,6 +2047,27 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { } +void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, + int arg_count) { + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ push(Operand(rsp, arg_count * kPointerSize)); + } else { + __ PushRoot(Heap::kUndefinedValueRootIndex); + } + + // Push the receiver of the enclosing function and do runtime call. + __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize)); + + // Push the strict mode flag. + __ Push(Smi::FromInt(strict_mode_flag())); + + __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP + ? Runtime::kResolvePossiblyDirectEvalNoLookup + : Runtime::kResolvePossiblyDirectEval, 4); +} + + void FullCodeGenerator::VisitCall(Call* expr) { #ifdef DEBUG // We want to verify that RecordJSReturnSite gets called on all paths @@ -1980,21 +2095,30 @@ void FullCodeGenerator::VisitCall(Call* expr) { VisitForStackValue(args->at(i)); } - // Push copy of the function - found below the arguments. - __ push(Operand(rsp, (arg_count + 1) * kPointerSize)); - - // Push copy of the first argument or undefined if it doesn't exist. 
- if (arg_count > 0) { - __ push(Operand(rsp, arg_count * kPointerSize)); - } else { - __ PushRoot(Heap::kUndefinedValueRootIndex); + // If we know that eval can only be shadowed by eval-introduced + // variables we attempt to load the global eval function directly + // in generated code. If we succeed, there is no need to perform a + // context lookup in the runtime system. + Label done; + if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { + Label slow; + EmitLoadGlobalSlotCheckExtensions(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow); + // Push the function and resolve eval. + __ push(rax); + EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count); + __ jmp(&done); + __ bind(&slow); } - // Push the receiver of the enclosing function and do runtime call. - __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize)); - // Push the strict mode flag. - __ Push(Smi::FromInt(strict_mode_flag())); - __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); + // Push copy of the function (found below the arguments) and + // resolve eval. + __ push(Operand(rsp, (arg_count + 1) * kPointerSize)); + EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count); + if (done.is_linked()) { + __ bind(&done); + } // The runtime call returns a pair of values in rax (function) and // rdx (receiver). Touch up the stack with the right values. @@ -2611,7 +2735,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) { ASSERT(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - __ CallRuntime(Runtime::kMath_pow, 2); + MathPowStub stub; + __ CallStub(&stub); context()->Plug(rax); } @@ -2795,7 +2920,8 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) { // Load the argument on the stack and call the stub. - TranscendentalCacheStub stub(TranscendentalCache::SIN); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); __ CallStub(&stub); @@ -2805,7 +2931,8 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) { // Load the argument on the stack and call the stub. - TranscendentalCacheStub stub(TranscendentalCache::COS); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); __ CallStub(&stub); @@ -2815,7 +2942,8 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) { // Load the argument on the stack and call the stub. - TranscendentalCacheStub stub(TranscendentalCache::LOG); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::TAGGED); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); __ CallStub(&stub); @@ -2867,7 +2995,73 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); + Label done; + Label slow_case; + Register object = rax; + Register index_1 = rbx; + Register index_2 = rcx; + Register elements = rdi; + Register temp = rdx; + __ movq(object, Operand(rsp, 2 * kPointerSize)); + // Fetch the map and check if array is in fast case. 
+ // Check that object doesn't require security checks and + // has no indexed interceptor. + __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp); + __ j(below, &slow_case); + __ testb(FieldOperand(temp, Map::kBitFieldOffset), + Immediate(KeyedLoadIC::kSlowCaseBitFieldMask)); + __ j(not_zero, &slow_case); + + // Check the object's elements are in fast case and writable. + __ movq(elements, FieldOperand(object, JSObject::kElementsOffset)); + __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), + Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &slow_case); + + // Check that both indices are smis. + __ movq(index_1, Operand(rsp, 1 * kPointerSize)); + __ movq(index_2, Operand(rsp, 0 * kPointerSize)); + __ JumpIfNotBothSmi(index_1, index_2, &slow_case); + + // Check that both indices are valid. + // The JSArray length field is a smi since the array is in fast case mode. + __ movq(temp, FieldOperand(object, JSArray::kLengthOffset)); + __ SmiCompare(temp, index_1); + __ j(below_equal, &slow_case); + __ SmiCompare(temp, index_2); + __ j(below_equal, &slow_case); + + __ SmiToInteger32(index_1, index_1); + __ SmiToInteger32(index_2, index_2); + // Bring addresses into index1 and index2. + __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size, + FixedArray::kHeaderSize)); + __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size, + FixedArray::kHeaderSize)); + + // Swap elements. Use object and temp as scratch registers. + __ movq(object, Operand(index_1, 0)); + __ movq(temp, Operand(index_2, 0)); + __ movq(Operand(index_2, 0), object); + __ movq(Operand(index_1, 0), temp); + + Label new_space; + __ InNewSpace(elements, temp, equal, &new_space); + + __ movq(object, elements); + __ RecordWriteHelper(object, index_1, temp); + __ RecordWriteHelper(elements, index_2, temp); + + __ bind(&new_space); + // We are done. Drop elements from the stack, and return undefined. + __ addq(rsp, Immediate(3 * kPointerSize)); + __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); + __ jmp(&done); + + __ bind(&slow_case); __ CallRuntime(Runtime::kSwapElements, 3); + + __ bind(&done); context()->Plug(rax); } @@ -2990,9 +3184,12 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); - VisitForAccumulatorValue(args->at(0)); + if (FLAG_debug_code) { + __ AbortIfNotString(rax); + } + __ movl(rax, FieldOperand(rax, String::kHashFieldOffset)); ASSERT(String::kHashShift >= kSmiTagSize); __ IndexFromHash(rax, rax); @@ -3050,19 +3247,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); Property* prop = expr->expression()->AsProperty(); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - if (prop == NULL && var == NULL) { - // Result of deleting non-property, non-variable reference is true. - // The subexpression may have side effects. - VisitForEffect(expr->expression()); - context()->Plug(true); - } else if (var != NULL && - !var->is_global() && - var->AsSlot() != NULL && - var->AsSlot()->type() != Slot::LOOKUP) { - // Result of deleting non-global, non-dynamic variables is false. - // The subexpression does not have side effects. - context()->Plug(false); - } else if (prop != NULL) { + + if (prop != NULL) { if (prop->is_synthetic()) { // Result of deleting parameters is false, even when they rewrite // to accesses on the arguments object. 
@@ -3070,21 +3256,38 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } else { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); + __ Push(Smi::FromInt(strict_mode_flag())); __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); context()->Plug(rax); } - } else if (var->is_global()) { - __ push(GlobalObjectOperand()); - __ Push(var->name()); - __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); - context()->Plug(rax); + } else if (var != NULL) { + // Deleting an unqualified identifier is disallowed in strict mode, + // but "delete this" is allowed. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); + if (var->is_global()) { + __ push(GlobalObjectOperand()); + __ Push(var->name()); + __ Push(Smi::FromInt(kNonStrictMode)); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + context()->Plug(rax); + } else if (var->AsSlot() != NULL && + var->AsSlot()->type() != Slot::LOOKUP) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + context()->Plug(false); + } else { + // Non-global variable. Call the runtime to try to delete from the + // context where the variable was introduced. + __ push(context_register()); + __ Push(var->name()); + __ CallRuntime(Runtime::kDeleteContextSlot, 2); + context()->Plug(rax); + } } else { - // Non-global variable. Call the runtime to try to delete from the - // context where the variable was introduced. - __ push(context_register()); - __ Push(var->name()); - __ CallRuntime(Runtime::kDeleteContextSlot, 2); - context()->Plug(rax); + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + context()->Plug(true); } break; } @@ -3098,16 +3301,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - // Notice that the labels are swapped. - context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects, so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + // Notice that the labels are swapped. + context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } @@ -3333,7 +3542,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case NAMED_PROPERTY: { __ Move(rcx, prop->key()->AsLiteral()->handle()); __ pop(rdx); - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ?
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -3348,7 +3559,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case KEYED_PROPERTY: { __ pop(rcx); __ pop(rdx); - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -3427,21 +3640,18 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); if (check->Equals(Heap::number_symbol())) { - Condition is_smi = masm_->CheckSmi(rax); - __ j(is_smi, if_true); + __ JumpIfSmi(rax, if_true); __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset)); __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex); Split(equal, if_true, if_false, fall_through); } else if (check->Equals(Heap::string_symbol())) { - Condition is_smi = masm_->CheckSmi(rax); - __ j(is_smi, if_false); + __ JumpIfSmi(rax, if_false); // Check for undetectable objects => false. - __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); + __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx); + __ j(above_equal, if_false); __ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, if_false); - __ CmpInstanceType(rdx, FIRST_NONSTRING_TYPE); - Split(below, if_true, if_false, fall_through); + Split(zero, if_true, if_false, fall_through); } else if (check->Equals(Heap::boolean_symbol())) { __ CompareRoot(rax, Heap::kTrueValueRootIndex); __ j(equal, if_true); @@ -3450,38 +3660,28 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, } else if (check->Equals(Heap::undefined_symbol())) { __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); __ j(equal, if_true); - Condition is_smi = masm_->CheckSmi(rax); - __ j(is_smi, if_false); + __ JumpIfSmi(rax, if_false); // Check for undetectable objects => true. __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); __ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); Split(not_zero, if_true, if_false, fall_through); } else if (check->Equals(Heap::function_symbol())) { - Condition is_smi = masm_->CheckSmi(rax); - __ j(is_smi, if_false); - __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx); - __ j(equal, if_true); - // Regular expressions => 'function' (they are callable). - __ CmpInstanceType(rdx, JS_REGEXP_TYPE); - Split(equal, if_true, if_false, fall_through); + __ JumpIfSmi(rax, if_false); + __ CmpObjectType(rax, FIRST_FUNCTION_CLASS_TYPE, rdx); + Split(above_equal, if_true, if_false, fall_through); } else if (check->Equals(Heap::object_symbol())) { - Condition is_smi = masm_->CheckSmi(rax); - __ j(is_smi, if_false); + __ JumpIfSmi(rax, if_false); __ CompareRoot(rax, Heap::kNullValueRootIndex); __ j(equal, if_true); - // Regular expressions => 'function', not 'object'. - __ CmpObjectType(rax, JS_REGEXP_TYPE, rdx); - __ j(equal, if_false); + __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdx); + __ j(below, if_false); + __ CmpInstanceType(rdx, FIRST_FUNCTION_CLASS_TYPE); + __ j(above_equal, if_false); // Check for undetectable objects => false. 
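+      // The two instance-type bounds above, plus the undetectable-bit test
+      // below, amount to this predicate (a sketch; the constants are the
+      // ones used in the surrounding code):
+      //
+      //   bool TypeofIsObject(InstanceType t, int bit_field) {
+      //     return t >= FIRST_JS_OBJECT_TYPE &&
+      //            t < FIRST_FUNCTION_CLASS_TYPE &&     // else "function"
+      //            (bit_field & (1 << Map::kIsUndetectable)) == 0;
+      //   }
+      //
+      // (null was already special-cased to true earlier in this block.)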
__ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, if_false); - // Check for JS objects => true. - __ CmpInstanceType(rdx, FIRST_JS_OBJECT_TYPE); - __ j(below, if_false); - __ CmpInstanceType(rdx, LAST_JS_OBJECT_TYPE); - Split(below_equal, if_true, if_false, fall_through); + Split(zero, if_true, if_false, fall_through); } else { if (if_false != fall_through) __ jmp(if_false); } @@ -3693,6 +3893,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) { void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) { + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1); + break; + default: + break; + } + __ call(ic, RelocInfo::CODE_TARGET); if (patch_site != NULL && patch_site->is_bound()) { patch_site->EmitPatchInfo(); diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc index 8c2856f8..b3243cf4 100644 --- a/src/x64/ic-x64.cc +++ b/src/x64/ic-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -108,6 +108,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm, Register name, Register r0, Register r1) { + // Assert that name contains a string. + if (FLAG_debug_code) __ AbortIfNotString(name); + // Compute the capacity mask. const int kCapacityOffset = StringDictionary::kHeaderSize + @@ -763,7 +766,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { +void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key @@ -810,7 +814,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ bind(&slow); __ Integer32ToSmi(rcx, rcx); __ bind(&slow_with_tagged_index); - GenerateRuntimeSetProperty(masm); + GenerateRuntimeSetProperty(masm, strict_mode); // Never returns to here. // Check whether the elements is a pixel array. @@ -819,27 +823,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // rbx: receiver's elements array // rcx: index, zero-extended. __ bind(&check_pixel_array); - __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), - Heap::kPixelArrayMapRootIndex); - __ j(not_equal, &slow); - // Check that the value is a smi. If a conversion is needed call into the - // runtime to convert and clamp. - __ JumpIfNotSmi(rax, &slow); - __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset)); - __ j(above_equal, &slow); - // No more bailouts to slow case on this path, so key not needed. - __ SmiToInteger32(rdi, rax); - { // Clamp the value to [0..255]. - NearLabel done; - __ testl(rdi, Immediate(0xFFFFFF00)); - __ j(zero, &done); - __ setcc(negative, rdi); // 1 if negative, 0 if positive. - __ decb(rdi); // 0 if negative, 255 if positive.
- __ bind(&done); - } - __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset)); - __ movb(Operand(rbx, rcx, times_1, 0), rdi); - __ ret(0); + GenerateFastPixelArrayStore(masm, + rdx, + rcx, + rax, + rbx, + rdi, + false, + true, + NULL, + &slow, + &slow, + &slow); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one @@ -1233,7 +1228,13 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // rsp[(argc + 1) * 8] : argument 0 = receiver // ----------------------------------- + // Check if the name is a string. + Label miss; + __ JumpIfSmi(rcx, &miss); + Condition cond = masm->IsObjectStringType(rcx, rax, rax); + __ j(NegateCondition(cond), &miss); GenerateCallNormal(masm, argc); + __ bind(&miss); GenerateMiss(masm, argc); } @@ -1474,7 +1475,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { void StoreIC::GenerateMegamorphic(MacroAssembler* masm, - Code::ExtraICState extra_ic_state) { + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : name @@ -1486,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, NOT_IN_LOOP, MONOMORPHIC, - extra_ic_state); + strict_mode); StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg); // Cache miss: Jump to runtime. @@ -1593,7 +1594,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { } -void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { +void StoreIC::GenerateGlobalProxy(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : name @@ -1604,14 +1606,17 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { __ push(rdx); __ push(rcx); __ push(rax); - __ push(rbx); + __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ Push(Smi::FromInt(strict_mode)); + __ push(rbx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } -void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { +void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictModeFlag strict_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key @@ -1623,10 +1628,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { __ push(rdx); // receiver __ push(rcx); // key __ push(rax); // value + __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ Push(Smi::FromInt(strict_mode)); // Strict mode. __ push(rbx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); } diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc index 1208b0db..e7156046 100644 --- a/src/x64/jump-target-x64.cc +++ b/src/x64/jump-target-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index 36c9aac2..bd968b95 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -37,6 +37,53 @@ namespace v8 { namespace internal { +// When invoking builtins, we need to record the safepoint in the middle of +// the invoke instruction sequence generated by the macro assembler. +class SafepointGenerator : public PostCallGenerator { + public: + SafepointGenerator(LCodeGen* codegen, + LPointerMap* pointers, + int deoptimization_index, + bool ensure_reloc_space = false) + : codegen_(codegen), + pointers_(pointers), + deoptimization_index_(deoptimization_index), + ensure_reloc_space_(ensure_reloc_space), + previous_safepoint_position_(-kMinSafepointSize) { } + virtual ~SafepointGenerator() { } + + virtual void Generate() { + // Ensure that we have enough space after the previous safepoint position + // for the generated code there. + int position = codegen_->masm()->pc_offset(); + ASSERT(position > previous_safepoint_position_); + if (position < previous_safepoint_position_ + kMinSafepointSize) { + int padding_size = + previous_safepoint_position_ + kMinSafepointSize - position; + STATIC_ASSERT(kMinSafepointSize <= 9); // One multibyte nop is enough. + codegen_->masm()->nop(padding_size); + position += padding_size; + } + // Ensure that we have enough space in the reloc info to patch + // this with calls when doing deoptimization. + if (ensure_reloc_space_) { + codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true); + } + codegen_->RecordSafepoint(pointers_, deoptimization_index_); + previous_safepoint_position_ = position; + } + + private: + static const int kMinSafepointSize = + MacroAssembler::kShortCallInstructionLength; + LCodeGen* codegen_; + LPointerMap* pointers_; + int deoptimization_index_; + bool ensure_reloc_space_; + int previous_safepoint_position_; +}; + + #define __ masm()-> bool LCodeGen::GenerateCode() { @@ -46,6 +93,7 @@ bool LCodeGen::GenerateCode() { return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && + GenerateJumpTable() && GenerateSafepointTable(); } @@ -60,8 +108,8 @@ void LCodeGen::FinishCode(Handle<Code> code) { void LCodeGen::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); + SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); + PrintF("Aborting LCodeGen in @\"%s\": ", *name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -132,6 +180,45 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is still in rdi. + __ push(rdi); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both rax and rsi. It replaces the context + // passed to us. It's saved in the stack and kept live in rsi. 
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); + + // Copy any necessary parameters into the context. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ movq(rax, Operand(rbp, parameter_offset)); + // Store it in the context. + int context_offset = Context::SlotOffset(slot->index()); + __ movq(Operand(rsi, context_offset), rax); + // Update the write barrier. This clobbers all involved + // registers, so we have to use a third register to avoid + // clobbering rsi. + __ movq(rcx, rsi); + __ RecordWrite(rcx, context_offset, rax, rbx); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); @@ -170,6 +257,16 @@ LInstruction* LCodeGen::GetNextInstruction() { } +bool LCodeGen::GenerateJumpTable() { + for (int i = 0; i < jump_table_.length(); i++) { + JumpTableEntry* info = jump_table_[i]; + __ bind(&(info->label_)); + __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY); + } + return !is_aborted(); +} + + bool LCodeGen::GenerateDeferredCode() { ASSERT(is_generating()); for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { @@ -252,8 +349,7 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const { Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { Handle<Object> literal = chunk_->LookupLiteral(op); - Representation r = chunk_->LookupLiteralRepresentation(op); - ASSERT(r.IsTagged()); + ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged()); return literal; } @@ -443,10 +539,17 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { if (cc == no_condition) { __ Jump(entry, RelocInfo::RUNTIME_ENTRY); } else { - NearLabel done; - __ j(NegateCondition(cc), &done); - __ Jump(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&done); + JumpTableEntry* jump_info = NULL; + // We often have several deopts to the same entry; reuse the last + // jump entry if this is the case. + if (jump_table_.length() > 0 && + jump_table_[jump_table_.length() - 1]->address_ == entry) { + jump_info = jump_table_[jump_table_.length() - 1]; + } else { + jump_info = new JumpTableEntry(entry); + jump_table_.Add(jump_info); + } + __ j(cc, &jump_info->label_); } } @@ -458,7 +561,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<DeoptimizationInputData> data = Factory::NewDeoptimizationInputData(length, TENURED); - data->SetTranslationByteArray(*translations_.CreateByteArray()); + Handle<ByteArray> translations = translations_.CreateByteArray(); + data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); Handle<FixedArray> literals = @@ -539,6 +643,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -611,13 +721,13 @@ void LCodeGen::DoCallStub(LCallStub* instr) { break; } case CodeStub::StringCharAt: { - // TODO(1116): Add StringCharAt stub to x64.
- Abort("Unimplemented: %s", "StringCharAt Stub"); + StringCharAtStub stub; + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::MathPow: { - // TODO(1115): Add MathPow stub to x64. - Abort("Unimplemented: %s", "MathPow Stub"); + MathPowStub stub; + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::NumberToString: { @@ -636,7 +746,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) { break; } case CodeStub::TranscendentalCache: { - TranscendentalCacheStub stub(instr->transcendental_type()); + TranscendentalCacheStub stub(instr->transcendental_type(), + TranscendentalCacheStub::TAGGED); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } @@ -652,7 +763,42 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { void LCodeGen::DoModI(LModI* instr) { - Abort("Unimplemented: %s", "DoModI"); + LOperand* right = instr->InputAt(1); + ASSERT(ToRegister(instr->result()).is(rdx)); + ASSERT(ToRegister(instr->InputAt(0)).is(rax)); + ASSERT(!ToRegister(instr->InputAt(1)).is(rax)); + ASSERT(!ToRegister(instr->InputAt(1)).is(rdx)); + + Register right_reg = ToRegister(right); + + // Check for x % 0. + if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + __ testl(right_reg, right_reg); + DeoptimizeIf(zero, instr->environment()); + } + + // Sign extend eax to edx. (We are using only the low 32 bits of the values.) + __ cdq(); + + // Check for (0 % -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + NearLabel positive_left; + NearLabel done; + __ testl(rax, rax); + __ j(not_sign, &positive_left); + __ idivl(right_reg); + + // Test the remainder for 0, because then the result would be -0. + __ testl(rdx, rdx); + __ j(not_zero, &done); + + DeoptimizeIf(no_condition, instr->environment()); + __ bind(&positive_left); + __ idivl(right_reg); + __ bind(&done); + } else { + __ idivl(right_reg); + } } @@ -888,21 +1034,15 @@ void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); XMMRegister res = ToDoubleRegister(instr->result()); double v = instr->value(); + uint64_t int_val = BitCast<uint64_t, double>(v); // Use xor to produce +0.0 in a fast and compact way, but avoid to // do so if the constant is -0.0. - if (BitCast<uint64_t, double>(v) == 0) { + if (int_val == 0) { __ xorpd(res, res); } else { Register tmp = ToRegister(instr->TempAt(0)); - int32_t v_int32 = static_cast<int32_t>(v); - if (static_cast<double>(v_int32) == v) { - __ movl(tmp, Immediate(v_int32)); - __ cvtlsi2sd(res, tmp); - } else { - uint64_t int_val = BitCast<uint64_t, double>(v); - __ Set(tmp, int_val); - __ movd(res, tmp); - } + __ Set(tmp, int_val); + __ movq(res, tmp); } } @@ -935,7 +1075,19 @@ void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) { void LCodeGen::DoValueOf(LValueOf* instr) { - Abort("Unimplemented: %s", "DoValueOf"); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + ASSERT(input.is(result)); + NearLabel done; + // If the object is a smi return the object. + __ JumpIfSmi(input, &done); + + // If the object is not a value type, return the object. 
+ __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister); + __ j(not_equal, &done); + __ movq(result, FieldOperand(input, JSValue::kValueOffset)); + + __ bind(&done); } @@ -978,7 +1130,36 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - Abort("Unimplemented: %s", "DoArithmeticD"); + XMMRegister left = ToDoubleRegister(instr->InputAt(0)); + XMMRegister right = ToDoubleRegister(instr->InputAt(1)); + XMMRegister result = ToDoubleRegister(instr->result()); + // All operations except MOD are computed in-place. + ASSERT(instr->op() == Token::MOD || left.is(result)); + switch (instr->op()) { + case Token::ADD: + __ addsd(left, right); + break; + case Token::SUB: + __ subsd(left, right); + break; + case Token::MUL: + __ mulsd(left, right); + break; + case Token::DIV: + __ divsd(left, right); + break; + case Token::MOD: + __ PrepareCallCFunction(2); + __ movsd(xmm0, left); + ASSERT(right.is(xmm1)); + __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 2); + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ movsd(result, xmm0); + break; + default: + UNREACHABLE(); + break; + } } @@ -1267,7 +1448,7 @@ void LCodeGen::DoIsNull(LIsNull* instr) { __ j(equal, &load); __ movl(result, Immediate(Heap::kFalseValueRootIndex)); __ bind(&load); - __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0)); + __ LoadRootIndexed(result, result, 0); } else { NearLabel true_value, false_value, done; __ j(equal, &true_value); @@ -1398,8 +1579,7 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) { } // result is zero if input is a smi, and one otherwise. ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1); - __ movq(result, Operand(kRootRegister, result, times_pointer_size, - Heap::kTrueValueRootIndex * kPointerSize)); + __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex); } @@ -1440,7 +1620,20 @@ static Condition BranchCondition(HHasInstanceType* instr) { void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) { - Abort("Unimplemented: %s", "DoHasInstanceType"); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + __ testl(input, Immediate(kSmiTagMask)); + NearLabel done, is_false; + __ j(zero, &is_false); + __ CmpObjectType(input, TestType(instr->hydrogen()), result); + __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + __ jmp(&done); + __ bind(&is_false); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ bind(&done); } @@ -1459,8 +1652,32 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { } +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + if (FLAG_debug_code) { + __ AbortIfNotString(input); + } + + __ movl(result, FieldOperand(input, String::kHashFieldOffset)); + ASSERT(String::kHashShift >= kSmiTagSize); + __ IndexFromHash(result, result); +} + + void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { - Abort("Unimplemented: %s", "DoHasCachedArrayIndex"); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + __ testl(FieldOperand(input, String::kHashFieldOffset), + 
Immediate(String::kContainsCachedArrayIndexMask)); + NearLabel done; + __ j(zero, &done); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ bind(&done); } @@ -1473,7 +1690,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch( __ testl(FieldOperand(input, String::kHashFieldOffset), Immediate(String::kContainsCachedArrayIndexMask)); - EmitBranch(true_block, false_block, not_equal); + EmitBranch(true_block, false_block, equal); } @@ -1582,7 +1799,18 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - Abort("Unimplemented: %s", "DoInstanceOf"); + InstanceofStub stub(InstanceofStub::kNoFlags); + __ push(ToRegister(instr->InputAt(0))); + __ push(ToRegister(instr->InputAt(1))); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + NearLabel true_value, done; + __ testq(rax, rax); + __ j(zero, &true_value); + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); + __ jmp(&done); + __ bind(&true_value); + __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); + __ bind(&done); } @@ -1590,7 +1818,9 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - InstanceofStub stub(InstanceofStub::kArgsInRegisters); + InstanceofStub stub(InstanceofStub::kNoFlags); + __ push(ToRegister(instr->InputAt(0))); + __ push(ToRegister(instr->InputAt(1))); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ testq(rax, rax); EmitBranch(true_block, false_block, zero); @@ -1598,13 +1828,63 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { - Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal"); + class DeferredInstanceOfKnownGlobal: public LDeferredCode { + public: + DeferredInstanceOfKnownGlobal(LCodeGen* codegen, + LInstanceOfKnownGlobal* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { + codegen()->DoDeferredLInstanceOfKnownGlobal(instr_); + } + + private: + LInstanceOfKnownGlobal* instr_; + }; + + + DeferredInstanceOfKnownGlobal* deferred; + deferred = new DeferredInstanceOfKnownGlobal(this, instr); + + Label false_result; + Register object = ToRegister(instr->InputAt(0)); + + // A Smi is not an instance of anything. + __ JumpIfSmi(object, &false_result); + + // Null is not an instance of anything. + __ CompareRoot(object, Heap::kNullValueRootIndex); + __ j(equal, &false_result); + + // String values are not instances of anything. 
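+  // Inline fast-path filter emitted around the deferred call, sketched in
+  // plain terms (illustration only; the real work happens in the stub):
+  //
+  //   if (obj->IsSmi())    return false;   // smis are never instances
+  //   if (obj->IsNull())   return false;
+  //   if (obj->IsString()) return false;   // decided without a stub call
+  //   return DeferredInstanceofStubCall(obj);  // hypothetical helper name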
+ __ JumpIfNotString(object, kScratchRegister, deferred->entry()); + + __ bind(&false_result); + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); + + __ bind(deferred->exit()); } -void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check) { - Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl"); +void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { + __ PushSafepointRegisters(); + + InstanceofStub stub(InstanceofStub::kNoFlags); + + __ push(ToRegister(instr->InputAt(0))); + __ Push(instr->function()); + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ movq(kScratchRegister, rax); + __ PopSafepointRegisters(); + __ testq(kScratchRegister, kScratchRegister); + Label load_false; + Label done; + __ j(not_zero, &load_false); + __ LoadRoot(rax, Heap::kTrueValueRootIndex); + __ jmp(&done); + __ bind(&load_false); + __ LoadRoot(rax, Heap::kFalseValueRootIndex); + __ bind(&done); } @@ -1701,7 +1981,21 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Abort("Unimplemented: %s", "DoLoadContextSlot"); + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ movq(result, ContextOperand(context, instr->slot_index())); +} + + +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register value = ToRegister(instr->value()); + __ movq(ContextOperand(context, instr->slot_index()), value); + if (instr->needs_write_barrier()) { + int offset = Context::SlotOffset(instr->slot_index()); + Register scratch = ToRegister(instr->TempAt(0)); + __ RecordWrite(context, offset, value, scratch); + } } @@ -1797,7 +2091,20 @@ void LCodeGen::DoLoadPixelArrayExternalPointer( void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Abort("Unimplemented: %s", "DoAccessArgumentsAt"); + Register arguments = ToRegister(instr->arguments()); + Register length = ToRegister(instr->length()); + Register result = ToRegister(instr->result()); + + if (instr->index()->IsRegister()) { + __ subl(length, ToRegister(instr->index())); + } else { + __ subl(length, ToOperand(instr->index())); + } + DeoptimizeIf(below_equal, instr->environment()); + + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them; the kPointerSize + // displacement in the operand below accounts for the other. + __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize)); } @@ -1831,40 +2138,135 @@ void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) { void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoLoadKeyedGeneric"); + ASSERT(ToRegister(instr->object()).is(rdx)); + ASSERT(ToRegister(instr->key()).is(rax)); + + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + CallCode(ic, RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Abort("Unimplemented: %s", "DoArgumentsElements"); + Register result = ToRegister(instr->result()); + + // Check for arguments adaptor frame. + NearLabel done, adapted; + __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(equal, &adapted); + + // No arguments adaptor frame.
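+  // The walk being emitted, in ordinary C++ terms (a sketch; the Memory
+  // accessors are used loosely, for illustration only):
+  //
+  //   Address caller = Memory::Address_at(rbp + kCallerFPOffset);
+  //   bool adapted = Memory::Object_at(caller + kContextOffset) ==
+  //                  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
+  //   return adapted ? caller : rbp;
+  //
+  // In the non-adapted case just return rbp: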
+ __ movq(result, rbp); + __ jmp(&done); + + // Arguments adaptor frame present. + __ bind(&adapted); + __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + + // The result is the frame pointer for this frame if not adapted, and for + // the real frame below the adaptor frame if adapted. + __ bind(&done); } void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Abort("Unimplemented: %s", "DoArgumentsLength"); + Register result = ToRegister(instr->result()); + + NearLabel done; + + // If there is no arguments adaptor frame, the number of arguments is fixed. + if (instr->InputAt(0)->IsRegister()) { + __ cmpq(rbp, ToRegister(instr->InputAt(0))); + } else { + __ cmpq(rbp, ToOperand(instr->InputAt(0))); + } + __ movq(result, Immediate(scope()->num_parameters())); + __ j(equal, &done); + + // Arguments adaptor frame present. Get argument length from there. + __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ movq(result, Operand(result, + ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiToInteger32(result, result); + + // Argument length is in result register. + __ bind(&done); } void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Abort("Unimplemented: %s", "DoApplyArguments"); + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + Register length = ToRegister(instr->length()); + Register elements = ToRegister(instr->elements()); + ASSERT(receiver.is(rax)); // Used for parameter count. + ASSERT(function.is(rdi)); // Required by InvokeFunction. + ASSERT(ToRegister(instr->result()).is(rax)); + + // If the receiver is null or undefined, we have to pass the global object + // as a receiver. + NearLabel global_object, receiver_ok; + __ CompareRoot(receiver, Heap::kNullValueRootIndex); + __ j(equal, &global_object); + __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); + __ j(equal, &global_object); + + // The receiver should be a JS object. + Condition is_smi = __ CheckSmi(receiver); + DeoptimizeIf(is_smi, instr->environment()); + __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister); + DeoptimizeIf(below, instr->environment()); + __ jmp(&receiver_ok); + + __ bind(&global_object); + // TODO(kmillikin): We have a hydrogen value for the global object. See + // if it's better to use it than to explicitly fetch it from the context + // here. + __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX)); + __ bind(&receiver_ok); + + // Copy the arguments to this function, possibly from the + // adaptor frame below it. + const uint32_t kArgumentsLimit = 1 * KB; + __ cmpq(length, Immediate(kArgumentsLimit)); + DeoptimizeIf(above, instr->environment()); + + __ push(receiver); + __ movq(receiver, length); + + // Loop through the arguments pushing them onto the execution + // stack. + NearLabel invoke, loop; + // length is a small non-negative integer, due to the test above. + __ testl(length, length); + __ j(zero, &invoke); + __ bind(&loop); + __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); + __ decl(length); + __ j(not_zero, &loop); + + // Invoke the function.
+ __ bind(&invoke); + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + LPointerMap* pointers = instr->pointer_map(); + LEnvironment* env = instr->deoptimization_environment(); + RecordPosition(pointers->position()); + RegisterEnvironmentForDeoptimization(env); + SafepointGenerator safepoint_generator(this, + pointers, + env->deoptimization_index(), + true); + v8::internal::ParameterCount actual(rax); + __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); } void LCodeGen::DoPushArgument(LPushArgument* instr) { LOperand* argument = instr->InputAt(0); if (argument->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(argument); - Handle<Object> literal = chunk_->LookupLiteral(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); - __ push(Immediate(static_cast<int32_t>(literal->Number()))); - } else if (r.IsDouble()) { - Abort("unsupported double immediate"); - } else { - ASSERT(r.IsTagged()); - __ Push(literal); - } + EmitPushConstantOperand(argument); } else if (argument->IsRegister()) { __ push(ToRegister(argument)); } else { @@ -1880,6 +2282,15 @@ void LCodeGen::DoContext(LContext* instr) { } +void LCodeGen::DoOuterContext(LOuterContext* instr) { + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ movq(result, + Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ movq(result, FieldOperand(result, JSFunction::kContextOffset)); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { Register result = ToRegister(instr->result()); __ movq(result, GlobalObjectOperand()); @@ -1898,7 +2309,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, LInstruction* instr) { // Change context if needed. bool change_context = - (graph()->info()->closure()->context() != function->context()) || + (info()->closure()->context() != function->context()) || scope()->contains_with() || (scope()->num_heap_slots() > 0); if (change_context) { @@ -1915,7 +2326,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, RecordPosition(pointers->position()); // Invoke function. - if (*function == *graph()->info()->closure()) { + if (*function == *info()->closure()) { __ CallSelf(); } else { __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); @@ -1937,62 +2348,299 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber"); + Register input_reg = ToRegister(instr->InputAt(0)); + __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + DeoptimizeIf(not_equal, instr->environment()); + + Label done; + Register tmp = input_reg.is(rax) ? rcx : rax; + Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; + + // Preserve the value of all registers. + __ PushSafepointRegisters(); + + Label negative; + __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + // Check the sign of the argument. If the argument is positive, just + // return it. We do not need to patch the stack since |input| and + // |result| are the same register and |input| will be restored + // unchanged by popping safepoint registers. 
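+  // What this deferred path computes for heap numbers, in C terms (a
+  // sketch; BitCast is the helper already used elsewhere in this file):
+  //
+  //   double AbsHeapNumber(double d) {
+  //     uint64_t bits = BitCast<uint64_t, double>(d);
+  //     bits = (bits << 1) >> 1;   // clear the IEEE-754 sign bit
+  //     return BitCast<double, uint64_t>(bits);
+  //   }
+  //
+  // which is exactly the shl/shr pair applied to kValueOffset further down.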
+ __ testl(tmp, Immediate(HeapNumber::kSignMask)); + __ j(not_zero, &negative); + __ jmp(&done); + + __ bind(&negative); + + Label allocated, slow; + __ AllocateHeapNumber(tmp, tmp2, &slow); + __ jmp(&allocated); + + // Slow case: Call the runtime system to do the number allocation. + __ bind(&slow); + + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + // Set the pointer to the new heap number in tmp. + if (!tmp.is(rax)) { + __ movq(tmp, rax); + } + + // Restore input_reg after call to runtime. + __ LoadFromSafepointRegisterSlot(input_reg, input_reg); + + __ bind(&allocated); + __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ shl(tmp2, Immediate(1)); + __ shr(tmp2, Immediate(1)); + __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); + __ StoreToSafepointRegisterSlot(input_reg, tmp); + + __ bind(&done); + __ PopSafepointRegisters(); +} + + +void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { + Register input_reg = ToRegister(instr->InputAt(0)); + __ testl(input_reg, input_reg); + Label is_positive; + __ j(not_sign, &is_positive); + __ negl(input_reg); // Sets flags. + DeoptimizeIf(negative, instr->environment()); + __ bind(&is_positive); } void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathAbs"); + // Class for deferred case. + class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { + public: + DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, + LUnaryMathOperation* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { + codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); + } + private: + LUnaryMathOperation* instr_; + }; + + ASSERT(instr->InputAt(0)->Equals(instr->result())); + Representation r = instr->hydrogen()->value()->representation(); + + if (r.IsDouble()) { + XMMRegister scratch = xmm0; + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + __ xorpd(scratch, scratch); + __ subsd(scratch, input_reg); + __ andpd(input_reg, scratch); + } else if (r.IsInteger32()) { + EmitIntegerMathAbs(instr); + } else { // Tagged case. + DeferredMathAbsTaggedHeapNumber* deferred = + new DeferredMathAbsTaggedHeapNumber(this, instr); + Register input_reg = ToRegister(instr->InputAt(0)); + // Smi check. + __ JumpIfNotSmi(input_reg, deferred->entry()); + EmitIntegerMathAbs(instr); + __ bind(deferred->exit()); + } } void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathFloor"); + XMMRegister xmm_scratch = xmm0; + Register output_reg = ToRegister(instr->result()); + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. + __ ucomisd(input_reg, xmm_scratch); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(below_equal, instr->environment()); + } else { + DeoptimizeIf(below, instr->environment()); + } + + // Use truncating instruction (OK because input is positive). + __ cvttsd2si(output_reg, input_reg); + + // Overflow is signalled with minint. 
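+  // cvttsd2si produces 0x80000000 for NaN and for anything outside int32
+  // range, so that single value doubles as the overflow marker. A sketch
+  // using the SSE2 intrinsics, for illustration:
+  //
+  //   int32_t r = _mm_cvttsd_si32(_mm_set_sd(x));
+  //   if (r == INT32_MIN) Deoptimize();  // also fires for a legitimate
+  //                                      // INT32_MIN; deopting is still
+  //                                      // correct, merely slower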
+ __ cmpl(output_reg, Immediate(0x80000000)); + DeoptimizeIf(equal, instr->environment()); } void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathRound"); + const XMMRegister xmm_scratch = xmm0; + Register output_reg = ToRegister(instr->result()); + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + + // xmm_scratch = 0.5 + __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE); + __ movq(xmm_scratch, kScratchRegister); + + // input = input + 0.5 + __ addsd(input_reg, xmm_scratch); + + // We need to return -0 for the input range [-0.5, 0); otherwise + // compute Math.floor(value + 0.5). + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ ucomisd(input_reg, xmm_scratch); + DeoptimizeIf(below_equal, instr->environment()); + } else { + // If we don't need to bail out on -0, we only check for bailout + // on negative inputs. + __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. + __ ucomisd(input_reg, xmm_scratch); + DeoptimizeIf(below, instr->environment()); + } + + // Compute Math.floor(value + 0.5). + // Use truncating instruction (OK because input is positive). + __ cvttsd2si(output_reg, input_reg); + + // Overflow is signalled with minint. + __ cmpl(output_reg, Immediate(0x80000000)); + DeoptimizeIf(equal, instr->environment()); } void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathSqrt"); + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); + __ sqrtsd(input_reg, input_reg); } void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathPowHalf"); + XMMRegister xmm_scratch = xmm0; + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); + __ xorpd(xmm_scratch, xmm_scratch); + __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. + __ sqrtsd(input_reg, input_reg); } void LCodeGen::DoPower(LPower* instr) { - Abort("Unimplemented: %s", "DoPower"); + LOperand* left = instr->InputAt(0); + XMMRegister left_reg = ToDoubleRegister(left); + ASSERT(!left_reg.is(xmm1)); + LOperand* right = instr->InputAt(1); + XMMRegister result_reg = ToDoubleRegister(instr->result()); + Representation exponent_type = instr->hydrogen()->right()->representation(); + if (exponent_type.IsDouble()) { + __ PrepareCallCFunction(2); + // Move arguments to the correct registers. + __ movsd(xmm0, left_reg); + ASSERT(ToDoubleRegister(right).is(xmm1)); + __ CallCFunction(ExternalReference::power_double_double_function(), 2); + } else if (exponent_type.IsInteger32()) { + __ PrepareCallCFunction(2); + // Move arguments to the correct registers: xmm0 and edi (not rdi). + // On Windows, the registers are xmm0 and edx.
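+  // Assumed C signature and argument registers for the call below:
+  //
+  //   double power_double_int(double base, int exponent);
+  //   // System V AMD64: base -> xmm0, exponent -> edi (1st integer reg)
+  //   // Win64:          base -> xmm0, exponent -> edx (2nd argument slot,
+  //   //                 since Win64 assigns registers positionally)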
+ __ movsd(xmm0, left_reg); +#ifdef _WIN64 + ASSERT(ToRegister(right).is(rdx)); +#else + ASSERT(ToRegister(right).is(rdi)); +#endif + __ CallCFunction(ExternalReference::power_double_int_function(), 2); + } else { + ASSERT(exponent_type.IsTagged()); + CpuFeatures::Scope scope(SSE2); + Register right_reg = ToRegister(right); + + Label non_smi, call; + __ JumpIfNotSmi(right_reg, &non_smi); + __ SmiToInteger32(right_reg, right_reg); + __ cvtlsi2sd(xmm1, right_reg); + __ jmp(&call); + + __ bind(&non_smi); + __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister); + DeoptimizeIf(not_equal, instr->environment()); + __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset)); + + __ bind(&call); + __ PrepareCallCFunction(2); + // Move arguments to the correct registers xmm0 and xmm1. + __ movsd(xmm0, left_reg); + // Right argument is already in xmm1. + __ CallCFunction(ExternalReference::power_double_double_function(), 2); + } + // Return value is in xmm0. + __ movsd(result_reg, xmm0); } void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathLog"); + ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); + TranscendentalCacheStub stub(TranscendentalCache::LOG, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathCos"); + ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); + TranscendentalCacheStub stub(TranscendentalCache::COS, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathSin"); + ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); + TranscendentalCacheStub stub(TranscendentalCache::SIN, + TranscendentalCacheStub::UNTAGGED); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoUnaryMathOperation"); + switch (instr->op()) { + case kMathAbs: + DoMathAbs(instr); + break; + case kMathFloor: + DoMathFloor(instr); + break; + case kMathRound: + DoMathRound(instr); + break; + case kMathSqrt: + DoMathSqrt(instr); + break; + case kMathPowHalf: + DoMathPowHalf(instr); + break; + case kMathCos: + DoMathCos(instr); + break; + case kMathSin: + DoMathSin(instr); + break; + case kMathLog: + DoMathLog(instr); + break; + + default: + UNREACHABLE(); + } } void LCodeGen::DoCallKeyed(LCallKeyed* instr) { - Abort("Unimplemented: %s", "DoCallKeyed"); + ASSERT(ToRegister(instr->key()).is(rcx)); + ASSERT(ToRegister(instr->result()).is(rax)); + + int arity = instr->arity(); + Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); } @@ -2008,7 +2656,13 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) { void LCodeGen::DoCallFunction(LCallFunction* instr) { - Abort("Unimplemented: %s", "DoCallFunction"); + ASSERT(ToRegister(instr->result()).is(rax)); + + int arity = instr->arity(); + CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ Drop(1); } @@ -2040,7 +2694,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) { void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - Abort("Unimplemented: %s", "DoCallRuntime"); +
CallRuntime(instr->function(), instr->arity(), instr); } @@ -2075,7 +2729,32 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreNamedGeneric"); + ASSERT(ToRegister(instr->object()).is(rdx)); + ASSERT(ToRegister(instr->value()).is(rax)); + + __ Move(rcx, instr->hydrogen()->name()); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) { + Register external_pointer = ToRegister(instr->external_pointer()); + Register key = ToRegister(instr->key()); + Register value = ToRegister(instr->value()); + + { // Clamp the value to [0..255]. + NearLabel done; + __ testl(value, Immediate(0xFFFFFF00)); + __ j(zero, &done); + __ setcc(negative, value); // 1 if negative, 0 if positive. + __ decb(value); // 0 if negative, 255 if positive. + __ bind(&done); + } + + __ movb(Operand(external_pointer, key, times_1, 0), value); } @@ -2121,7 +2800,161 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreKeyedGeneric"); + ASSERT(ToRegister(instr->object()).is(rdx)); + ASSERT(ToRegister(instr->key()).is(rcx)); + ASSERT(ToRegister(instr->value()).is(rax)); + + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { + class DeferredStringCharCodeAt: public LDeferredCode { + public: + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + private: + LStringCharCodeAt* instr_; + }; + + Register string = ToRegister(instr->string()); + Register index = no_reg; + int const_index = -1; + if (instr->index()->IsConstantOperand()) { + const_index = ToInteger32(LConstantOperand::cast(instr->index())); + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (!Smi::IsValid(const_index)) { + // Guaranteed to be out of bounds because of the assert above. + // So the bounds check that must dominate this instruction must + // have deoptimized already. + if (FLAG_debug_code) { + __ Abort("StringCharCodeAt: out of bounds index."); + } + // No code needs to be generated. + return; + } + } else { + index = ToRegister(instr->index()); + } + Register result = ToRegister(instr->result()); + + DeferredStringCharCodeAt* deferred = + new DeferredStringCharCodeAt(this, instr); + + NearLabel flat_string, ascii_string, done; + + // Fetch the instance type of the receiver into result register. + __ movq(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for non-sequential strings. + STATIC_ASSERT(kSeqStringTag == 0); + __ testb(result, Immediate(kStringRepresentationMask)); + __ j(zero, &flat_string); + + // Handle cons strings and go to deferred code for the rest. + __ testb(result, Immediate(kIsConsStringMask)); + __ j(zero, deferred->entry()); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. 
if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset), + Heap::kEmptyStringRootIndex); + __ j(not_equal, deferred->entry()); + // Get the first of the two strings and load its instance type. + __ movq(string, FieldOperand(string, ConsString::kFirstOffset)); + __ movq(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ testb(result, Immediate(kStringRepresentationMask)); + __ j(not_zero, deferred->entry()); + + // Check for ASCII or two-byte string. + __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ testb(result, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii_string); + + // Two-byte string. + // Load the two-byte character code into the result register. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + if (instr->index()->IsConstantOperand()) { + __ movzxwl(result, + FieldOperand(string, + SeqTwoByteString::kHeaderSize + + (kUC16Size * const_index))); + } else { + __ movzxwl(result, FieldOperand(string, + index, + times_2, + SeqTwoByteString::kHeaderSize)); + } + __ jmp(&done); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + if (instr->index()->IsConstantOperand()) { + __ movzxbl(result, FieldOperand(string, + SeqAsciiString::kHeaderSize + const_index)); + } else { + __ movzxbl(result, FieldOperand(string, + index, + times_1, + SeqAsciiString::kHeaderSize)); + } + __ bind(&done); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ Set(result, 0); + + __ PushSafepointRegisters(); + __ push(string); + // Push the index as a smi. This is safe because of the checks in + // DoStringCharCodeAt above. 
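+  // On x64, V8 keeps a smi's 32-bit payload in the upper half of the word;
+  // Integer32ToSmi is, in effect (sketch):
+  //
+  //   int64_t smi_bits = static_cast<int64_t>(value) << 32;
+  //
+  // so any index below String::kMaxLength is trivially representable,
+  // which is what the STATIC_ASSERT below pins down.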
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + __ Push(Smi::FromInt(const_index)); + } else { + Register index = ToRegister(instr->index()); + __ Integer32ToSmi(index, index); + __ push(index); + } + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex); + if (FLAG_debug_code) { + __ AbortIfNotSmi(rax); + } + __ SmiToInteger32(rax, rax); + __ StoreToSafepointRegisterSlot(result, rax); + __ PopSafepointRegisters(); +} + + +void LCodeGen::DoStringLength(LStringLength* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + __ movq(result, FieldOperand(string, String::kLengthOffset)); } @@ -2130,7 +2963,11 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); ASSERT(output->IsDoubleRegister()); - __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); + if (input->IsRegister()) { + __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); + } else { + __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); + } } @@ -2233,7 +3070,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, // Smi to XMM conversion __ bind(&load_smi); - __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first. + __ SmiToInteger32(kScratchRegister, input_reg); __ cvtlsi2sd(result_reg, kScratchRegister); __ bind(&done); } @@ -2310,12 +3147,55 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - Abort("Unimplemented: %s", "DoNumberUntagD"); + LOperand* input = instr->InputAt(0); + ASSERT(input->IsRegister()); + LOperand* result = instr->result(); + ASSERT(result->IsDoubleRegister()); + + Register input_reg = ToRegister(input); + XMMRegister result_reg = ToDoubleRegister(result); + + EmitNumberUntagD(input_reg, result_reg, instr->environment()); } void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Abort("Unimplemented: %s", "DoDoubleToI"); + LOperand* input = instr->InputAt(0); + ASSERT(input->IsDoubleRegister()); + LOperand* result = instr->result(); + ASSERT(result->IsRegister()); + + XMMRegister input_reg = ToDoubleRegister(input); + Register result_reg = ToRegister(result); + + if (instr->truncating()) { + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. + __ cvttsd2siq(result_reg, input_reg); + __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE); + __ cmpq(result_reg, kScratchRegister); + DeoptimizeIf(equal, instr->environment()); + } else { + __ cvttsd2si(result_reg, input_reg); + __ cvtlsi2sd(xmm0, result_reg); + __ ucomisd(xmm0, input_reg); + DeoptimizeIf(not_equal, instr->environment()); + DeoptimizeIf(parity_even, instr->environment()); // NaN. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + NearLabel done; + // The integer converted back is equal to the original, so we + // only have to test whether we got -0 as an input. + __ testl(result_reg, result_reg); + __ j(not_zero, &done); + __ movmskpd(result_reg, input_reg); + // Bit 0 contains the sign of the double in input_reg. + // If the input was positive, we are ok and return 0; otherwise + // deoptimize.
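+      // movmskpd packs the sign bits of both double lanes into the low two
+      // bits of the destination, so after the andl below (sketch):
+      //
+      //   result == 0  ->  input was +0.0, keep the converted zero
+      //   result == 1  ->  input was -0.0, deoptimize (no int32 encodes -0)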
+ __ andl(result_reg, Immediate(1)); + DeoptimizeIf(not_zero, instr->environment()); + __ bind(&done); + } + } } @@ -2464,7 +3344,54 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - Abort("Unimplemented: %s", "DoRegExpLiteral"); + NearLabel materialized; + // Registers will be used as follows: + // rdi = JS function. + // rcx = literals array. + // rbx = regexp literal. + // rax = regexp literal clone. + __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset)); + int literal_offset = FixedArray::kHeaderSize + + instr->hydrogen()->literal_index() * kPointerSize; + __ movq(rbx, FieldOperand(rcx, literal_offset)); + __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); + __ j(not_equal, &materialized); + + // Create regexp literal using runtime function + // Result will be in rax. + __ push(rcx); + __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); + __ Push(instr->hydrogen()->pattern()); + __ Push(instr->hydrogen()->flags()); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); + __ movq(rbx, rax); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ push(rbx); + __ Push(Smi::FromInt(size)); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); + __ pop(rbx); + + __ bind(&allocated); + // Copy the content into the newly allocated memory. + // (Unroll copy loop once for better throughput). + for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { + __ movq(rdx, FieldOperand(rbx, i)); + __ movq(rcx, FieldOperand(rbx, i + kPointerSize)); + __ movq(FieldOperand(rax, i), rdx); + __ movq(FieldOperand(rax, i + kPointerSize), rcx); + } + if ((size % (2 * kPointerSize)) != 0) { + __ movq(rdx, FieldOperand(rbx, size - kPointerSize)); + __ movq(FieldOperand(rax, size - kPointerSize), rdx); + } } @@ -2487,60 +3414,56 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { void LCodeGen::DoTypeof(LTypeof* instr) { - Abort("Unimplemented: %s", "DoTypeof"); + LOperand* input = instr->InputAt(0); + if (input->IsConstantOperand()) { + __ Push(ToHandle(LConstantOperand::cast(input))); + } else if (input->IsRegister()) { + __ push(ToRegister(input)); + } else { + ASSERT(input->IsStackSlot()); + __ push(ToOperand(input)); + } + CallRuntime(Runtime::kTypeof, 1, instr); } void LCodeGen::DoTypeofIs(LTypeofIs* instr) { - Abort("Unimplemented: %s", "DoTypeofIs"); -} - - -void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) { + Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); - NearLabel true_label; - NearLabel false_label; + Label true_label; + Label false_label; NearLabel done; - EmitIsConstructCall(result); - __ j(equal, &true_label); - + Condition final_branch_condition = EmitTypeofIs(&true_label, + &false_label, + input, + instr->type_literal()); + __ j(final_branch_condition, &true_label); + __ bind(&false_label); __ LoadRoot(result, Heap::kFalseValueRootIndex); __ jmp(&done); __ bind(&true_label); __ LoadRoot(result, Heap::kTrueValueRootIndex); - __ bind(&done); } -void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { - Register temp = ToRegister(instr->TempAt(0)); - int true_block = chunk_->LookupDestination(instr->true_block_id()); - int 
false_block = chunk_->LookupDestination(instr->false_block_id()); - - EmitIsConstructCall(temp); - EmitBranch(true_block, false_block, equal); -} - - -void LCodeGen::EmitIsConstructCall(Register temp) { - // Get the frame pointer for the calling frame. - __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - - // Skip the arguments adaptor frame if it exists. - NearLabel check_frame_marker; - __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(not_equal, &check_frame_marker); - __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset)); - - // Check the marker in the calling frame. - __ bind(&check_frame_marker); - __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset), - Smi::FromInt(StackFrame::CONSTRUCT)); +void LCodeGen::EmitPushConstantOperand(LOperand* operand) { + ASSERT(operand->IsConstantOperand()); + LConstantOperand* const_op = LConstantOperand::cast(operand); + Handle<Object> literal = chunk_->LookupLiteral(const_op); + Representation r = chunk_->LookupLiteralRepresentation(const_op); + if (r.IsInteger32()) { + ASSERT(literal->IsNumber()); + __ push(Immediate(static_cast<int32_t>(literal->Number()))); + } else if (r.IsDouble()) { + Abort("unsupported double immediate"); + } else { + ASSERT(r.IsTagged()); + __ Push(literal); + } } @@ -2573,12 +3496,11 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, } else if (type_name->Equals(Heap::string_symbol())) { __ JumpIfSmi(input, false_label); - __ movq(input, FieldOperand(input, HeapObject::kMapOffset)); + __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); + __ j(above_equal, false_label); __ testb(FieldOperand(input, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, false_label); - __ CmpInstanceType(input, FIRST_NONSTRING_TYPE); - final_branch_condition = below; + final_branch_condition = zero; } else if (type_name->Equals(Heap::boolean_symbol())) { __ CompareRoot(input, Heap::kTrueValueRootIndex); @@ -2603,17 +3525,16 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, } else if (type_name->Equals(Heap::object_symbol())) { __ JumpIfSmi(input, false_label); - __ Cmp(input, Factory::null_value()); + __ CompareRoot(input, Heap::kNullValueRootIndex); __ j(equal, true_label); + __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input); + __ j(below, false_label); + __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE); + __ j(above_equal, false_label); // Check for undetectable objects => false. __ testb(FieldOperand(input, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, false_label); - // Check for JS objects that are not RegExp or Function => true. 
-    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
-    __ j(below, false_label);
-    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
-    final_branch_condition = below_equal;
+    final_branch_condition = zero;

   } else {
     final_branch_condition = never;
@@ -2624,6 +3545,54 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
 }
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+  Register result = ToRegister(instr->result());
+  NearLabel true_label;
+  NearLabel false_label;
+  NearLabel done;
+
+  EmitIsConstructCall(result);
+  __ j(equal, &true_label);
+
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&true_label);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+  Register temp = ToRegister(instr->TempAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitIsConstructCall(temp);
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+  // Get the frame pointer for the calling frame.
+  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  NearLabel check_frame_marker;
+  __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &check_frame_marker);
+  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
+                Smi::FromInt(StackFrame::CONSTRUCT));
+}
+
+
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   // No code for lazy bailout instruction. Used to capture environment after a
   // call for populating the safepoint data with deoptimization data.
@@ -2636,7 +3605,36 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
-  Abort("Unimplemented: %s", "DoDeleteProperty");
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  // Push object.
+  if (obj->IsRegister()) {
+    __ push(ToRegister(obj));
+  } else {
+    __ push(ToOperand(obj));
+  }
+  // Push key.
+  if (key->IsConstantOperand()) {
+    EmitPushConstantOperand(key);
+  } else if (key->IsRegister()) {
+    __ push(ToRegister(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  // Create a safepoint generator that will also ensure enough space in the
+  // reloc info for patching in deoptimization (since this is invoking a
+  // builtin).
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index(),
+                                         true);
+  __ Push(Smi::FromInt(strict_mode_flag()));
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
 }
@@ -2653,7 +3651,19 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
-  Abort("Unimplemented: %s", "DoOsrEntry");
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+ LEnvironment* environment = instr->environment(); + environment->SetSpilledRegisters(instr->SpilledRegisterArray(), + instr->SpilledDoubleRegisterArray()); + + // If the environment were already registered, we would have no way of + // backpatching it with the spill slot operands. + ASSERT(!environment->HasBeenRegistered()); + RegisterEnvironmentForDeoptimization(environment); + ASSERT(osr_pc_offset_ == -1); + osr_pc_offset_ = masm()->pc_offset(); } #undef __ diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h index 6f8f06e3..ab0dffb1 100644 --- a/src/x64/lithium-codegen-x64.h +++ b/src/x64/lithium-codegen-x64.h @@ -53,9 +53,10 @@ class LCodeGen BASE_EMBEDDED { current_instruction_(-1), instructions_(chunk->instructions()), deoptimizations_(4), + jump_table_(4), deoptimization_literals_(8), inlined_function_count_(0), - scope_(chunk->graph()->info()->scope()), + scope_(info->scope()), status_(UNUSED), deferred_(8), osr_pc_offset_(-1), @@ -65,6 +66,7 @@ class LCodeGen BASE_EMBEDDED { // Simple accessors. MacroAssembler* masm() const { return masm_; } + CompilationInfo* info() const { return info_; } // Support for converting LOperands to assembler types. Register ToRegister(LOperand* op) const; @@ -90,8 +92,8 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredTaggedToI(LTaggedToI* instr); void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr); void DoDeferredStackCheck(LGoto* instr); - void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check); + void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); + void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -117,6 +119,10 @@ class LCodeGen BASE_EMBEDDED { bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + int strict_mode_flag() const { + return info()->is_strict() ? kStrictMode : kNonStrictMode; + } + LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } @@ -143,6 +149,7 @@ class LCodeGen BASE_EMBEDDED { bool GeneratePrologue(); bool GenerateBody(); bool GenerateDeferredCode(); + bool GenerateJumpTable(); bool GenerateSafepointTable(); void CallCode(Handle<Code> code, @@ -182,6 +189,7 @@ class LCodeGen BASE_EMBEDDED { XMMRegister ToDoubleRegister(int index) const; // Specific math operations - used from DoUnaryMathOperation. + void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); void DoMathFloor(LUnaryMathOperation* instr); void DoMathRound(LUnaryMathOperation* instr); @@ -197,6 +205,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); @@ -225,6 +234,17 @@ class LCodeGen BASE_EMBEDDED { // Caller should branch on equal condition. void EmitIsConstructCall(Register temp); + // Emits code for pushing a constant operand. 
+  void EmitPushConstantOperand(LOperand* operand);
+
+  struct JumpTableEntry {
+    inline JumpTableEntry(Address address)
+        : label_(),
+          address_(address) { }
+    Label label_;
+    Address address_;
+  };
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -233,6 +253,7 @@
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry*> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a6afbf72..bf4d5a16 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -296,8 +296,15 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) {
 }
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
-  stream->Add("[ecx] #%d / ", arity());
+  stream->Add("[rcx] #%d / ", arity());
 }
@@ -398,7 +405,16 @@ void LChunk::MarkEmptyBlocks() {
 }
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(".");
   stream->Add(*String::cast(*name())->ToCString());
@@ -407,7 +423,16 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
 }
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add("[");
   key()->PrintTo(stream);
@@ -445,7 +470,7 @@ int LChunk::GetParameterStackSlot(int index) const {
   // shift all parameter indexes down by the number of parameters, and
   // make sure they end up negative so they are distinguishable from
   // spill slots.
-  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  int result = index - info()->scope()->num_parameters() - 1;
   ASSERT(result < 0);
   return result;
 }

 // A parameter relative to rbp in the arguments stub.
 int LChunk::ParameterAt(int index) {
   ASSERT(-1 <= index);  // -1 is the receiver.
-  return (1 + graph()->info()->scope()->num_parameters() - index) *
+  return (1 + info()->scope()->num_parameters() - index) *
       kPointerSize;
 }
@@ -492,7 +517,7 @@ Representation LChunk::LookupLiteralRepresentation(
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new LChunk(graph());
+  chunk_ = new LChunk(info(), graph());
   HPhase phase("Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -509,8 +534,8 @@ LChunk* LChunkBuilder::Build() {
 void LChunkBuilder::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
-    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LChunk building in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
     OS::VPrint(format, arguments);
@@ -843,8 +868,14 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  Abort("Unimplemented: %s", "DoArithmeticD");
-  return NULL;
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  ASSERT(op != Token::MOD);
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineSameAsFirst(result);
 }
@@ -1073,9 +1104,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
   } else if (v->IsInstanceOf()) {
     HInstanceOf* instance_of = HInstanceOf::cast(v);
     LInstanceOfAndBranch* result =
-        new LInstanceOfAndBranch(
-            UseFixed(instance_of->left(), InstanceofStub::left()),
-            UseFixed(instance_of->right(), InstanceofStub::right()));
+        new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
+                                 UseFixed(instance_of->right(), rdx));
     return MarkAsCall(result, instr);
   } else if (v->IsTypeofIs()) {
     HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1106,33 +1136,41 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  Abort("Unimplemented: %s", "DoArgumentsLength");
-  return NULL;
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
 }
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  Abort("Unimplemented: %s", "DoArgumentsElements");
-  return NULL;
+  return DefineAsRegister(new LArgumentsElements);
 }
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  Abort("Unimplemented: %s", "DoInstanceOf");
-  return NULL;
+  LOperand* left = UseFixed(instr->left(), rax);
+  LOperand* right = UseFixed(instr->right(), rdx);
+  LInstanceOf* result = new LInstanceOf(left, right);
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
-  Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal");
-  return NULL;
+  LInstanceOfKnownGlobal* result =
+      new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax));
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
-  Abort("Unimplemented: %s", "DoApplyArguments");
-  return NULL;
+  LOperand* function = UseFixed(instr->function(), rdi);
+  LOperand* receiver = UseFixed(instr->receiver(), rax);
+  LOperand* length = UseFixed(instr->length(), rbx);
+  LOperand* elements = UseFixed(instr->elements(), rcx);
+  LApplyArguments* result = new LApplyArguments(function,
+                                                receiver,
+                                                length,
+                                                elements);
+  return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
@@ -1149,8 +1187,8 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
-  Abort("Unimplemented: DoOuterContext");
-  return NULL;
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
 }
@@ -1172,14 +1210,39 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoUnaryMathOperation");
-  return NULL;
+  BuiltinFunctionId op = instr->op();
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+    LOperand* input = UseFixedDouble(instr->value(), xmm1);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  } else {
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    switch (op) {
+      case kMathAbs:
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      case kMathFloor:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathRound:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathSqrt:
+        return DefineSameAsFirst(result);
+      case kMathPowHalf:
+        return DefineSameAsFirst(result);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
 }
 LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
-  Abort("Unimplemented: %s", "DoCallKeyed");
-  return NULL;
+  ASSERT(instr->key()->representation().IsTagged());
+  LOperand* key = UseFixed(instr->key(), rcx);
+  argument_count_ -= instr->argument_count();
+  LCallKeyed* result = new LCallKeyed(key);
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
@@ -1210,8 +1273,9 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  Abort("Unimplemented: %s", "DoCallFunction");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  LCallFunction* result = new LCallFunction();
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
@@ -1279,8 +1343,32 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
-  Abort("Unimplemented: %s", "DoMod");
-  return NULL;
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not
+    // allocated into rdx.
+    LOperand* temp = FixedTemp(rdx);
+    LOperand* value = UseFixed(instr->left(), rax);
+    LOperand* divisor = UseRegister(instr->right());
+    LModI* mod = new LModI(value, divisor, temp);
+    LInstruction* result = DefineFixed(mod, rdx);
+    return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+            instr->CheckFlag(HValue::kCanBeDivByZero))
+        ? AssignEnvironment(result)
+        : result;
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
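    // A sketch of the C helper this ends up calling, assuming it is
    // fmod-like (the exact runtime function is not shown in this patch):
    //
    //   double modulo(double x, double y) { return std::fmod(x, y); }
    //
    // Because such a helper neither allocates nor re-enters JS, the call
    // cannot trigger a GC, so fixed XMM argument registers suffice.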
+ LOperand* left = UseFixedDouble(instr->left(), xmm2); + LOperand* right = UseFixedDouble(instr->right(), xmm1); + LArithmeticD* result = new LArithmeticD(Token::MOD, left, right); + return MarkAsCall(DefineFixedDouble(result, xmm1), instr); + } } @@ -1335,7 +1423,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } return result; } else if (instr->representation().IsDouble()) { - Abort("Unimplemented: %s", "DoAdd on Doubles"); + return DoArithmeticD(Token::ADD, instr); } else { ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::ADD, instr); @@ -1345,8 +1433,22 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LInstruction* LChunkBuilder::DoPower(HPower* instr) { - Abort("Unimplemented: %s", "DoPower"); - return NULL; + ASSERT(instr->representation().IsDouble()); + // We call a C function for double power. It can't trigger a GC. + // We need to use fixed result register for the call. + Representation exponent_type = instr->right()->representation(); + ASSERT(instr->left()->representation().IsDouble()); + LOperand* left = UseFixedDouble(instr->left(), xmm2); + LOperand* right = exponent_type.IsDouble() ? + UseFixedDouble(instr->right(), xmm1) : +#ifdef _WIN64 + UseFixed(instr->right(), rdx); +#else + UseFixed(instr->right(), rdi); +#endif + LPower* result = new LPower(left, right); + return MarkAsCall(DefineFixedDouble(result, xmm1), instr, + CAN_DEOPTIMIZE_EAGERLY); } @@ -1411,15 +1513,27 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) { LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { - Abort("Unimplemented: %s", "DoHasInstanceType"); - return NULL; + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LHasInstanceType(value)); +} + + +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LGetCachedArrayIndex(value)); } LInstruction* LChunkBuilder::DoHasCachedArrayIndex( HHasCachedArrayIndex* instr) { - Abort("Unimplemented: %s", "DoHasCachedArrayIndex"); - return NULL; + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegister(instr->value()); + return DefineAsRegister(new LHasCachedArrayIndex(value)); } @@ -1448,8 +1562,9 @@ LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) { LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { - Abort("Unimplemented: %s", "DoValueOf"); - return NULL; + LOperand* object = UseRegister(instr->value()); + LValueOf* result = new LValueOf(object); + return AssignEnvironment(DefineSameAsFirst(result)); } @@ -1506,12 +1621,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { return AssignPointerMap(Define(result, result_temp)); } else { ASSERT(to.IsInteger32()); - bool needs_temp = instr->CanTruncateToInt32() && - !CpuFeatures::IsSupported(SSE3); - LOperand* value = needs_temp ? - UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* temp = needs_temp ? 
TempRegister() : NULL; - return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp))); + LOperand* value = UseRegister(instr->value()); + return AssignEnvironment(DefineAsRegister(new LDoubleToI(value))); } } else if (from.IsInteger32()) { if (to.IsTagged()) { @@ -1609,14 +1720,25 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) { LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - Abort("Unimplemented: %s", "DoLoadContextSlot"); - return NULL; + LOperand* context = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LLoadContextSlot(context)); } LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - Abort("Unimplemented: DoStoreContextSlot"); - return NULL; + LOperand* context; + LOperand* value; + LOperand* temp; + if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); + value = UseTempRegister(instr->value()); + temp = TempRegister(); + } else { + context = UseRegister(instr->context()); + value = UseRegister(instr->value()); + temp = NULL; + } + return new LStoreContextSlot(context, value, temp); } @@ -1679,8 +1801,11 @@ LInstruction* LChunkBuilder::DoLoadPixelArrayElement( LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoLoadKeyedGeneric"); - return NULL; + LOperand* object = UseFixed(instr->object(), rdx); + LOperand* key = UseFixed(instr->key(), rax); + + LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key); + return MarkAsCall(DefineFixed(result, rax), instr); } @@ -1703,9 +1828,31 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + ASSERT(instr->value()->representation().IsInteger32()); + ASSERT(instr->external_pointer()->representation().IsExternal()); + ASSERT(instr->key()->representation().IsInteger32()); + + LOperand* external_pointer = UseRegister(instr->external_pointer()); + LOperand* val = UseTempRegister(instr->value()); + LOperand* key = UseRegister(instr->key()); + + return new LStorePixelArrayElement(external_pointer, key, val); +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreKeyedGeneric"); - return NULL; + LOperand* object = UseFixed(instr->object(), rdx); + LOperand* key = UseFixed(instr->key(), rcx); + LOperand* value = UseFixed(instr->value(), rax); + + ASSERT(instr->object()->representation().IsTagged()); + ASSERT(instr->key()->representation().IsTagged()); + ASSERT(instr->value()->representation().IsTagged()); + + LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value); + return MarkAsCall(result, instr); } @@ -1730,20 +1877,25 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreNamedGeneric"); - return NULL; + LOperand* object = UseFixed(instr->object(), rdx); + LOperand* value = UseFixed(instr->value(), rax); + + LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value); + return MarkAsCall(result, instr); } LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - Abort("Unimplemented: %s", "DoStringCharCodeAt"); - return NULL; + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegisterOrConstant(instr->index()); + LStringCharCodeAt* result = new LStringCharCodeAt(string, index); + return 
AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); } LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { - Abort("Unimplemented: %s", "DoStringLength"); - return NULL; + LOperand* string = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LStringLength(string)); } @@ -1758,8 +1910,7 @@ LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { - Abort("Unimplemented: %s", "DoRegExpLiteral"); - return NULL; + return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr); } @@ -1769,14 +1920,16 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { - Abort("Unimplemented: %s", "DoDeleteProperty"); - return NULL; + LDeleteProperty* result = + new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key())); + return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - Abort("Unimplemented: %s", "DoOsrEntry"); - return NULL; + allocator_->MarkAsOsrEntry(); + current_block_->last_environment()->set_ast_id(instr->ast_id()); + return AssignEnvironment(new LOsrEntry); } @@ -1787,8 +1940,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - Abort("Unimplemented: %s", "DoUnknownOSRValue"); - return NULL; + int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width. + return DefineAsSpilled(new LUnknownOSRValue, spill_index); } @@ -1799,26 +1952,31 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - Abort("Unimplemented: %s", "DoArgumentsObject"); + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. 
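  // Illustration of the distinction (hypothetical JS inputs):
  //
  //   function f() { return arguments.length; }  // length read off the frame
  //   function g(i) { return arguments[i]; }     // element read off the frame
  //   function h() { return arguments; }         // real use => bailout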
return NULL; } LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - Abort("Unimplemented: %s", "DoAccessArgumentsAt"); - return NULL; + LOperand* arguments = UseRegister(instr->arguments()); + LOperand* length = UseTempRegister(instr->length()); + LOperand* index = Use(instr->index()); + LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index); + return AssignEnvironment(DefineAsRegister(result)); } LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - Abort("Unimplemented: %s", "DoTypeof"); - return NULL; + LTypeof* result = new LTypeof(UseAtStart(instr->value())); + return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) { - Abort("Unimplemented: %s", "DoTypeofIs"); - return NULL; + return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value()))); } diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h index 0cb5cc7a..67ec7af8 100644 --- a/src/x64/lithium-x64.h +++ b/src/x64/lithium-x64.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -91,6 +89,7 @@ class LCodeGen; V(DoubleToI) \ V(FunctionLiteral) \ V(Gap) \ + V(GetCachedArrayIndex) \ V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ @@ -131,6 +130,7 @@ class LCodeGen; V(NumberUntagD) \ V(ObjectLiteral) \ V(OsrEntry) \ + V(OuterContext) \ V(Parameter) \ V(PixelArrayLength) \ V(Power) \ @@ -141,11 +141,15 @@ class LCodeGen; V(SmiTag) \ V(SmiUntag) \ V(StackCheck) \ + V(StoreContextSlot) \ V(StoreGlobal) \ V(StoreKeyedFastElement) \ V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ + V(StorePixelArrayElement) \ + V(StringCharCodeAt) \ + V(StringLength) \ V(SubI) \ V(TaggedToI) \ V(Throw) \ @@ -727,6 +731,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { }; +class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LHasCachedArrayIndex(LOperand* value) { @@ -829,11 +844,10 @@ class LInstanceOfAndBranch: public LControlInstruction<2, 0> { }; -class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { +class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> { public: - LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) { + explicit LInstanceOfKnownGlobal(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, @@ -1004,11 +1018,10 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> { }; -class LValueOf: public LTemplateInstruction<1, 1, 1> { +class LValueOf: public LTemplateInstruction<1, 1, 0> { public: - LValueOf(LOperand* value, LOperand* temp) { + explicit LValueOf(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of") @@ -1245,6 +1258,26 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> { }; +class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> { + public: + LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = value; + temps_[0] = temp; + } + + DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, 
"store-context-slot") + DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) + + LOperand* context() { return InputAt(0); } + LOperand* value() { return InputAt(1); } + int slot_index() { return hydrogen()->slot_index(); } + int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } + + virtual void PrintDataTo(StringStream* stream); +}; + + class LPushArgument: public LTemplateInstruction<0, 1, 0> { public: explicit LPushArgument(LOperand* value) { @@ -1261,6 +1294,18 @@ class LContext: public LTemplateInstruction<1, 0, 0> { }; +class LOuterContext: public LTemplateInstruction<1, 1, 0> { + public: + explicit LOuterContext(LOperand* context) { + inputs_[0] = context; + } + + DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context") + + LOperand* context() { return InputAt(0); } +}; + + class LGlobalObject: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") @@ -1294,6 +1339,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> { DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") DECLARE_HYDROGEN_ACCESSOR(CallKeyed) + LOperand* key() { return inputs_[0]; } + virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } @@ -1314,6 +1361,8 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> { class LCallFunction: public LTemplateInstruction<1, 0, 0> { public: + LCallFunction() {} + DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") DECLARE_HYDROGEN_ACCESSOR(CallFunction) @@ -1402,11 +1451,10 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> { // Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI: public LTemplateInstruction<1, 1, 1> { +class LDoubleToI: public LTemplateInstruction<1, 1, 0> { public: - LDoubleToI(LOperand* value, LOperand* temp) { + explicit LDoubleToI(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") @@ -1467,34 +1515,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 1> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamed(LOperand* obj, LOperand* val) { - inputs_[0] = obj; - inputs_[1] = val; + LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { + inputs_[0] = object; + inputs_[1] = value; + temps_[0] = temp; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle<Object> name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) - : LStoreNamed(obj, val) { - temps_[0] = temp; - } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle<Object> name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1502,25 +1539,35 @@ class LStoreNamedField: public LStoreNamed { }; -class LStoreNamedGeneric: public LStoreNamed { +class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { public: - 
LStoreNamedGeneric(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } + LStoreNamedGeneric(LOperand* object, LOperand* value) { + inputs_[0] = object; + inputs_[1] = value; + } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + Handle<Object> name() const { return hydrogen()->name(); } }; -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1530,23 +1577,69 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} + LStorePixelArrayElement(LOperand* external_pointer, + LOperand* key, + LOperand* val) { + inputs_[0] = external_pointer; + inputs_[1] = key; + inputs_[2] = val; + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) + DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement, + "store-pixel-array-element") + DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement) + + LOperand* external_pointer() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; -class LStoreKeyedGeneric: public LStoreKeyed { +class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) { } + LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) { + inputs_[0] = object; + inputs_[1] = key; + inputs_[2] = value; + } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } +}; + + +class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { + public: + LStringCharCodeAt(LOperand* string, LOperand* index) { + inputs_[0] = string; + inputs_[1] = index; + } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) + + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } +}; + + +class LStringLength: public LTemplateInstruction<1, 1, 0> { + public: + explicit LStringLength(LOperand* string) { + inputs_[0] = string; + } + + DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length") + DECLARE_HYDROGEN_ACCESSOR(StringLength) + + LOperand* string() { return inputs_[0]; } }; @@ -1750,8 +1843,9 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> { class LChunkBuilder; class LChunk: public ZoneObject { public: - explicit LChunk(HGraph* graph) + explicit LChunk(CompilationInfo* info, HGraph* graph) : spill_slot_count_(0), + info_(info), 
graph_(graph), instructions_(32), pointer_maps_(8), @@ -1768,6 +1862,7 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } void AddGapMove(int index, LOperand* from, LOperand* to); @@ -1804,6 +1899,7 @@ class LChunk: public ZoneObject { private: int spill_slot_count_; + CompilationInfo* info_; HGraph* const graph_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; @@ -1813,8 +1909,9 @@ class LChunk: public ZoneObject { class LChunkBuilder BASE_EMBEDDED { public: - LChunkBuilder(HGraph* graph, LAllocator* allocator) + LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) : chunk_(NULL), + info_(info), graph_(graph), status_(UNUSED), current_instruction_(NULL), @@ -1843,6 +1940,7 @@ class LChunkBuilder BASE_EMBEDDED { }; LChunk* chunk() const { return chunk_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } bool is_unused() const { return status_ == UNUSED; } @@ -1949,6 +2047,7 @@ class LChunkBuilder BASE_EMBEDDED { HArithmeticBinaryOperation* instr); LChunk* chunk_; + CompilationInfo* info_; HGraph* const graph_; Status status_; HInstruction* current_instruction_; diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 56a2d6f9..b468e82f 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -49,22 +49,35 @@ MacroAssembler::MacroAssembler(void* buffer, int size) void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { - movq(destination, Operand(kRootRegister, index << kPointerSizeLog2)); + movq(destination, Operand(kRootRegister, + (index << kPointerSizeLog2) - kRootRegisterBias)); +} + + +void MacroAssembler::LoadRootIndexed(Register destination, + Register variable_offset, + int fixed_offset) { + movq(destination, + Operand(kRootRegister, + variable_offset, times_pointer_size, + (fixed_offset << kPointerSizeLog2) - kRootRegisterBias)); } void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) { - movq(Operand(kRootRegister, index << kPointerSizeLog2), source); + movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias), + source); } void MacroAssembler::PushRoot(Heap::RootListIndex index) { - push(Operand(kRootRegister, index << kPointerSizeLog2)); + push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)); } void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { - cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2)); + cmpq(with, Operand(kRootRegister, + (index << kPointerSizeLog2) - kRootRegisterBias)); } @@ -136,7 +149,7 @@ void MacroAssembler::RecordWrite(Register object, Register value) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered - // registers are esi. + // registers are rsi. 
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi)); // First, check if a write barrier is even needed. The tests below @@ -623,7 +636,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( } -void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { +void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + PostCallGenerator* post_call_generator) { // Calls are not allowed in some stubs. ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); @@ -632,7 +647,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { // parameter count to avoid emitting code to do the check. ParameterCount expected(0); GetBuiltinEntry(rdx, id); - InvokeCode(rdx, expected, expected, flag); + InvokeCode(rdx, expected, expected, flag, post_call_generator); } @@ -906,7 +921,7 @@ Condition MacroAssembler::CheckSmi(const Operand& src) { Condition MacroAssembler::CheckNonNegativeSmi(Register src) { ASSERT_EQ(0, kSmiTag); - // Make mask 0x8000000000000001 and test that both bits are zero. + // Test that both bits of the mask 0x8000000000000001 are zero. movq(kScratchRegister, src); rol(kScratchRegister, Immediate(1)); testb(kScratchRegister, Immediate(3)); @@ -1442,10 +1457,19 @@ void MacroAssembler::Pushad() { // r13 is kRootRegister. push(r14); // r15 is kSmiConstantRegister + STATIC_ASSERT(11 == kNumSafepointSavedRegisters); + // Use lea for symmetry with Popad. + int sp_delta = + (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize; + lea(rsp, Operand(rsp, -sp_delta)); } void MacroAssembler::Popad() { + // Popad must not change the flags, so use lea instead of addq. + int sp_delta = + (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize; + lea(rsp, Operand(rsp, sp_delta)); pop(r14); pop(r12); pop(r11); @@ -1461,8 +1485,7 @@ void MacroAssembler::Popad() { void MacroAssembler::Dropad() { - const int kRegistersPushedByPushad = 11; - addq(rsp, Immediate(kRegistersPushedByPushad * kPointerSize)); + addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize)); } @@ -1488,6 +1511,21 @@ int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = { }; +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) { + movq(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + movq(dst, SafepointRegisterSlot(src)); +} + + +Operand MacroAssembler::SafepointRegisterSlot(Register reg) { + return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + void MacroAssembler::PushTryHandler(CodeLocation try_location, HandlerType type) { // Adjust this code if not the case. @@ -1536,6 +1574,96 @@ void MacroAssembler::PopTryHandler() { } +void MacroAssembler::Throw(Register value) { + // Check that stack should contain next handler, frame pointer, state and + // return address in that order. + STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize == + StackHandlerConstants::kStateOffset); + STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize == + StackHandlerConstants::kPCOffset); + // Keep thrown value in rax. 
+ if (!value.is(rax)) { + movq(rax, value); + } + + ExternalReference handler_address(Top::k_handler_address); + movq(kScratchRegister, handler_address); + movq(rsp, Operand(kScratchRegister, 0)); + // get next in chain + pop(rcx); + movq(Operand(kScratchRegister, 0), rcx); + pop(rbp); // pop frame pointer + pop(rdx); // remove state + + // Before returning we restore the context from the frame pointer if not NULL. + // The frame pointer is NULL in the exception handler of a JS entry frame. + Set(rsi, 0); // Tentatively set context pointer to NULL + NearLabel skip; + cmpq(rbp, Immediate(0)); + j(equal, &skip); + movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + bind(&skip); + ret(0); +} + + +void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, + Register value) { + // Keep thrown value in rax. + if (!value.is(rax)) { + movq(rax, value); + } + // Fetch top stack handler. + ExternalReference handler_address(Top::k_handler_address); + movq(kScratchRegister, handler_address); + movq(rsp, Operand(kScratchRegister, 0)); + + // Unwind the handlers until the ENTRY handler is found. + NearLabel loop, done; + bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY)); + j(equal, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + movq(rsp, Operand(rsp, kNextOffset)); + jmp(&loop); + bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + movq(kScratchRegister, handler_address); + pop(Operand(kScratchRegister, 0)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + movq(rax, Immediate(false)); + store_rax(external_caught); + + // Set pending exception and rax to out of memory exception. + ExternalReference pending_exception(Top::k_pending_exception_address); + movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE); + store_rax(pending_exception); + } + + // Clear the context pointer. + Set(rsi, 0); + + // Restore registers from handler. 
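  // The stack layout being relied on here, written out as a struct
  // (illustration; the authoritative offsets are StackHandlerConstants):
  //
  //   struct StackHandler {     // lowest address first, one word apart
  //     StackHandler* next;     // kNextOffset -- already popped above
  //     Address fp;             // kFPOffset
  //     intptr_t state;         // kStateOffset
  //     Address pc;             // kPCOffset -- consumed by the ret below
  //   };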
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize == + StackHandlerConstants::kFPOffset); + pop(rbp); // FP + STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize == + StackHandlerConstants::kStateOffset); + pop(rdx); // State + + STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize == + StackHandlerConstants::kPCOffset); + ret(0); +} + + void MacroAssembler::Ret() { ret(0); } @@ -1610,6 +1738,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) { } +void MacroAssembler::AbortIfNotString(Register object) { + testb(object, Immediate(kSmiTagMask)); + Assert(not_equal, "Operand is not a string"); + push(object); + movq(object, FieldOperand(object, HeapObject::kMapOffset)); + CmpInstanceType(object, FIRST_NONSTRING_TYPE); + pop(object); + Assert(below, "Operand is not a string"); +} + + void MacroAssembler::AbortIfNotRootValue(Register src, Heap::RootListIndex root_value_index, const char* message) { @@ -1728,11 +1867,19 @@ void MacroAssembler::DebugBreak() { void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected, const ParameterCount& actual, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { NearLabel done; - InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); + InvokePrologue(expected, + actual, + Handle<Code>::null(), + code, + &done, + flag, + post_call_generator); if (flag == CALL_FUNCTION) { call(code); + if (post_call_generator != NULL) post_call_generator->Generate(); } else { ASSERT(flag == JUMP_FUNCTION); jmp(code); @@ -1745,12 +1892,20 @@ void MacroAssembler::InvokeCode(Handle<Code> code, const ParameterCount& expected, const ParameterCount& actual, RelocInfo::Mode rmode, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { NearLabel done; Register dummy = rax; - InvokePrologue(expected, actual, code, dummy, &done, flag); + InvokePrologue(expected, + actual, + code, + dummy, + &done, + flag, + post_call_generator); if (flag == CALL_FUNCTION) { Call(code, rmode); + if (post_call_generator != NULL) post_call_generator->Generate(); } else { ASSERT(flag == JUMP_FUNCTION); Jump(code, rmode); @@ -1761,7 +1916,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code, void MacroAssembler::InvokeFunction(Register function, const ParameterCount& actual, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { ASSERT(function.is(rdi)); movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); @@ -1772,13 +1928,14 @@ void MacroAssembler::InvokeFunction(Register function, movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); ParameterCount expected(rbx); - InvokeCode(rdx, expected, actual, flag); + InvokeCode(rdx, expected, actual, flag, post_call_generator); } void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { ASSERT(function->is_compiled()); // Get the function and setup the context. Move(rdi, Handle<JSFunction>(function)); @@ -1789,12 +1946,17 @@ void MacroAssembler::InvokeFunction(JSFunction* function, // the Code object every time we call the function. 
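 // (Illustration of why this indirection pays off: kCodeEntryOffset holds a
 //  raw entry address, conceptually
 //
 //    target = function->code_entry;  // may be swapped by lazy compilation
 //    call(target);                   // or by Crankshaft optimization
 //
 //  so replacing a function's code never requires patching its call sites.)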
     movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
     ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(rdx, expected, actual, flag);
+    InvokeCode(rdx, expected, actual, flag, post_call_generator);
   } else {
     // Invoke the cached code.
     Handle<Code> code(function->code());
     ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+    InvokeCode(code,
+               expected,
+               actual,
+               RelocInfo::CODE_TARGET,
+               flag,
+               post_call_generator);
   }
 }
@@ -2387,9 +2549,21 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
     }
     // The context may be an intermediate context, not a function context.
     movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // context is the current function context.
-    // The context may be an intermediate context, not a function context.
-    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context. Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in rsi).
+    movq(dst, rsi);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
+  if (FLAG_debug_code) {
+    cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+    Check(equal, "Yo dawg, I heard you liked function contexts "
+                 "so I put function contexts in all your contexts");
+  }
 }
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 10026359..7a7f1a27 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,12 +52,16 @@
 static const Register kSmiConstantRegister = { 15 };  // r15 (callee save).
 static const Register kRootRegister = { 13 };  // r13 (callee save).
 // Value of smi in kSmiConstantRegister.
 static const int kSmiConstantRegisterValue = 1;
+// The actual value of the root register is offset from the root array's
+// start to take advantage of negative 8-bit displacement values.
+static const int kRootRegisterBias = 128;

 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;

 // Forward declaration.
 class JumpTarget;
+class PostCallGenerator;

 struct SmiIndex {
   SmiIndex(Register index_register, ScaleFactor scale)
@@ -73,6 +77,12 @@ class MacroAssembler: public Assembler {
   MacroAssembler(void* buffer, int size);

   void LoadRoot(Register destination, Heap::RootListIndex index);
+  // Load a root value where the index (or part of it) is variable.
+  // The variable_offset register is added to the fixed_offset value
+  // to get the index into the root-array.
+  void LoadRootIndexed(Register destination,
+                       Register variable_offset,
+                       int fixed_offset);
   void CompareRoot(Register with, Heap::RootListIndex index);
   void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
@@ -170,11 +180,17 @@ class MacroAssembler: public Assembler {
   // Push and pop the registers that can hold pointers.
@@ -170,11 +180,17 @@ class MacroAssembler: public Assembler { // Push and pop the registers that can hold pointers. void PushSafepointRegisters() { Pushad(); } void PopSafepointRegisters() { Popad(); } - static int SafepointRegisterStackIndex(int reg_code) { - return kSafepointPushRegisterIndices[reg_code]; + // Store the value in register src in the safepoint register stack + // slot for register dst. + void StoreToSafepointRegisterSlot(Register dst, Register src); + void LoadFromSafepointRegisterSlot(Register dst, Register src); + + void InitializeRootRegister() { + ExternalReference roots_address = ExternalReference::roots_address(); + movq(kRootRegister, roots_address); + addq(kRootRegister, Immediate(kRootRegisterBias)); } - // --------------------------------------------------------------------------- // JavaScript invokes @@ -182,27 +198,33 @@ void InvokeCode(Register code, const ParameterCount& expected, const ParameterCount& actual, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); void InvokeCode(Handle<Code> code, const ParameterCount& expected, const ParameterCount& actual, RelocInfo::Mode rmode, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. void InvokeFunction(Register function, const ParameterCount& actual, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); void InvokeFunction(JSFunction* function, const ParameterCount& actual, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); // Invoke specified builtin JavaScript function. Adds an entry to // the unresolved list if the name does not resolve. - void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag); + void InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); // Store the function for the given builtin in the target register. void GetBuiltinFunction(Register target, Builtins::JavaScript id); @@ -661,6 +683,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is not a smi. Used in debug code. void AbortIfNotSmi(Register object); + // Abort execution if argument is not a string. Used in debug code. + void AbortIfNotString(Register object); + // Abort execution if argument is not the root value with the given index. void AbortIfNotRootValue(Register src, Heap::RootListIndex root_value_index, @@ -676,6 +701,13 @@ // Unlink the stack handler on top of the stack from the try handler chain. void PopTryHandler(); + // Activate the top handler in the try handler chain and pass the + // thrown value. + void Throw(Register value); + + // Propagate an uncatchable exception out of the current JS stack. + void ThrowUncatchable(UncatchableExceptionType type, Register value); + // --------------------------------------------------------------------------- // Inline caching support @@ -963,6 +995,8 @@ // Order general registers are pushed by Pushad. // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
static int kSafepointPushRegisterIndices[Register::kNumRegisters]; + static const int kNumSafepointSavedRegisters = 11; + bool generating_stub_; bool allow_stub_calls_; @@ -983,7 +1017,8 @@ class MacroAssembler: public Assembler { Handle<Code> code_constant, Register code_register, LabelType* done, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator); // Activation support. void EnterFrame(StackFrame::Type type); @@ -1014,6 +1049,17 @@ class MacroAssembler: public Assembler { Object* PopHandleScopeHelper(Register saved, Register scratch, bool gc_allowed); + + + // Compute memory operands for safepoint stack slots. + Operand SafepointRegisterSlot(Register reg); + static int SafepointRegisterStackIndex(int reg_code) { + return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1; + } + + // Needs access to SafepointRegisterStackIndex for optimized frame + // traversal. + friend class OptimizedFrame; }; @@ -1037,6 +1083,17 @@ class CodePatcher { }; +// Helper class for generating code or data associated with the code +// right after a call instruction. As an example this can be used to +// generate safepoint data after calls for crankshaft. +class PostCallGenerator { + public: + PostCallGenerator() { } + virtual ~PostCallGenerator() { } + virtual void Generate() = 0; +}; + + // ----------------------------------------------------------------------------- // Static helper functions. @@ -1743,7 +1800,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Handle<Code> code_constant, Register code_register, LabelType* done, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { bool definitely_matches = false; NearLabel invoke; if (expected.is_immediate()) { @@ -1794,6 +1852,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, if (flag == CALL_FUNCTION) { Call(adaptor, RelocInfo::CODE_TARGET); + if (post_call_generator != NULL) post_call_generator->Generate(); jmp(done); } else { Jump(adaptor, RelocInfo::CODE_TARGET); diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc index 27f3482a..cd3bfbd4 100644 --- a/src/x64/regexp-macro-assembler-x64.cc +++ b/src/x64/regexp-macro-assembler-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h index 182bc552..421a2294 100644 --- a/src/x64/regexp-macro-assembler-x64.h +++ b/src/x64/regexp-macro-assembler-x64.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h index e607c8b8..aa2994f2 100644 --- a/src/x64/simulator-x64.h +++ b/src/x64/simulator-x64.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -39,10 +39,13 @@ namespace internal { #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ (entry(p0, p1, p2, p3, p4)) -// Call the generated regexp code directly. The entry function pointer should +typedef int (*regexp_matcher)(String*, int, const byte*, + const byte*, int*, Address, int); + +// Call the generated regexp code directly. The code at the entry address should // expect seven int/pointer sized arguments and return an int. #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ - (entry(p0, p1, p2, p3, p4, p5, p6)) + (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ (reinterpret_cast<TryCatch*>(try_catch_address)) diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc index 9cb88f36..109985c7 100644 --- a/src/x64/stub-cache-x64.cc +++ b/src/x64/stub-cache-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -2060,8 +2060,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { // Check that the object is a two-byte string or a symbol. @@ -2076,8 +2077,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2096,8 +2098,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2405,12 +2408,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, __ push(rdx); // receiver __ push(rcx); // name __ push(rax); // value + __ Push(Smi::FromInt(strict_mode_)); __ push(rbx); // restore return address // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallExternalReference(store_ic_property, 3, 1); + __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. __ bind(&miss); @@ -2559,6 +2563,43 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized( } +MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray( + JSObject* receiver) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + Label miss; + + // Check that the map matches. 
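An aside on the strict_mode checks added to CompileCallConstant above (the STRING_CHECK, NUMBER_CHECK and BOOLEAN_CHECK cases), before the pixel-array store stub continues below: they encode a visible piece of ES5 semantics — a sloppy-mode callee receives a boxed wrapper object as its receiver, while a strict-mode callee receives the primitive unchanged, so only the former needs the boxing path. A plain-JS sketch of the difference (the method names are invented for the example):

  String.prototype.sloppyType = function() { return typeof this; };
  String.prototype.strictType = function() { "use strict"; return typeof this; };
  print("abc".sloppyType());  // "object" -- receiver boxed into a String wrapper
  print("abc".strictType());  // "string" -- strict mode passes the primitive as-is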
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false); + + // Do the store. + GenerateFastPixelArrayStore(masm(), + rdx, + rcx, + rax, + rdi, + rbx, + true, + false, + &miss, + &miss, + NULL, + &miss); + + // Handle store cache miss. + __ bind(&miss); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ jmp(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, NULL); +} + + MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name, JSObject* object, JSObject* last) { @@ -3450,10 +3491,13 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( __ push(rdx); // receiver __ push(rcx); // key __ push(rax); // value + __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ Push(Smi::FromInt( + Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)); __ push(rbx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); return GetCode(flags); } diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc index 31f9527a..c4d7e656 100644 --- a/src/x64/virtual-frame-x64.cc +++ b/src/x64/virtual-frame-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -274,6 +274,24 @@ void VirtualFrame::Push(Expression* expr) { } +void VirtualFrame::Push(Handle<Object> value) { + if (ConstantPoolOverflowed()) { + Result temp = cgen()->allocator()->Allocate(); + ASSERT(temp.is_valid()); + if (value->IsSmi()) { + __ Move(temp.reg(), Smi::cast(*value)); + } else { + __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT); + } + Push(&temp); + } else { + FrameElement element = + FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED); + elements_.Add(element); + } +} + + void VirtualFrame::Drop(int count) { ASSERT(count >= 0); ASSERT(height() >= count); @@ -1124,9 +1142,9 @@ Result VirtualFrame::CallStoreIC(Handle<String> name, StrictModeFlag strict_mode) { // Value and (if not contextual) receiver are on top of the frame. // The IC expects name in rcx, value in rax, and receiver in rdx. - Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode - ? Builtins::StoreIC_Initialize_Strict - : Builtins::StoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); Result value = Pop(); RelocInfo::Mode mode; if (is_contextual) { @@ -1146,7 +1164,7 @@ Result VirtualFrame::CallStoreIC(Handle<String> name, } -Result VirtualFrame::CallKeyedStoreIC() { +Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) { // Value, key, and receiver are on the top of the frame. The IC // expects value in rax, key in rcx, and receiver in rdx. Result value = Pop(); @@ -1190,7 +1208,9 @@ Result VirtualFrame::CallKeyedStoreIC() { receiver.Unuse(); } - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ?
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); return RawCallCodeObject(ic, RelocInfo::CODE_TARGET); } diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h index 4a9c7203..7396db17 100644 --- a/src/x64/virtual-frame-x64.h +++ b/src/x64/virtual-frame-x64.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -343,7 +343,7 @@ class VirtualFrame : public ZoneObject { // Call keyed store IC. Value, key, and receiver are found on top // of the frame. All three are dropped. - Result CallKeyedStoreIC(); + Result CallKeyedStoreIC(StrictModeFlag strict_mode); // Call call IC. Function name, arguments, and receiver are found on top // of the frame and dropped by the call. @@ -400,9 +400,11 @@ class VirtualFrame : public ZoneObject { // Uses kScratchRegister, emits appropriate relocation info. void EmitPush(Handle<Object> value); + inline bool ConstantPoolOverflowed(); + // Push an element on the virtual frame. + void Push(Handle<Object> value); inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown()); - inline void Push(Handle<Object> value); inline void Push(Smi* value); // Pushing a result invalidates it (its contents become owned by the diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status index a7422c25..e573eb29 100644 --- a/test/cctest/cctest.status +++ b/test/cctest/cctest.status @@ -47,29 +47,8 @@ test-serialize/TestThatAlwaysFails: FAIL test-serialize/DependentTestThatAlwaysFails: FAIL ############################################################################## -[ $arch == x64 ] - -# Optimization is currently not working on crankshaft x64 and ARM. -test-heap/TestInternalWeakLists: PASS || FAIL -test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL - - -############################################################################## -[ $arch == x64 && $crankshaft ] - -# Tests that fail with crankshaft. -test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL -test-deoptimization/DeoptimizeLoadICStoreIC: FAIL -test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL -test-deoptimization/DeoptimizeCompare: PASS || FAIL - -############################################################################## [ $arch == arm ] -# Optimization is currently not working on crankshaft x64 and ARM. -test-heap/TestInternalWeakLists: PASS || FAIL -test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL - # We cannot assume that we can throw OutOfMemory exceptions in all situations. # Apparently our ARM box is in such a state. Skip the test as it also runs for # a long time. 
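The reason a separate KeyedStoreIC_Initialize_Strict entry point is threaded through the virtual frame above is that the same failing store must throw in strict code and fail silently in sloppy code, so the IC has to know the language mode of the code performing the store. A short plain-ES5 sketch of the two behaviors the IC variants implement:

  var frozen = Object.freeze({x: 1});
  function sloppyStore(o) { o.x = 2; }                // fails silently
  function strictStore(o) { "use strict"; o.x = 2; }  // throws TypeError
  sloppyStore(frozen);
  print(frozen.x);  // 1
  try { strictStore(frozen); } catch (e) { print(e instanceof TypeError); }  // true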
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index b92185f5..cd264127 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -2691,6 +2691,41 @@ THREADED_TEST(CatchExceptionFromWith) { } +THREADED_TEST(TryCatchAndFinallyHidingException) { + v8::HandleScope scope; + LocalContext context; + v8::TryCatch try_catch; + CHECK(!try_catch.HasCaught()); + CompileRun("function f(k) { try { this[k]; } finally { return 0; } };"); + CompileRun("f({toString: function() { throw 42; }});"); + CHECK(!try_catch.HasCaught()); +} + + +v8::Handle<v8::Value> WithTryCatch(const v8::Arguments& args) { + v8::TryCatch try_catch; + return v8::Undefined(); +} + + +THREADED_TEST(TryCatchAndFinally) { + v8::HandleScope scope; + LocalContext context; + context->Global()->Set( + v8_str("native_with_try_catch"), + v8::FunctionTemplate::New(WithTryCatch)->GetFunction()); + v8::TryCatch try_catch; + CHECK(!try_catch.HasCaught()); + CompileRun( + "try {\n" + " throw new Error('a');\n" + "} finally {\n" + " native_with_try_catch();\n" + "}\n"); + CHECK(try_catch.HasCaught()); +} + + THREADED_TEST(Equality) { v8::HandleScope scope; LocalContext context; @@ -5617,6 +5652,56 @@ TEST(AccessControl) { } +TEST(AccessControlES5) { + v8::HandleScope handle_scope; + v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(); + + global_template->SetAccessCheckCallbacks(NamedAccessBlocker, + IndexedAccessBlocker); + + // Add an accessor that is not accessible by cross-domain JS code. + global_template->SetAccessor(v8_str("blocked_prop"), + UnreachableGetter, UnreachableSetter, + v8::Handle<Value>(), + v8::DEFAULT); + + // Create an environment + v8::Persistent<Context> context0 = Context::New(NULL, global_template); + context0->Enter(); + + v8::Handle<v8::Object> global0 = context0->Global(); + + v8::Persistent<Context> context1 = Context::New(); + context1->Enter(); + v8::Handle<v8::Object> global1 = context1->Global(); + global1->Set(v8_str("other"), global0); + + // Regression test for issue 1154. + ExpectTrue("Object.keys(other).indexOf('blocked_prop') == -1"); + + ExpectUndefined("other.blocked_prop"); + + // Regression test for issue 1027. + CompileRun("Object.defineProperty(\n" + " other, 'blocked_prop', {configurable: false})"); + ExpectUndefined("other.blocked_prop"); + ExpectUndefined( + "Object.getOwnPropertyDescriptor(other, 'blocked_prop')"); + + // Regression test for issue 1171. + ExpectTrue("Object.isExtensible(other)"); + CompileRun("Object.preventExtensions(other)"); + ExpectTrue("Object.isExtensible(other)"); + + // Object.seal and Object.freeze. 
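For contrast, the baseline ES5 behavior on an ordinary (non-access-checked) object is the opposite of what AccessControlES5 asserts for the cross-origin 'other' object; the freeze and seal checks that follow assert the same inversion. A plain-JS sketch of the ordinary case:

  var o = {};
  print(Object.isExtensible(o));  // true
  Object.preventExtensions(o);
  print(Object.isExtensible(o));  // false -- whereas the access-checked object
                                  // in the test must remain extensible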
+ CompileRun("Object.freeze(other)"); + ExpectTrue("Object.isExtensible(other)"); + + CompileRun("Object.seal(other)"); + ExpectTrue("Object.isExtensible(other)"); +} + + static bool GetOwnPropertyNamesNamedBlocker(Local<v8::Object> global, Local<Value> name, v8::AccessType type, @@ -7542,10 +7627,11 @@ static void GenerateSomeGarbage() { "garbage = undefined;"); } + v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) { static int count = 0; if (count++ % 3 == 0) { - v8::V8::LowMemoryNotification(); // This should move the stub + i::Heap::CollectAllGarbage(true); // This should move the stub GenerateSomeGarbage(); // This should ensure the old stub memory is flushed } return v8::Handle<v8::Value>(); @@ -7597,6 +7683,54 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) { } +v8::Handle<v8::Value> DirectGetterCallback(Local<String> name, + const v8::AccessorInfo& info) { + if (++p_getter_count % 3 == 0) { + i::Heap::CollectAllGarbage(true); + GenerateSomeGarbage(); + } + return v8::Handle<v8::Value>(); +} + + +THREADED_TEST(LoadICFastApi_DirectCall_GCMoveStub) { + v8::HandleScope scope; + LocalContext context; + v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(); + obj->SetAccessor(v8_str("p1"), DirectGetterCallback); + context->Global()->Set(v8_str("o1"), obj->NewInstance()); + p_getter_count = 0; + CompileRun( + "function f() {" + " for (var i = 0; i < 30; i++) o1.p1;" + "}" + "f();"); + CHECK_EQ(30, p_getter_count); +} + + +v8::Handle<v8::Value> ThrowingDirectGetterCallback( + Local<String> name, const v8::AccessorInfo& info) { + return v8::ThrowException(v8_str("g")); +} + + +THREADED_TEST(LoadICFastApi_DirectCall_Throw) { + v8::HandleScope scope; + LocalContext context; + v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(); + obj->SetAccessor(v8_str("p1"), ThrowingDirectGetterCallback); + context->Global()->Set(v8_str("o1"), obj->NewInstance()); + v8::Handle<Value> result = CompileRun( + "var result = '';" + "for (var i = 0; i < 5; i++) {" + " try { o1.p1; } catch (e) { result += e; }" + "}" + "result;"); + CHECK_EQ(v8_str("ggggg"), result); +} + + THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) { int interceptor_call_count = 0; v8::HandleScope scope; @@ -9957,10 +10091,11 @@ class RegExpStringModificationTest { // Inject the input as a global variable. 
i::Handle<i::String> input_name = i::Factory::NewStringFromAscii(i::Vector<const char>("input", 5)); - i::Top::global_context()->global()->SetProperty(*input_name, - *input_, - NONE)->ToObjectChecked(); - + i::Top::global_context()->global()->SetProperty( + *input_name, + *input_, + NONE, + i::kNonStrictMode)->ToObjectChecked(); MorphThread morph_thread(this); morph_thread.Start(); @@ -10482,14 +10617,14 @@ THREADED_TEST(PixelArray) { CHECK_EQ(28, result->Int32Value()); i::Handle<i::Smi> value(i::Smi::FromInt(2)); - i::SetElement(jsobj, 1, value); + i::SetElement(jsobj, 1, value, i::kNonStrictMode); CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value()); *value.location() = i::Smi::FromInt(256); - i::SetElement(jsobj, 1, value); + i::SetElement(jsobj, 1, value, i::kNonStrictMode); CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value()); *value.location() = i::Smi::FromInt(-1); - i::SetElement(jsobj, 1, value); + i::SetElement(jsobj, 1, value, i::kNonStrictMode); CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value()); result = CompileRun("for (var i = 0; i < 8; i++) {" @@ -10691,7 +10826,62 @@ THREADED_TEST(PixelArray) { "result"); CHECK_EQ(32640, result->Int32Value()); - // Make sure that pixel array loads are optimized by crankshaft. + // Make sure that pixel array store ICs clamp values correctly. + result = CompileRun("function pa_store(p) {" + " for (var j = 0; j < 256; j++) { p[j] = j * 2; }" + "}" + "pa_store(pixels);" + "var sum = 0;" + "for (var j = 0; j < 256; j++) { sum += pixels[j]; }" + "sum"); + CHECK_EQ(48896, result->Int32Value()); + + // Make sure that pixel array stores correctly handle accesses outside + // of the pixel array. + result = CompileRun("function pa_store(p,start) {" + " for (var j = 0; j < 256; j++) {" + " p[j+start] = j * 2;" + " }" + "}" + "pa_store(pixels,0);" + "pa_store(pixels,-128);" + "var sum = 0;" + "for (var j = 0; j < 256; j++) { sum += pixels[j]; }" + "sum"); + CHECK_EQ(65280, result->Int32Value()); + + // Make sure that the generic store stub correctly handles accesses outside + // of the pixel array. + result = CompileRun("function pa_store(p,start) {" + " for (var j = 0; j < 256; j++) {" + " p[j+start] = j * 2;" + " }" + "}" + "pa_store(pixels,0);" + "just_ints = new Object();" + "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }" + "pa_store(just_ints, 0);" + "pa_store(pixels,-128);" + "var sum = 0;" + "for (var j = 0; j < 256; j++) { sum += pixels[j]; }" + "sum"); + CHECK_EQ(65280, result->Int32Value()); + + // Make sure that the generic keyed store stub clamps pixel array values + // correctly. + result = CompileRun("function pa_store(p) {" + " for (var j = 0; j < 256; j++) { p[j] = j * 2; }" + "}" + "pa_store(pixels);" + "just_ints = new Object();" + "pa_store(just_ints);" + "pa_store(pixels);" + "var sum = 0;" + "for (var j = 0; j < 256; j++) { sum += pixels[j]; }" + "sum"); + CHECK_EQ(48896, result->Int32Value()); + + // Make sure that pixel array loads are optimized by crankshaft. result = CompileRun("function pa_load(p) {" " var sum = 0;" " for (var i=0; i<256; ++i) {" " sum += p[i];" " }" @@ -10706,6 +10896,24 @@ "result"); CHECK_EQ(32640, result->Int32Value()); + // Make sure that pixel array stores are optimized by crankshaft.
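A note on the semantics all of the checksums above rely on, before the Crankshaft store test that follows: pixel-array stores clamp to the range [0, 255], rounding to the nearest integer. Uint8ClampedArray, the standardized successor of the pixel array type, exhibits the same store behavior and can serve as a plain-JS sketch (assuming a runtime with typed arrays):

  var p = new Uint8ClampedArray(3);
  p[0] = -5;    // clamped to 0
  p[1] = 260;   // clamped to 255
  p[2] = 1.6;   // rounded to nearest, giving 2
  print(p[0] + "," + p[1] + "," + p[2]);  // "0,255,2"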
+ result = CompileRun("function pa_init(p) {" + "for (var i = 0; i < 256; ++i) { p[i] = i; }" + "}" + "function pa_load(p) {" + " var sum = 0;" + " for (var i=0; i<256; ++i) {" + " sum += p[i];" + " }" + " return sum; " + "}" + "for (var i = 0; i < 100000; ++i) {" + " pa_init(pixels);" + "}" + "result = pa_load(pixels);" + "result"); + CHECK_EQ(32640, result->Int32Value()); + free(pixel_data); } @@ -10725,6 +10933,53 @@ THREADED_TEST(PixelArrayInfo) { } +static v8::Handle<Value> NotHandledIndexedPropertyGetter( + uint32_t index, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::Handle<Value>(); +} + + +static v8::Handle<Value> NotHandledIndexedPropertySetter( + uint32_t index, + Local<Value> value, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::Handle<Value>(); +} + + +THREADED_TEST(PixelArrayWithInterceptor) { + v8::HandleScope scope; + LocalContext context; + const int kElementCount = 260; + uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount)); + i::Handle<i::PixelArray> pixels = + i::Factory::NewPixelArray(kElementCount, pixel_data); + for (int i = 0; i < kElementCount; i++) { + pixels->set(i, i % 256); + } + v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(); + templ->SetIndexedPropertyHandler(NotHandledIndexedPropertyGetter, + NotHandledIndexedPropertySetter); + v8::Handle<v8::Object> obj = templ->NewInstance(); + obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount); + context->Global()->Set(v8_str("pixels"), obj); + v8::Handle<v8::Value> result = CompileRun("pixels[1]"); + CHECK_EQ(1, result->Int32Value()); + result = CompileRun("var sum = 0;" + "for (var i = 0; i < 8; i++) {" + " sum += pixels[i] = pixels[i] = -i;" + "}" + "sum;"); + CHECK_EQ(-28, result->Int32Value()); + result = CompileRun("pixels.hasOwnProperty('1')"); + CHECK(result->BooleanValue()); + free(pixel_data); +} + + static int ExternalArrayElementSize(v8::ExternalArrayType array_type) { switch (array_type) { case v8::kExternalByteArray: diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc index b424b7f9..9f21b78d 100644 --- a/test/cctest/test-compiler.cc +++ b/test/cctest/test-compiler.cc @@ -108,7 +108,7 @@ static void SetGlobalProperty(const char* name, Object* value) { Handle<Object> object(value); Handle<String> symbol = Factory::LookupAsciiSymbol(name); Handle<JSObject> global(Top::context()->global()); - SetProperty(global, symbol, object, NONE); + SetProperty(global, symbol, object, NONE, kNonStrictMode); } diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc index 239d8ae6..7f06bc34 100644 --- a/test/cctest/test-cpu-profiler.cc +++ b/test/cctest/test-cpu-profiler.cc @@ -50,7 +50,7 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc, i::Address frame3 = NULL) { i::TickSample* sample = proc->TickSampleEvent(); sample->pc = frame1; - sample->function = frame1; + sample->tos = frame1; sample->frames_count = 0; if (frame2 != NULL) { sample->stack[0] = frame2; @@ -103,7 +103,8 @@ TEST(CodeEvents) { i::Heap::empty_string(), 0, ToAddress(0x1000), - 0x100); + 0x100, + ToAddress(0x10000)); processor.CodeCreateEvent(i::Logger::BUILTIN_TAG, "bbb", ToAddress(0x1200), @@ -116,8 +117,6 @@ TEST(CodeEvents) { processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500)); processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10); processor.CodeDeleteEvent(ToAddress(0x1600)); - processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000), - 
TokenEnumerator::kNoSecurityToken); // Enqueue a tick event to enable code events processing. EnqueueTickSampleEvent(&processor, ToAddress(0x1000)); @@ -139,9 +138,6 @@ TEST(CodeEvents) { CHECK_NE(NULL, entry4); CHECK_EQ("ddd", entry4->name()); CHECK_EQ(NULL, generator.code_map()->FindEntry(ToAddress(0x1600))); - CodeEntry* entry5 = generator.code_map()->FindEntry(ToAddress(0x1700)); - CHECK_NE(NULL, entry5); - CHECK_EQ(aaa_str, entry5->name()); } diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc index 441aae63..7245e54b 100644 --- a/test/cctest/test-debug.cc +++ b/test/cctest/test-debug.cc @@ -153,7 +153,8 @@ class DebugLocalContext { Handle<v8::internal::String> debug_string = v8::internal::Factory::LookupAsciiSymbol("debug"); SetProperty(global, debug_string, - Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM); + Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM, + ::v8::internal::kNonStrictMode); } private: v8::Persistent<v8::Context> context_; diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc index 88fa79b7..6ea4c849 100644 --- a/test/cctest/test-decls.cc +++ b/test/cctest/test-decls.cc @@ -223,7 +223,7 @@ TEST(Unknown) { { DeclarationContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } @@ -278,7 +278,7 @@ TEST(Present) { { PresentPropertyContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } @@ -332,7 +332,7 @@ TEST(Absent) { { AbsentPropertyContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } @@ -422,7 +422,7 @@ TEST(Appearing) { { AppearingPropertyContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc index a23ee171..4cc7f8ba 100644 --- a/test/cctest/test-heap.cc +++ b/test/cctest/test-heap.cc @@ -212,13 +212,14 @@ TEST(GarbageCollection) { Handle<Map> initial_map = Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); function->set_initial_map(*initial_map); - Top::context()->global()->SetProperty(*name, - *function, - NONE)->ToObjectChecked(); + Top::context()->global()->SetProperty( + *name, *function, NONE, kNonStrictMode)->ToObjectChecked(); // Allocate an object. Unrooted after leaving the scope. Handle<JSObject> obj = Factory::NewJSObject(function); - obj->SetProperty(*prop_name, Smi::FromInt(23), NONE)->ToObjectChecked(); - obj->SetProperty(*prop_namex, Smi::FromInt(24), NONE)->ToObjectChecked(); + obj->SetProperty( + *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked(); + obj->SetProperty( + *prop_namex, Smi::FromInt(24), NONE, kNonStrictMode)->ToObjectChecked(); CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name)); CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex)); @@ -238,10 +239,10 @@ TEST(GarbageCollection) { HandleScope inner_scope; // Allocate another object, make it reachable from global. Handle<JSObject> obj = Factory::NewJSObject(function); - Top::context()->global()->SetProperty(*obj_name, - *obj, - NONE)->ToObjectChecked(); - obj->SetProperty(*prop_name, Smi::FromInt(23), NONE)->ToObjectChecked(); + Top::context()->global()->SetProperty( + *obj_name, *obj, NONE, kNonStrictMode)->ToObjectChecked(); + obj->SetProperty( + *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked(); } // After gc, it should survive. 
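Every SetProperty call site above now passes an explicit language mode because a store's failure behavior depends on the strictness of the code performing it, which only the caller knows. A minimal ES5 illustration of the difference the flag selects (the variable names are invented for the example):

  function sloppy() { sloppyLeak = 1; }                // creates a global
  function strict() { "use strict"; strictLeak = 1; }  // throws
  sloppy();
  print(typeof sloppyLeak);  // "number"
  try { strict(); } catch (e) { print(e instanceof ReferenceError); }  // true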
@@ -540,12 +541,12 @@ TEST(FunctionAllocation) { Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot"); Handle<JSObject> obj = Factory::NewJSObject(function); - obj->SetProperty(*prop_name, Smi::FromInt(23), NONE)->ToObjectChecked(); + obj->SetProperty( + *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked(); CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name)); // Check that we can add properties to function objects. - function->SetProperty(*prop_name, - Smi::FromInt(24), - NONE)->ToObjectChecked(); + function->SetProperty( + *prop_name, Smi::FromInt(24), NONE, kNonStrictMode)->ToObjectChecked(); CHECK_EQ(Smi::FromInt(24), function->GetProperty(*prop_name)); } @@ -567,7 +568,8 @@ TEST(ObjectProperties) { CHECK(!obj->HasLocalProperty(*first)); // add first - obj->SetProperty(*first, Smi::FromInt(1), NONE)->ToObjectChecked(); + obj->SetProperty( + *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); CHECK(obj->HasLocalProperty(*first)); // delete first @@ -575,8 +577,10 @@ TEST(ObjectProperties) { CHECK(!obj->HasLocalProperty(*first)); // add first and then second - obj->SetProperty(*first, Smi::FromInt(1), NONE)->ToObjectChecked(); - obj->SetProperty(*second, Smi::FromInt(2), NONE)->ToObjectChecked(); + obj->SetProperty( + *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); + obj->SetProperty( + *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked(); CHECK(obj->HasLocalProperty(*first)); CHECK(obj->HasLocalProperty(*second)); @@ -588,8 +592,10 @@ TEST(ObjectProperties) { CHECK(!obj->HasLocalProperty(*second)); // add first and then second - obj->SetProperty(*first, Smi::FromInt(1), NONE)->ToObjectChecked(); - obj->SetProperty(*second, Smi::FromInt(2), NONE)->ToObjectChecked(); + obj->SetProperty( + *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); + obj->SetProperty( + *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked(); CHECK(obj->HasLocalProperty(*first)); CHECK(obj->HasLocalProperty(*second)); @@ -603,14 +609,16 @@ TEST(ObjectProperties) { // check string and symbol match static const char* string1 = "fisk"; Handle<String> s1 = Factory::NewStringFromAscii(CStrVector(string1)); - obj->SetProperty(*s1, Smi::FromInt(1), NONE)->ToObjectChecked(); + obj->SetProperty( + *s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); Handle<String> s1_symbol = Factory::LookupAsciiSymbol(string1); CHECK(obj->HasLocalProperty(*s1_symbol)); // check symbol and string match static const char* string2 = "fugl"; Handle<String> s2_symbol = Factory::LookupAsciiSymbol(string2); - obj->SetProperty(*s2_symbol, Smi::FromInt(1), NONE)->ToObjectChecked(); + obj->SetProperty( + *s2_symbol, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); Handle<String> s2 = Factory::NewStringFromAscii(CStrVector(string2)); CHECK(obj->HasLocalProperty(*s2)); } @@ -631,7 +639,8 @@ TEST(JSObjectMaps) { Handle<JSObject> obj = Factory::NewJSObject(function); // Set a propery - obj->SetProperty(*prop_name, Smi::FromInt(23), NONE)->ToObjectChecked(); + obj->SetProperty( + *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked(); CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name)); // Check the map has changed @@ -661,7 +670,7 @@ TEST(JSArray) { CHECK(array->HasFastElements()); // Must be in fast mode. // array[length] = name. 
- ok = array->SetElement(0, *name)->ToObjectChecked(); + ok = array->SetElement(0, *name, kNonStrictMode)->ToObjectChecked(); CHECK_EQ(Smi::FromInt(1), array->length()); CHECK_EQ(array->GetElement(0), *name); @@ -676,7 +685,7 @@ TEST(JSArray) { CHECK(array->HasDictionaryElements()); // Must be in slow mode. // array[length] = name. - ok = array->SetElement(int_length, *name)->ToObjectChecked(); + ok = array->SetElement(int_length, *name, kNonStrictMode)->ToObjectChecked(); uint32_t new_int_length = 0; CHECK(array->length()->ToArrayIndex(&new_int_length)); CHECK_EQ(static_cast<double>(int_length), new_int_length - 1); @@ -698,12 +707,14 @@ TEST(JSObjectCopy) { Handle<String> first = Factory::LookupAsciiSymbol("first"); Handle<String> second = Factory::LookupAsciiSymbol("second"); - obj->SetProperty(*first, Smi::FromInt(1), NONE)->ToObjectChecked(); - obj->SetProperty(*second, Smi::FromInt(2), NONE)->ToObjectChecked(); + obj->SetProperty( + *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); + obj->SetProperty( + *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked(); - Object* ok = obj->SetElement(0, *first)->ToObjectChecked(); + Object* ok = obj->SetElement(0, *first, kNonStrictMode)->ToObjectChecked(); - ok = obj->SetElement(1, *second)->ToObjectChecked(); + ok = obj->SetElement(1, *second, kNonStrictMode)->ToObjectChecked(); // Make the clone. Handle<JSObject> clone = Copy(obj); @@ -716,11 +727,13 @@ TEST(JSObjectCopy) { CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*second)); // Flip the values. - clone->SetProperty(*first, Smi::FromInt(2), NONE)->ToObjectChecked(); - clone->SetProperty(*second, Smi::FromInt(1), NONE)->ToObjectChecked(); + clone->SetProperty( + *first, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked(); + clone->SetProperty( + *second, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked(); - ok = clone->SetElement(0, *second)->ToObjectChecked(); - ok = clone->SetElement(1, *first)->ToObjectChecked(); + ok = clone->SetElement(0, *second, kNonStrictMode)->ToObjectChecked(); + ok = clone->SetElement(1, *first, kNonStrictMode)->ToObjectChecked(); CHECK_EQ(obj->GetElement(1), clone->GetElement(0)); CHECK_EQ(obj->GetElement(0), clone->GetElement(1)); diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc index c85f6c0b..bf72184f 100644 --- a/test/cctest/test-log-stack-tracer.cc +++ b/test/cctest/test-log-stack-tracer.cc @@ -33,6 +33,7 @@ #include "v8.h" +#include "api.h" #include "codegen.h" #include "log.h" #include "top.h" @@ -200,16 +201,16 @@ static void InitializeVM() { } -static void CheckJSFunctionAtAddress(const char* func_name, Address addr) { - CHECK(i::Heap::Contains(addr)); - i::Object* obj = i::HeapObject::FromAddress(addr); - CHECK(obj->IsJSFunction()); - CHECK(JSFunction::cast(obj)->shared()->name()->IsString()); - i::SmartPointer<char> found_name = - i::String::cast( - JSFunction::cast( - obj)->shared()->name())->ToCString(); - CHECK_EQ(func_name, *found_name); +static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) { + i::Code* code = function->code(); + return code->contains(addr); +} + +static bool IsAddressWithinFuncCode(const char* func_name, Address addr) { + v8::Local<v8::Value> func = env->Global()->Get(v8_str(func_name)); + CHECK(func->IsFunction()); + JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func)); + return IsAddressWithinFuncCode(js_func, addr); } @@ -309,8 +310,8 @@ TEST(CFromJSStackTrace) { // Stack tracing will start from the 
first JS function, i.e. "JSFuncDoTrace" CHECK_GT(sample.frames_count, base + 1); - CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[base + 0]); - CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 1]); + CHECK(IsAddressWithinFuncCode("JSFuncDoTrace", sample.stack[base + 0])); + CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 1])); } @@ -351,9 +352,6 @@ TEST(PureJSStackTrace) { // DoTraceHideCEntryFPAddress(EBP) [native] // StackTracer::Trace // - // The last JS function called. It is only visible through - // sample.function, as its return address is above captured EBP value. - CheckJSFunctionAtAddress("JSFuncDoTrace", sample.function); // The VM state tracking keeps track of external callbacks and puts // them at the top of the sample stack. @@ -363,8 +361,8 @@ TEST(PureJSStackTrace) { // Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace" CHECK_GT(sample.frames_count, base + 1); - CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 0]); - CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[base + 1]); + CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 0])); + CHECK(IsAddressWithinFuncCode("OuterJSTrace", sample.stack[base + 1])); } diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc index 032a1836..30b8a48d 100644 --- a/test/cctest/test-log.cc +++ b/test/cctest/test-log.cc @@ -1053,10 +1053,10 @@ static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) { // Skip size. ref_s = strchr(ref_s, ',') + 1; new_s = strchr(new_s, ',') + 1; - int ref_len = StrChrLen(ref_s, '\n'); - int new_len = StrChrLen(new_s, '\n'); - // If reference is anonymous (""), it's OK to have anything in new. - if (ref_len == 2) return true; + CHECK_EQ('"', ref_s[0]); + CHECK_EQ('"', new_s[0]); + int ref_len = StrChrLen(ref_s + 1, '\"'); + int new_len = StrChrLen(new_s + 1, '\"'); // A special case for ErrorPrototype. Haven't yet figured out why they // are different. const char* error_prototype = "\"ErrorPrototype"; @@ -1074,21 +1074,6 @@ static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) { return true; } } - // Code objects can change their optimizability: code object may start - // as optimizable, but later be discovered to be actually not optimizable. - // Alas, we don't record this info as of now, so we allow cases when - // ref is thought to be optimizable while traverse finds it to be - // not optimizable. - if (ref_s[1] == '~') { // Code object used to be optimizable - if (new_s[1] == ' ') { // ...but later was set unoptimizable. - CHECK_EQ('"', ref_s[0]); - CHECK_EQ('"', new_s[0]); - ref_s += 2; // Cut the leading quote and the marker - ref_len -= 2; - new_s += 1; // Cut the leading quote only. 
- new_len -= 1; - } - } return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0; } diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc index 86f105f2..3e3175e7 100644 --- a/test/cctest/test-mark-compact.cc +++ b/test/cctest/test-mark-compact.cc @@ -189,7 +189,8 @@ TEST(MarkCompactCollector) { function->set_initial_map(initial_map); Top::context()->global()->SetProperty(func_name, function, - NONE)->ToObjectChecked(); + NONE, + kNonStrictMode)->ToObjectChecked(); JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function)->ToObjectChecked()); @@ -208,10 +209,14 @@ TEST(MarkCompactCollector) { String::cast(Heap::LookupAsciiSymbol("theObject")->ToObjectChecked()); Top::context()->global()->SetProperty(obj_name, obj, - NONE)->ToObjectChecked(); + NONE, + kNonStrictMode)->ToObjectChecked(); String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot")->ToObjectChecked()); - obj->SetProperty(prop_name, Smi::FromInt(23), NONE)->ToObjectChecked(); + obj->SetProperty(prop_name, + Smi::FromInt(23), + NONE, + kNonStrictMode)->ToObjectChecked(); Heap::CollectGarbage(OLD_POINTER_SPACE); diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc index 151cf50a..8ee40385 100755 --- a/test/cctest/test-parsing.cc +++ b/test/cctest/test-parsing.cc @@ -321,14 +321,17 @@ TEST(Regress928) { data->Initialize(); - int first_function = strstr(program, "function") - program; - int first_lbrace = first_function + strlen("function () "); + int first_function = + static_cast<int>(strstr(program, "function") - program); + int first_lbrace = first_function + static_cast<int>(strlen("function () ")); CHECK_EQ('{', program[first_lbrace]); i::FunctionEntry entry1 = data->GetFunctionEntry(first_lbrace); CHECK(!entry1.is_valid()); - int second_function = strstr(program + first_lbrace, "function") - program; - int second_lbrace = second_function + strlen("function () "); + int second_function = + static_cast<int>(strstr(program + first_lbrace, "function") - program); + int second_lbrace = + second_function + static_cast<int>(strlen("function () ")); CHECK_EQ('{', program[second_lbrace]); i::FunctionEntry entry2 = data->GetFunctionEntry(second_lbrace); CHECK(entry2.is_valid()); diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc index f849d404..c60d0720 100644 --- a/test/cctest/test-profile-generator.cc +++ b/test/cctest/test-profile-generator.cc @@ -600,13 +600,13 @@ TEST(RecordTickSample) { // -> ccc -> aaa - sample3 TickSample sample1; sample1.pc = ToAddress(0x1600); - sample1.function = ToAddress(0x1500); + sample1.tos = ToAddress(0x1500); sample1.stack[0] = ToAddress(0x1510); sample1.frames_count = 1; generator.RecordTickSample(sample1); TickSample sample2; sample2.pc = ToAddress(0x1925); - sample2.function = ToAddress(0x1900); + sample2.tos = ToAddress(0x1900); sample2.stack[0] = ToAddress(0x1780); sample2.stack[1] = ToAddress(0x10000); // non-existent. 
sample2.stack[2] = ToAddress(0x1620); @@ -614,7 +614,7 @@ TEST(RecordTickSample) { generator.RecordTickSample(sample2); TickSample sample3; sample3.pc = ToAddress(0x1510); - sample3.function = ToAddress(0x1500); + sample3.tos = ToAddress(0x1500); sample3.stack[0] = ToAddress(0x1910); sample3.stack[1] = ToAddress(0x1610); sample3.frames_count = 2; diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc index 1cbaf2bf..80910c25 100644 --- a/test/cctest/test-serialize.cc +++ b/test/cctest/test-serialize.cc @@ -104,7 +104,7 @@ TEST(ExternalReferenceEncoder) { ExternalReferenceEncoder encoder; CHECK_EQ(make_code(BUILTIN, Builtins::ArrayCode), Encode(encoder, Builtins::ArrayCode)); - CHECK_EQ(make_code(RUNTIME_FUNCTION, Runtime::kAbort), + CHECK_EQ(make_code(v8::internal::RUNTIME_FUNCTION, Runtime::kAbort), Encode(encoder, Runtime::kAbort)); CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty), Encode(encoder, IC_Utility(IC::kLoadCallbackProperty))); @@ -142,7 +142,8 @@ TEST(ExternalReferenceDecoder) { CHECK_EQ(AddressOf(Builtins::ArrayCode), decoder.Decode(make_code(BUILTIN, Builtins::ArrayCode))); CHECK_EQ(AddressOf(Runtime::kAbort), - decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort))); + decoder.Decode(make_code(v8::internal::RUNTIME_FUNCTION, + Runtime::kAbort))); CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)), decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty))); ExternalReference keyed_load_function = diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status index 39754ca5..d6f7caf5 100644 --- a/test/es5conform/es5conform.status +++ b/test/es5conform/es5conform.status @@ -239,15 +239,6 @@ chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK # Incorrect test - need double escape in eval. 
chapter07/7.8/7.8.4/7.8.4-1-s: FAIL -# this is not coerced to an object in strict mode (Number) -chapter10/10.4/10.4.3/10.4.3-1-1-s: FAIL -# this is not coerced to an object in strict mode (string) -chapter10/10.4/10.4.3/10.4.3-1-2-s: FAIL -# this is not coerced to an object in strict mode (undefined) -chapter10/10.4/10.4.3/10.4.3-1-3-s: FAIL -# this is not coerced to an object in strict mode (boolean) -chapter10/10.4/10.4.3/10.4.3-1-4-s: FAIL - # arguments[i] remains same after changing actual parameters in strict mode chapter10/10.6/10.6-10-c-ii-1-s: FAIL # arguments[i] doesn't map to actual parameters in strict mode @@ -278,97 +269,30 @@ chapter11/11.13/11.13.1/11.13.1-4-3-s: FAIL # in strict mode (Global.length) chapter11/11.13/11.13.1/11.13.1-4-4-s: FAIL # simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Object.length) -chapter11/11.13/11.13.1/11.13.1-4-5-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Function.length) -chapter11/11.13/11.13.1/11.13.1-4-6-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Array.length) -chapter11/11.13/11.13.1/11.13.1-4-7-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (String.length) -chapter11/11.13/11.13.1/11.13.1-4-8-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Boolean.length) -chapter11/11.13/11.13.1/11.13.1-4-9-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Number.length) -chapter11/11.13/11.13.1/11.13.1-4-10-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Date.length) -chapter11/11.13/11.13.1/11.13.1-4-11-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (RegExp.length) -chapter11/11.13/11.13.1/11.13.1-4-12-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Error.length) -chapter11/11.13/11.13.1/11.13.1-4-13-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Number.MAX_VALUE) -chapter11/11.13/11.13.1/11.13.1-4-14-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Number.MIN_VALUE) -chapter11/11.13/11.13.1/11.13.1-4-15-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Number.NaN) -chapter11/11.13/11.13.1/11.13.1-4-16-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Number.NEGATIVE_INFINITY) -chapter11/11.13/11.13.1/11.13.1-4-17-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Number.POSITIVE_INFINITY) -chapter11/11.13/11.13.1/11.13.1-4-18-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.E) -chapter11/11.13/11.13.1/11.13.1-4-19-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.LN10) -chapter11/11.13/11.13.1/11.13.1-4-20-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.LN2) -chapter11/11.13/11.13.1/11.13.1-4-21-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.LOG2E) 
-chapter11/11.13/11.13.1/11.13.1-4-22-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.LOG10E) -chapter11/11.13/11.13.1/11.13.1-4-23-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.PI) -chapter11/11.13/11.13.1/11.13.1-4-24-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.SQRT1_2) -chapter11/11.13/11.13.1/11.13.1-4-25-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property -# in strict mode (Math.SQRT2) -chapter11/11.13/11.13.1/11.13.1-4-26-s: FAIL -# simple assignment throws TypeError if LeftHandSide is a readonly property # in strict mode (Global.undefined) chapter11/11.13/11.13.1/11.13.1-4-27-s: FAIL -# delete operator throws TypeError when deleting a non-configurable data -# property in strict mode -chapter11/11.4/11.4.1/11.4.1-4.a-3-s: FAIL # delete operator throws TypeError when when deleting a non-configurable # data property in strict mode (Global.NaN) -chapter11/11.4/11.4.1/11.4.1-4.a-4-s: FAIL -# delete operator throws TypeError when deleting a non-configurable data -# property in strict mode (Math.LN2) -chapter11/11.4/11.4.1/11.4.1-4.a-9-s: FAIL +# Invalid test case - "this" is not a global object within the test case. +# (http://es5conform.codeplex.com/workitem/29151) +chapter11/11.4/11.4.1/11.4.1-4.a-4-s: FAIL_OK # delete operator throws ReferenceError when deleting a direct reference # to a var in strict mode +# Invalid test case. Test expects ReferenceError instead of SyntaxError. +# http://es5conform.codeplex.com/workitem/29084 chapter11/11.4/11.4.1/11.4.1-5-1-s: FAIL # delete operator throws ReferenceError when deleting a direct reference # to a function argument in strict mode +# Invalid test case. Test expects ReferenceError instead of SyntaxError. +# http://es5conform.codeplex.com/workitem/29084 chapter11/11.4/11.4.1/11.4.1-5-2-s: FAIL # delete operator throws ReferenceError when deleting a direct reference # to a function name in strict mode +# Invalid test case. Test expects ReferenceError instead of SyntaxError. +# http://es5conform.codeplex.com/workitem/29084 chapter11/11.4/11.4.1/11.4.1-5-3-s: FAIL -# delete operator throws SyntaxError when deleting a direct reference -# to a function argument(object) in strict mode -chapter11/11.4/11.4.1/11.4.1-5-4-s: FAIL # eval - a function declaring a var named 'eval' throws EvalError in strict mode # Invalid test case. SyntaxError should be expected instead of EvalError. @@ -437,53 +361,13 @@ chapter13/13.1/13.1-3-11-s: FAIL # Test fails to return true on success (invalid test case). 
chapter13/13.1/13.1-3-12-s: FAIL -# 'use strict' directive - correct usage -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-1-s: FAIL -# "use strict" directive - correct usage double quotes -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-2-s: FAIL -# 'use strict' directive - may follow other directives -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-8-s: FAIL -# 'use strict' directive - may occur multiple times -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-9-s: FAIL -# other directives - may follow 'use strict' directive -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-10-s: FAIL -# comments may preceed 'use strict' directive -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-11-s: FAIL -# comments may follow 'use strict' directive -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-12-s: FAIL -# semicolon insertion works for'use strict' directive -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-13-s: FAIL -# semicolon insertion may come before 'use strict' directive -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-14-s: FAIL -# blank lines may come before 'use strict' directive -# depends on "this is not coerced to an object in strict mode (undefined)" -chapter14/14.1/14.1-15-s: FAIL - # Duplicate combined parameter name allowed in Function constructor called # in strict mode if body not strict # Test fails to return true on success (invalid test case). chapter15/15.3/15.3.2/15.3.2.1/15.3.2.1-11-6-s: FAIL -# Array.prototype.every - thisArg not passed to strict callbackfn -chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1-s: FAIL -# Array.prototype.some - thisArg not passed to strict callbackfn -chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-5-1-s: FAIL -# Array.prototype.forEach - thisArg not passed to strict callbackfn -chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1-s: FAIL -# Array.prototype.map - thisArg not passed to strict callbackfn -chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1-s: FAIL -# Array.prototype.filter - thisArg not passed to strict callbackfn -chapter15/15.4/15.4.4/15.4.4.20/15.4.4.20-5-1-s: FAIL # Array.prototype.reduce - null passed as thisValue to strict callbackfn +# Invalid test case: http://es5conform.codeplex.com/workitem/29085 chapter15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-ii-4-s: FAIL [ $arch == mips ] diff --git a/test/mjsunit/array-concat.js b/test/mjsunit/array-concat.js index db89f4d0..97bd85ac 100644 --- a/test/mjsunit/array-concat.js +++ b/test/mjsunit/array-concat.js @@ -101,7 +101,6 @@ while (pos = poses.shift()) { assertEquals("undefined", typeof(c[-1])); assertEquals("undefined", typeof(c[0xffffffff])); assertEquals(c.length, a.length + 1); - } poses = [140, 4000000000]; @@ -193,3 +192,46 @@ for (var i = 0; i < holey.length; i++) { assertTrue(i in holey); } } + +// Polluted prototype from prior tests. +delete Array.prototype[123]; + +// Check that concat reads getters in the correct order. +var arr1 = [,2]; +var arr2 = [1,3]; +var r1 = [].concat(arr1, arr2); // [,2,1,3] +assertEquals([,2,1,3], r1); + +// Make first array change length of second array. 
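The getter-based case below pins down evaluation order in Array.prototype.concat: element getters on the first argument run before the second argument is copied, so their side effects are observable in the result. A condensed standalone version of the same idea:

  var first = [];
  first.length = 1;
  var second = [2];
  Object.defineProperty(first, 0, {
    get: function() { second.push(9); return 1; }  // grows 'second' mid-concat
  });
  print([].concat(first, second));  // 1,2,9 -- the getter ran before 'second' was read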
+Object.defineProperty(arr1, 0, {get: function() { + arr2.push("X"); + return undefined; + }, configurable: true}) +var r2 = [].concat(arr1, arr2); // [undefined,2,1,3,"X"] +assertEquals([undefined,2,1,3,"X"], r2); + +// Make first array change length of second array massively. +arr2.length = 2; +Object.defineProperty(arr1, 0, {get: function() { + arr2[500000] = "X"; + return undefined; + }, configurable: true}) +var r3 = [].concat(arr1, arr2); // [undefined,2,1,3,"X"] +var expected = [undefined,2,1,3]; +expected[500000 + 2] = "X"; + +assertEquals(expected, r3); + +var arr3 = []; +var trace = []; +var expectedTrace = [] +function mkGetter(i) { return function() { trace.push(i); }; } +arr3.length = 10000; +for (var i = 0; i < 100; i++) { + Object.defineProperty(arr3, i * i, {get: mkGetter(i)}); + expectedTrace[i] = i; + expectedTrace[100 + i] = i; +} +var r4 = [0].concat(arr3, arr3); +assertEquals(1 + arr3.length * 2, r4.length); +assertEquals(expectedTrace, trace); diff --git a/test/mjsunit/array-join.js b/test/mjsunit/array-join.js index c66e4626..ddd14967 100644 --- a/test/mjsunit/array-join.js +++ b/test/mjsunit/array-join.js @@ -27,19 +27,44 @@ // Test that array join calls toString on subarrays. var a = [[1,2],3,4,[5,6]]; +assertEquals('1,2345,6', a.join('')); assertEquals('1,2*3*4*5,6', a.join('*')); +assertEquals('1,2**3**4**5,6', a.join('**')); +assertEquals('1,2****3****4****5,6', a.join('****')); +assertEquals('1,2********3********4********5,6', a.join('********')); +assertEquals('1,2**********3**********4**********5,6', a.join('**********')); // Create a cycle. a.push(a); +assertEquals('1,2345,6', a.join('')); assertEquals('1,2*3*4*5,6*', a.join('*')); +assertEquals('1,2**3**4**5,6**', a.join('**')); +assertEquals('1,2****3****4****5,6****', a.join('****')); +assertEquals('1,2********3********4********5,6********', a.join('********')); +assertEquals('1,2**********3**********4**********5,6**********', a.join('**********')); // Replace array.prototype.toString. Array.prototype.toString = function() { return "array"; } +assertEquals('array34arrayarray', a.join('')); assertEquals('array*3*4*array*array', a.join('*')); +assertEquals('array**3**4**array**array', a.join('**')); +assertEquals('array****3****4****array****array', a.join('****')); +assertEquals('array********3********4********array********array', a.join('********')); +assertEquals('array**********3**********4**********array**********array', a.join('**********')); Array.prototype.toString = function() { throw 42; } +assertThrows("a.join('')"); assertThrows("a.join('*')"); +assertThrows("a.join('**')"); +assertThrows("a.join('****')"); +assertThrows("a.join('********')"); +assertThrows("a.join('**********')"); Array.prototype.toString = function() { return "array"; } +assertEquals('array34arrayarray', a.join('')); assertEquals('array*3*4*array*array', a.join('*')); +assertEquals('array**3**4**array**array', a.join('**')); +assertEquals('array****3****4****array****array', a.join('****')); +assertEquals('array********3********4********array********array', a.join('********')); +assertEquals('array**********3**********4**********array**********array', a.join('**********')); diff --git a/test/mjsunit/compiler/regress-valueof.js b/test/mjsunit/compiler/regress-valueof.js new file mode 100644 index 00000000..7b29b46a --- /dev/null +++ b/test/mjsunit/compiler/regress-valueof.js @@ -0,0 +1,35 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax + +// Test valueof with integer input. +function f(x) { var y = x + 1; return %_ValueOf(y); } + +for (var i=0; i<100000; i++) f(42); + +assertEquals(43, f(42)); diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js index 020e3c0c..cefef0a4 100644 --- a/test/mjsunit/fuzz-natives.js +++ b/test/mjsunit/fuzz-natives.js @@ -118,8 +118,9 @@ var knownProblems = { "Abort": true, // Avoid calling the concat operation, because weird lengths - // may lead to out-of-memory. + // may lead to out-of-memory. Ditto for StringBuilderJoin. "StringBuilderConcat": true, + "StringBuilderJoin": true, // These functions use pseudo-stack-pointers and are not robust // to unexpected integer values. diff --git a/test/mjsunit/indexed-value-properties.js b/test/mjsunit/indexed-value-properties.js new file mode 100644 index 00000000..92bb896e --- /dev/null +++ b/test/mjsunit/indexed-value-properties.js @@ -0,0 +1,56 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that the Number, String and Boolean prototypes are searched +// for indexed properties on value objects. + +function return_one() { return 1; }; + +function test(value) { + for (var i = 0; i < 10; i++) { + assertEquals(0, (value)[0]); + assertEquals(0, (value)["0"]); + assertEquals(return_one, (value)[1]); + assertEquals(return_one, (value)["1"]); + assertEquals(1, (value)[1]()); + assertEquals(1, (value)["1"]()); + } +} + +Number.prototype[0] = 0; +Number.prototype[1] = return_one; +test(0); +test(0.1); + +String.prototype[0] = 0; +String.prototype[1] = return_one; +test(""); + +Boolean.prototype[0] = 0; +Boolean.prototype[1] = return_one; +test(true); +test(false); diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js index 558282f5..fe580f35 100644 --- a/test/mjsunit/mjsunit.js +++ b/test/mjsunit/mjsunit.js @@ -104,6 +104,13 @@ function deepEquals(a, b) { } +function assertSame(expected, found, name_opt) { + if (found !== expected) { + fail(expected, found, name_opt); + } +} + + function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(expected, found, name_opt); diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 40127994..8f042ced 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -111,24 +111,6 @@ regress/regress-3247124: SKIP regress/regress-1132: SKIP ############################################################################## -[ $arch == arm && $crankshaft ] - -# Test that currently fails with crankshaft on ARM. -compiler/simple-osr: FAIL - -# BUG (1094) -regress/regress-deopt-gc: SKIP - -############################################################################## -[ $arch == x64 && $crankshaft ] - -# BUG (1026) This test is currently flaky. -compiler/simple-osr: SKIP - -# BUG (1094) -regress/regress-deopt-gc: SKIP - -############################################################################## [ $arch == mips ] # Skip all tests on MIPS. diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/override-eval-with-non-function.js new file mode 100644 index 00000000..aa93b253 --- /dev/null +++ b/test/mjsunit/override-eval-with-non-function.js @@ -0,0 +1,36 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// When 'eval' is overridden with a non-function object we should +// check whether the object is callable. + +function test() { + eval = /foo/; + assertEquals("foo", eval("foobar")); +} + +test(); diff --git a/test/mjsunit/regexp.js b/test/mjsunit/regexp.js index 8d776ad5..24e1b21e 100644 --- a/test/mjsunit/regexp.js +++ b/test/mjsunit/regexp.js @@ -676,3 +676,17 @@ assertEquals(["bc"], re.exec("zimzomzumbc")); assertFalse(re.test("c")); assertFalse(re.test("")); +// Valid syntax in ES5. +re = RegExp("(?:x)*"); +re = RegExp("(x)*"); + +// Syntax extension relative to ES5, for matching JSC (and ES3). +// Shouldn't throw. +re = RegExp("(?=x)*"); +re = RegExp("(?!x)*"); + +// Should throw. Shouldn't hit asserts in debug mode. +assertThrows("RegExp('(*)')"); +assertThrows("RegExp('(?:*)')"); +assertThrows("RegExp('(?=*)')"); +assertThrows("RegExp('(?!*)')"); diff --git a/test/mjsunit/regress/regress-1145.js b/test/mjsunit/regress/regress-1145.js new file mode 100644 index 00000000..16d5527b --- /dev/null +++ b/test/mjsunit/regress/regress-1145.js @@ -0,0 +1,54 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+// Flags: --opt-eagerly --debug-code --lazy
+
+// See: http://code.google.com/p/v8/issues/detail?id=1145
+// Should not throw a syntax error exception (change this if we make lazily
+// compiled functions with syntax errors into early errors).
+// Should not hit an assertion in debug mode.
+
+// A lazily compiled function with a syntax error that we attempt to inline
+// would set a pending exception that is then ignored (until it triggers
+// an assert).
+// This file must be at least 1024 bytes long to trigger lazy compilation.
+
+function f() { return 1; }
+
+// Must be lazy. Must throw SyntaxError during compilation.
+function fail() { continue; }
+
+function opt_me() {
+  var x = 1;
+  // Do lots of function calls and hope to be optimized.
+  for (var i = 0; i < 1000000; i++) {
+    x = f();
+  }
+  if (x == 0) fail(); // Hope to be inlined during optimization.
+}
+
+opt_me();
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/regress/regress-1146.js
new file mode 100644
index 00000000..e8028ce1
--- /dev/null
+++ b/test/mjsunit/regress/regress-1146.js
@@ -0,0 +1,48 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test keyed calls with different key types.
+function F() {}
+var a = new F();
+function f(i) { return a[i](); }
+
+a.first = function() { return 11; }
+a[0] = function() { return 22; }
+var obj = {};
+a[obj] = function() { return 33; }
+
+// Make object slow-case.
+a.foo = 0;
+delete a.foo;
+// Do multiple calls for IC transitions.
+var b = "first";
+f(b);
+f(b);
+
+assertEquals(11, f(b));
+assertEquals(22, f(0));
+assertEquals(33, f(obj));
diff --git a/test/mjsunit/regress/regress-1149.js b/test/mjsunit/regress/regress-1149.js
new file mode 100644
index 00000000..d7a7d1b9
--- /dev/null
+++ b/test/mjsunit/regress/regress-1149.js
@@ -0,0 +1,39 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// We should not try to record duplicate bailout IDs for the 'left-hand +// side' of a for/in, even if it is a parameter in a function using the +// arguments object. + +function f(x) { + for (x in arguments) { + for (x in arguments) { + } + } +} + +f(); diff --git a/test/mjsunit/regress/regress-1151.js b/test/mjsunit/regress/regress-1151.js new file mode 100644 index 00000000..8d0bca90 --- /dev/null +++ b/test/mjsunit/regress/regress-1151.js @@ -0,0 +1,49 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+// Test that we do not try to create prototypes on objects that have the
+// should_have_prototype flag set to false.
+
+__defineSetter__.__proto__ = function() {};
+__defineSetter__['prototype']
+
+eval.__proto__ = function () { };
+eval['prototype'] = {};
+
+// Test that we are compatible with Safari on prototypes set locally and
+// on the actual prototype set using __proto__ on objects that have the
+// should_have_prototype flag set to false.
+function f() { return 42; }
+f.prototype = 43;
+__defineGetter__.__proto__ = f;
+
+// Regression test for not returning undefined.
+assertEquals(__defineGetter__.prototype, 43);
+
+// Regression test for not crashing.
+__defineGetter__.prototype = "foo";
+assertEquals(__defineGetter__.prototype, "foo");
diff --git a/test/mjsunit/regress/regress-1156.js b/test/mjsunit/regress/regress-1156.js
new file mode 100644
index 00000000..8ec7f817
--- /dev/null
+++ b/test/mjsunit/regress/regress-1156.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse-inlining
+
+// Test that we do not crash when we invoke builtins from optimized code
+// that is then deoptimized.
+
+function foo(a) {
+  delete a[1];
+  delete a[2];
+  delete a[3];
+  delete a[4];
+  delete a[5];
+  return void 0;
+}
+
+function call_and_deopt() {
+  var b = [1,2,3];
+  foo(b);
+  foo(b);
+  %DeoptimizeFunction(foo);
+}
+
+call_and_deopt();
diff --git a/test/mjsunit/regress/regress-1160.js b/test/mjsunit/regress/regress-1160.js
new file mode 100644
index 00000000..8e6e29bd
--- /dev/null
+++ b/test/mjsunit/regress/regress-1160.js
@@ -0,0 +1,46 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=1160
+
+// Array.prototype.join uses a temporary array internally. Verify that it
+// does not crash, and instead throws an illegal argument exception, when a
+// keyed store on the array does not work as expected because of the setter
+// on its prototype.
+
+try {
+  var N = 100;
+  var array = Array(N);
+  for (var i = 0; i < N; ++i) {
+    array[i] = i;
+  }
+  Array.prototype.__defineSetter__(32, function() { });
+  // The next line throws. We should make it work even with changed
+  // prototype. See http://code.google.com/p/v8/issues/detail?id=1161
+  array.join(",");
+  assertUnreachable();
+} catch (e) { }
diff --git a/test/mjsunit/regress/regress-1166.js b/test/mjsunit/regress/regress-1166.js
new file mode 100644
index 00000000..d75d397e
--- /dev/null
+++ b/test/mjsunit/regress/regress-1166.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Deoptimization after a short-circuit logical operation in an effect +// context should not see the value of the expression. +function observe(x, y) { return x; } + +function test(x) { return observe(1, ((false || false), x + 1)); } + +for (var i = 0; i < 10000000; ++i) test(0); +test("a"); diff --git a/test/mjsunit/regress/regress-1167.js b/test/mjsunit/regress/regress-1167.js new file mode 100644 index 00000000..8437d83b --- /dev/null +++ b/test/mjsunit/regress/regress-1167.js @@ -0,0 +1,72 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Deoptimization after a logical not in an effect context should not see a +// value for the logical not expression. +function test0(n) { + var a = new Array(n); + for (var i = 0; i < n; ++i) { + // ~ of a non-numeric value is used to trigger deoptimization. + a[i] = void(!(delete 'object')) % ~(delete 4); + } +} + +// OSR (after deoptimization) is used to observe the stack height mismatch. +for (var i = 0; i < 5; ++i) { + for (var j = 1; j < 12; ++j) { + test0(j * 1000); + } +} + + +// Similar test with a different subexpression of unary !. +function test1(n) { + var a = new Array(n); + for (var i = 0; i < n; ++i) { + a[i] = void(!(- 'object')) % ~(delete 4); + } +} + +for (i = 0; i < 5; ++i) { + for (j = 1; j < 12; ++j) { + test1(j * 1000); + } +} + + +// A similar issue, different subexpression of unary ! (e0 !== e1 is +// translated into !(e0 == e1)) and different effect context. 
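// (Editorial sketch, not part of the commit, with a hypothetical name: the
// comma expression below has the same shape as test2 that follows, minus the
// optimization machinery. The parenthesized comparison is evaluated only for
// effect; its value is discarded and only x + 1 flows out.)
function comma_sketch(x) { return ((1 !== 2), x + 1); }
assertSame(3, comma_sketch(2));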
+function side_effect() { } +function observe(x, y) { return x; } +function test2(x) { + return observe(this, + (((side_effect.observe <= side_effect.side_effect) !== false), + x + 1)); +} + +for (var i = 0; i < 1000000; ++i) test2(0); +test2(test2); diff --git a/test/mjsunit/regress/regress-1170.js b/test/mjsunit/regress/regress-1170.js new file mode 100644 index 00000000..8a5a9cfb --- /dev/null +++ b/test/mjsunit/regress/regress-1170.js @@ -0,0 +1,66 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +var setter_value = 0; + +__proto__.__defineSetter__("a", function(v) { setter_value = v; }); +eval("var a = 1"); +assertEquals(1, setter_value); +assertFalse(hasOwnProperty("a")); + +eval("with({}) { eval('var a = 2') }"); +assertEquals(2, setter_value); +assertFalse(hasOwnProperty("a")); + +// Function declarations are treated specially to match Safari. We do +// not call setters for them. +eval("function a() {}"); +assertTrue(hasOwnProperty("a")); + +__proto__.__defineSetter__("b", function(v) { assertUnreachable(); }); +try { + eval("const b = 23"); + assertUnreachable(); +} catch(e) { + assertTrue(/TypeError/.test(e)); +} +try { + eval("with({}) { eval('const b = 23') }"); + assertUnreachable(); +} catch(e) { + assertTrue(/TypeError/.test(e)); +} + +__proto__.__defineSetter__("c", function(v) { throw 42; }); +try { + eval("var c = 1"); + assertUnreachable(); +} catch(e) { + assertEquals(42, e); + assertFalse(hasOwnProperty("c")); +} + diff --git a/test/mjsunit/regress/regress-1105.js b/test/mjsunit/regress/regress-1172-bis.js index cfe2bd38..e8d5c812 100644 --- a/test/mjsunit/regress/regress-1105.js +++ b/test/mjsunit/regress/regress-1172-bis.js @@ -25,14 +25,13 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// This should properly catch the exception from the setter triggered -// by the loaded file, and it should not fail an assertion in debug mode. 
-
-__defineSetter__("x", function(){ throw 42; });
+// Verifies that exceptions thrown from JS accessors when attempting a call
+// are properly handled.
+Object.prototype.__defineGetter__(0, function() { throw 42; });
 try {
-  this.eval('function x(){}');
-  assertUnreachable();
-} catch (e) {
-  assertEquals(42, e);
+  Object[0]();
+  assertUnreachable();
+} catch(e) {
+  assertEquals(42, e);
 }
diff --git a/test/mjsunit/regress/regress-1172.js b/test/mjsunit/regress/regress-1172.js
new file mode 100644
index 00000000..f5ef67b8
--- /dev/null
+++ b/test/mjsunit/regress/regress-1172.js
@@ -0,0 +1,39 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that 'constructor' property is forcefully installed on
+// function's prototype even in the presence of JS accessors.
+
+// Note: there are no setters here, so any attempt to use the JS accessors
+// to set the 'constructor' property would lead to a runtime exception.
+Object.prototype.__defineGetter__('constructor', function() { throw 42; });
+
+function f() {}
+assertSame(f, f.prototype.constructor);
+
+var o = new f();
+assertSame(f, o.constructor);
diff --git a/test/mjsunit/regress/regress-1174.js b/test/mjsunit/regress/regress-1174.js
new file mode 100644
index 00000000..7c014bf0
--- /dev/null
+++ b/test/mjsunit/regress/regress-1174.js
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test that we do not crash when doing deoptimization of a function that has
+// reloc info that only takes up 1 byte per call (like KeyedStoreIC).
+
+function Regular() {
+  this[0] >>= 0;
+  this[1] ^= 1;
+}
+
+function foo() {
+  var regular = new Regular();
+  %DeoptimizeFunction(Regular);
+}
+
+foo();
diff --git a/test/mjsunit/regress/regress-1176.js b/test/mjsunit/regress/regress-1176.js
new file mode 100644
index 00000000..58eda1bf
--- /dev/null
+++ b/test/mjsunit/regress/regress-1176.js
@@ -0,0 +1,33 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+function strict_delete_this() {
+  // "delete this" is allowed in strict mode.
+  delete this;
+}
+strict_delete_this();
diff --git a/test/mjsunit/regress/regress-1181.js b/test/mjsunit/regress/regress-1181.js
new file mode 100644
index 00000000..d45a0bee
--- /dev/null
+++ b/test/mjsunit/regress/regress-1181.js
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// For the first count times, test is called with an integer argument, and
+// Crankshaft produces code for an int32 representation. Test that the
+// implementation correctly deoptimizes.
+
+// Flags: --allow-natives-syntax
+
+function test(x) {
+  var xp = x * 1 - 1;
+  return xp;
+}
+
+
+function check(count) {
+  %DeoptimizeFunction(test);
+  var i;
+  for(var x=0; x < count; x++){
+    for(var y=0; y < count; y++){
+      i = test(x / 100);
+    }
+  }
+  assertEquals((count - 1) / 100, i + 1);
+}
+
+
+check(150);
+check(200);
+check(350)
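// (Editorial note, not part of the commit, working through the arithmetic:
// on the last iteration x == count - 1, so
// i == test((count - 1) / 100) == (count - 1) / 100 - 1, which is exactly
// what assertEquals((count - 1) / 100, i + 1) above checks.)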
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-1184.js b/test/mjsunit/regress/regress-1184.js
new file mode 100644
index 00000000..0bb1b3c0
--- /dev/null
+++ b/test/mjsunit/regress/regress-1184.js
@@ -0,0 +1,47 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test the case where the finally clause throws another exception (a stack
+// overflow) that goes through some try/catch block; we need to clear the
+// v8::TryCatch catcher, as it no longer catches the original exception.
+
+o = {};
+o.__defineGetter__('foo', function() { throw 42; });
+function f() {
+  try {
+    // The throw below sets up Top::thread_local_.catcher_...
+    throw 42;
+  } finally {
+    // ...the JS accessor traverses the v8 runtime/JS boundary and, when
+    // coming back from JS to the v8 runtime, retraverses the stack with the
+    // catcher set while processing an exception that is not caught by the
+    // external try/catch.
+    try { o.foo; } catch(e) { };
+    return;
+  }
+};
+f();
diff --git a/test/mjsunit/regress/regress-1207.js b/test/mjsunit/regress/regress-1207.js
new file mode 100644
index 00000000..102178ab
--- /dev/null
+++ b/test/mjsunit/regress/regress-1207.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test throwing an exception from instanceof. +try { +var object = { }; +function fib(n) { + var f0 = (object instanceof encodeURI)('#2: var x = 1; x <= 1 === true'), f1 = 1; +} +fib(75); +} catch (o) { } diff --git a/test/mjsunit/regress/regress-1209.js b/test/mjsunit/regress/regress-1209.js new file mode 100644 index 00000000..c017fb51 --- /dev/null +++ b/test/mjsunit/regress/regress-1209.js @@ -0,0 +1,34 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +function crashMe(n) { + var nasty = []; + while (n--) + nasty.push("a" + 0); + return Function.apply(null, nasty); +} +crashMe(64 + 1).length; diff --git a/test/mjsunit/regress/regress-1210.js b/test/mjsunit/regress/regress-1210.js new file mode 100644 index 00000000..9c708a58 --- /dev/null +++ b/test/mjsunit/regress/regress-1210.js @@ -0,0 +1,48 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Deoptimization of the key expression in an arguments access should see +// the arguments object as the value of the receiver. + +var a = 0; + +function observe(x, y) { return x; } + +function side_effect(x) { a = x; } + +function test() { + // We will trigger deoptimization of 'a + 0' which should bail out to + // immediately after the call to 'side_effect' (i.e., still in the key + // subexpression of the arguments access). + return observe(a, arguments[side_effect(a), a + 0]); +} + +// Run enough to optimize assuming global 'a' is a smi. +for (var i = 0; i < 1000000; ++i) test(0); + +a = "hello"; +test(0); diff --git a/test/mjsunit/regress/regress-1213.js b/test/mjsunit/regress/regress-1213.js new file mode 100644 index 00000000..d66e3cef --- /dev/null +++ b/test/mjsunit/regress/regress-1213.js @@ -0,0 +1,43 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that we do not allow overwriting a global property with a +// redeclaration that makes the property configurable (and hence +// deletable). + +var x = 0; + +function TestGlobal() { + for (var i = 0; i < 2; i++) { + x = x + 1; + } + this.eval('function x() {};'); + delete this['x']; +} + +TestGlobal(); +TestGlobal(); diff --git a/test/mjsunit/regress/regress-1218.js b/test/mjsunit/regress/regress-1218.js new file mode 100644 index 00000000..dd036edb --- /dev/null +++ b/test/mjsunit/regress/regress-1218.js @@ -0,0 +1,29 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Builtin functions should not have prototype objects. +assertFalse(Error.prototype.toString.hasOwnProperty("prototype")); diff --git a/test/mjsunit/regress/regress-crbug-72736.js b/test/mjsunit/regress/regress-crbug-72736.js new file mode 100644 index 00000000..4b4b1457 --- /dev/null +++ b/test/mjsunit/regress/regress-crbug-72736.js @@ -0,0 +1,37 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See http://crbug.com/72736
+
+// This tests that Object.defineProperty actually allows changing the value
+// of a non-writable property if configurable is true.
+
+var obj = {};
+Object.defineProperty(obj, 'foo', { value: 10, configurable: true });
+assertEquals(obj.foo, 10);
+Object.defineProperty(obj, 'foo', { value: 20, configurable: true });
+assertEquals(obj.foo, 20);
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index 6b775fcb..69be19c2 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -169,13 +169,20 @@ CheckStrictMode("var x = { '1234' : 1, '2345' : 2, '1234' : 3 };", SyntaxError);
 CheckStrictMode("var x = { '1234' : 1, '2345' : 2, 1234 : 3 };", SyntaxError);
 CheckStrictMode("var x = { 3.14 : 1, 2.71 : 2, 3.14 : 3 };", SyntaxError);
 CheckStrictMode("var x = { 3.14 : 1, '3.14' : 2 };", SyntaxError);
-CheckStrictMode("var x = { 123: 1, 123.00000000000000000000000000000000000000000000000000000000000000000001 : 2 }", SyntaxError);
+CheckStrictMode("var x = { \
+  123: 1, \
+  123.00000000000000000000000000000000000000000000000000000000000000000001: 2 \
+}", SyntaxError);
 
 // Non-conflicting data properties.
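// (Editorial note, not part of the commit: these cases do not conflict
// because the numeric literal 123 names the property "123", while the string
// key "0123" and the long decimal string below are distinct property names
// that do not canonicalize to "123". Written as a numeric literal instead,
// the long decimal rounds to 123 and is a duplicate, which is what the
// strict-mode check above rejects.)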
(function StrictModeNonDuplicate() { "use strict"; var x = { 123 : 1, "0123" : 2 }; - var x = { 123: 1, '123.00000000000000000000000000000000000000000000000000000000000000000001' : 2 } + var x = { + 123: 1, + '123.00000000000000000000000000000000000000000000000000000000000000000001': + 2 + }; })(); // Two getters (non-strict) @@ -214,23 +221,32 @@ assertThrows("var x = { '12': 1, get 12(){}};", SyntaxError); CheckStrictMode("function strict() { eval = undefined; }", SyntaxError); CheckStrictMode("function strict() { arguments = undefined; }", SyntaxError); CheckStrictMode("function strict() { print(eval = undefined); }", SyntaxError); -CheckStrictMode("function strict() { print(arguments = undefined); }", SyntaxError); +CheckStrictMode("function strict() { print(arguments = undefined); }", + SyntaxError); CheckStrictMode("function strict() { var x = eval = undefined; }", SyntaxError); -CheckStrictMode("function strict() { var x = arguments = undefined; }", SyntaxError); +CheckStrictMode("function strict() { var x = arguments = undefined; }", + SyntaxError); // Compound assignment to eval or arguments CheckStrictMode("function strict() { eval *= undefined; }", SyntaxError); CheckStrictMode("function strict() { arguments /= undefined; }", SyntaxError); CheckStrictMode("function strict() { print(eval %= undefined); }", SyntaxError); -CheckStrictMode("function strict() { print(arguments %= undefined); }", SyntaxError); -CheckStrictMode("function strict() { var x = eval += undefined; }", SyntaxError); -CheckStrictMode("function strict() { var x = arguments -= undefined; }", SyntaxError); +CheckStrictMode("function strict() { print(arguments %= undefined); }", + SyntaxError); +CheckStrictMode("function strict() { var x = eval += undefined; }", + SyntaxError); +CheckStrictMode("function strict() { var x = arguments -= undefined; }", + SyntaxError); CheckStrictMode("function strict() { eval <<= undefined; }", SyntaxError); CheckStrictMode("function strict() { arguments >>= undefined; }", SyntaxError); -CheckStrictMode("function strict() { print(eval >>>= undefined); }", SyntaxError); -CheckStrictMode("function strict() { print(arguments &= undefined); }", SyntaxError); -CheckStrictMode("function strict() { var x = eval ^= undefined; }", SyntaxError); -CheckStrictMode("function strict() { var x = arguments |= undefined; }", SyntaxError); +CheckStrictMode("function strict() { print(eval >>>= undefined); }", + SyntaxError); +CheckStrictMode("function strict() { print(arguments &= undefined); }", + SyntaxError); +CheckStrictMode("function strict() { var x = eval ^= undefined; }", + SyntaxError); +CheckStrictMode("function strict() { var x = arguments |= undefined; }", + SyntaxError); // Postfix increment with eval or arguments CheckStrictMode("function strict() { eval++; }", SyntaxError); @@ -264,6 +280,37 @@ CheckStrictMode("function strict() { print(--arguments); }", SyntaxError); CheckStrictMode("function strict() { var x = --eval; }", SyntaxError); CheckStrictMode("function strict() { var x = --arguments; }", SyntaxError); +// Use of const in strict mode is disallowed in anticipation of ES Harmony. 
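// (Editorial note, not part of the commit: CheckStrictMode runs each snippet
// in both sloppy and strict mode and expects an exception only in the strict
// case, so sloppy-mode 'const' remains accepted as a V8 extension at this
// revision; only the strict-mode uses below are rejected with SyntaxError.)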
+CheckStrictMode("const x = 0;", SyntaxError); +CheckStrictMode("for (const x = 0; false;) {}", SyntaxError); +CheckStrictMode("function strict() { const x = 0; }", SyntaxError); + +// Strict mode only allows functions in SourceElements +CheckStrictMode("if (true) { function invalid() {} }", SyntaxError); +CheckStrictMode("for (;false;) { function invalid() {} }", SyntaxError); +CheckStrictMode("{ function invalid() {} }", SyntaxError); +CheckStrictMode("try { function invalid() {} } catch(e) {}", SyntaxError); +CheckStrictMode("try { } catch(e) { function invalid() {} }", SyntaxError); +CheckStrictMode("function outer() {{ function invalid() {} }}", SyntaxError); + +// Delete of an unqualified identifier +CheckStrictMode("delete unqualified;", SyntaxError); +CheckStrictMode("function strict() { delete unqualified; }", SyntaxError); +CheckStrictMode("function function_name() { delete function_name; }", + SyntaxError); +CheckStrictMode("function strict(parameter) { delete parameter; }", + SyntaxError); +CheckStrictMode("function strict() { var variable; delete variable; }", + SyntaxError); +CheckStrictMode("var variable; delete variable;", SyntaxError); + +(function TestStrictDelete() { + "use strict"; + // "delete this" is allowed in strict mode and should work. + function strict_delete() { delete this; } + strict_delete(); +})(); + // Prefix unary operators other than delete, ++, -- are valid in strict mode (function StrictModeUnaryOperators() { "use strict"; @@ -318,17 +365,22 @@ function testFutureReservedWord(word) { // Function names and arguments when the body is strict assertThrows("function " + word + " () { 'use strict'; }", SyntaxError); assertThrows("function foo (" + word + ") 'use strict'; {}", SyntaxError); - assertThrows("function foo (" + word + ", " + word + ") { 'use strict'; }", SyntaxError); + assertThrows("function foo (" + word + ", " + word + ") { 'use strict'; }", + SyntaxError); assertThrows("function foo (a, " + word + ") { 'use strict'; }", SyntaxError); assertThrows("function foo (" + word + ", a) { 'use strict'; }", SyntaxError); - assertThrows("function foo (a, " + word + ", b) { 'use strict'; }", SyntaxError); - assertThrows("var foo = function (" + word + ") { 'use strict'; }", SyntaxError); + assertThrows("function foo (a, " + word + ", b) { 'use strict'; }", + SyntaxError); + assertThrows("var foo = function (" + word + ") { 'use strict'; }", + SyntaxError); // get/set when the body is strict eval("var x = { get " + word + " () { 'use strict'; } };"); eval("var x = { set " + word + " (value) { 'use strict'; } };"); - assertThrows("var x = { get foo(" + word + ") { 'use strict'; } };", SyntaxError); - assertThrows("var x = { set foo(" + word + ") { 'use strict'; } };", SyntaxError); + assertThrows("var x = { get foo(" + word + ") { 'use strict'; } };", + SyntaxError); + assertThrows("var x = { set foo(" + word + ") { 'use strict'; } };", + SyntaxError); } for (var i = 0; i < future_reserved_words.length; i++) { @@ -374,3 +426,534 @@ delete possibly_undefined_variable_for_strict_mode_test; repeat(10, function() { testAssignToUndefined(true); }); possibly_undefined_variable_for_strict_mode_test = undefined; repeat(10, function() { testAssignToUndefined(false); }); + +(function testDeleteNonConfigurable() { + function delete_property(o) { + "use strict"; + delete o.property; + } + function delete_element(o, i) { + "use strict"; + delete o[i]; + } + + var object = {}; + + Object.defineProperty(object, "property", { value: "property_value" }); + 
Object.defineProperty(object, "1", { value: "one" }); + Object.defineProperty(object, 7, { value: "seven" }); + Object.defineProperty(object, 3.14, { value: "pi" }); + + assertThrows(function() { delete_property(object); }, TypeError); + assertEquals(object.property, "property_value"); + assertThrows(function() { delete_element(object, "1"); }, TypeError); + assertThrows(function() { delete_element(object, 1); }, TypeError); + assertEquals(object[1], "one"); + assertThrows(function() { delete_element(object, "7"); }, TypeError); + assertThrows(function() { delete_element(object, 7); }, TypeError); + assertEquals(object[7], "seven"); + assertThrows(function() { delete_element(object, "3.14"); }, TypeError); + assertThrows(function() { delete_element(object, 3.14); }, TypeError); + assertEquals(object[3.14], "pi"); +})(); + +// Not transforming this in Function.call and Function.apply. +(function testThisTransformCallApply() { + function non_strict() { + return this; + } + function strict() { + "use strict"; + return this; + } + + var global_object = (function() { return this; })(); + var object = {}; + + // Non-strict call. + assertTrue(non_strict.call(null) === global_object); + assertTrue(non_strict.call(undefined) === global_object); + assertEquals(typeof non_strict.call(7), "object"); + assertEquals(typeof non_strict.call("Hello"), "object"); + assertTrue(non_strict.call(object) === object); + + // Non-strict apply. + assertTrue(non_strict.apply(null) === global_object); + assertTrue(non_strict.apply(undefined) === global_object); + assertEquals(typeof non_strict.apply(7), "object"); + assertEquals(typeof non_strict.apply("Hello"), "object"); + assertTrue(non_strict.apply(object) === object); + + // Strict call. + assertTrue(strict.call(null) === null); + assertTrue(strict.call(undefined) === undefined); + assertEquals(typeof strict.call(7), "number"); + assertEquals(typeof strict.call("Hello"), "string"); + assertTrue(strict.call(object) === object); + + // Strict apply. + assertTrue(strict.apply(null) === null); + assertTrue(strict.apply(undefined) === undefined); + assertEquals(typeof strict.apply(7), "number"); + assertEquals(typeof strict.apply("Hello"), "string"); + assertTrue(strict.apply(object) === object); +})(); + +(function testThisTransform() { + try { + function strict() { + "use strict"; + return typeof(this); + } + function nonstrict() { + return typeof(this); + } + + // Concat to avoid symbol. 
+ var strict_name = "str" + "ict"; + var nonstrict_name = "non" + "str" + "ict"; + var strict_number = 17; + var nonstrict_number = 19; + var strict_name_get = "str" + "ict" + "get"; + var nonstrict_name_get = "non" + "str" + "ict" + "get" + var strict_number_get = 23; + var nonstrict_number_get = 29; + + function install(t) { + t.prototype.strict = strict; + t.prototype.nonstrict = nonstrict; + t.prototype[strict_number] = strict; + t.prototype[nonstrict_number] = nonstrict; + Object.defineProperty(t.prototype, strict_name_get, + { get: function() { return strict; }, + configurable: true }); + Object.defineProperty(t.prototype, nonstrict_name_get, + { get: function() { return nonstrict; }, + configurable: true }); + Object.defineProperty(t.prototype, strict_number_get, + { get: function() { return strict; }, + configurable: true }); + Object.defineProperty(t.prototype, nonstrict_number_get, + { get: function() { return nonstrict; }, + configurable: true }); + } + + function cleanup(t) { + delete t.prototype.strict; + delete t.prototype.nonstrict; + delete t.prototype[strict_number]; + delete t.prototype[nonstrict_number]; + delete t.prototype[strict_name_get]; + delete t.prototype[nonstrict_name_get]; + delete t.prototype[strict_number_get]; + delete t.prototype[nonstrict_number_get]; + } + + // Set up fakes + install(String); + install(Number); + install(Boolean) + + function callStrict(o) { + return o.strict(); + } + function callNonStrict(o) { + return o.nonstrict(); + } + function callKeyedStrict(o) { + return o[strict_name](); + } + function callKeyedNonStrict(o) { + return o[nonstrict_name](); + } + function callIndexedStrict(o) { + return o[strict_number](); + } + function callIndexedNonStrict(o) { + return o[nonstrict_number](); + } + function callStrictGet(o) { + return o.strictget(); + } + function callNonStrictGet(o) { + return o.nonstrictget(); + } + function callKeyedStrictGet(o) { + return o[strict_name_get](); + } + function callKeyedNonStrictGet(o) { + return o[nonstrict_name_get](); + } + function callIndexedStrictGet(o) { + return o[strict_number_get](); + } + function callIndexedNonStrictGet(o) { + return o[nonstrict_number_get](); + } + + for (var i = 0; i < 10; i ++) { + assertEquals(("hello").strict(), "string"); + assertEquals(("hello").nonstrict(), "object"); + assertEquals(("hello")[strict_name](), "string"); + assertEquals(("hello")[nonstrict_name](), "object"); + assertEquals(("hello")[strict_number](), "string"); + assertEquals(("hello")[nonstrict_number](), "object"); + + assertEquals((10 + i).strict(), "number"); + assertEquals((10 + i).nonstrict(), "object"); + assertEquals((10 + i)[strict_name](), "number"); + assertEquals((10 + i)[nonstrict_name](), "object"); + assertEquals((10 + i)[strict_number](), "number"); + assertEquals((10 + i)[nonstrict_number](), "object"); + + assertEquals((true).strict(), "boolean"); + assertEquals((true).nonstrict(), "object"); + assertEquals((true)[strict_name](), "boolean"); + assertEquals((true)[nonstrict_name](), "object"); + assertEquals((true)[strict_number](), "boolean"); + assertEquals((true)[nonstrict_number](), "object"); + + assertEquals((false).strict(), "boolean"); + assertEquals((false).nonstrict(), "object"); + assertEquals((false)[strict_name](), "boolean"); + assertEquals((false)[nonstrict_name](), "object"); + assertEquals((false)[strict_number](), "boolean"); + assertEquals((false)[nonstrict_number](), "object"); + + assertEquals(callStrict("howdy"), "string"); + assertEquals(callNonStrict("howdy"), 
"object"); + assertEquals(callKeyedStrict("howdy"), "string"); + assertEquals(callKeyedNonStrict("howdy"), "object"); + assertEquals(callIndexedStrict("howdy"), "string"); + assertEquals(callIndexedNonStrict("howdy"), "object"); + + assertEquals(callStrict(17 + i), "number"); + assertEquals(callNonStrict(17 + i), "object"); + assertEquals(callKeyedStrict(17 + i), "number"); + assertEquals(callKeyedNonStrict(17 + i), "object"); + assertEquals(callIndexedStrict(17 + i), "number"); + assertEquals(callIndexedNonStrict(17 + i), "object"); + + assertEquals(callStrict(true), "boolean"); + assertEquals(callNonStrict(true), "object"); + assertEquals(callKeyedStrict(true), "boolean"); + assertEquals(callKeyedNonStrict(true), "object"); + assertEquals(callIndexedStrict(true), "boolean"); + assertEquals(callIndexedNonStrict(true), "object"); + + assertEquals(callStrict(false), "boolean"); + assertEquals(callNonStrict(false), "object"); + assertEquals(callKeyedStrict(false), "boolean"); + assertEquals(callKeyedNonStrict(false), "object"); + assertEquals(callIndexedStrict(false), "boolean"); + assertEquals(callIndexedNonStrict(false), "object"); + + // All of the above, with getters + assertEquals(("hello").strictget(), "string"); + assertEquals(("hello").nonstrictget(), "object"); + assertEquals(("hello")[strict_name_get](), "string"); + assertEquals(("hello")[nonstrict_name_get](), "object"); + assertEquals(("hello")[strict_number_get](), "string"); + assertEquals(("hello")[nonstrict_number_get](), "object"); + + assertEquals((10 + i).strictget(), "number"); + assertEquals((10 + i).nonstrictget(), "object"); + assertEquals((10 + i)[strict_name_get](), "number"); + assertEquals((10 + i)[nonstrict_name_get](), "object"); + assertEquals((10 + i)[strict_number_get](), "number"); + assertEquals((10 + i)[nonstrict_number_get](), "object"); + + assertEquals((true).strictget(), "boolean"); + assertEquals((true).nonstrictget(), "object"); + assertEquals((true)[strict_name_get](), "boolean"); + assertEquals((true)[nonstrict_name_get](), "object"); + assertEquals((true)[strict_number_get](), "boolean"); + assertEquals((true)[nonstrict_number_get](), "object"); + + assertEquals((false).strictget(), "boolean"); + assertEquals((false).nonstrictget(), "object"); + assertEquals((false)[strict_name_get](), "boolean"); + assertEquals((false)[nonstrict_name_get](), "object"); + assertEquals((false)[strict_number_get](), "boolean"); + assertEquals((false)[nonstrict_number_get](), "object"); + + assertEquals(callStrictGet("howdy"), "string"); + assertEquals(callNonStrictGet("howdy"), "object"); + assertEquals(callKeyedStrictGet("howdy"), "string"); + assertEquals(callKeyedNonStrictGet("howdy"), "object"); + assertEquals(callIndexedStrictGet("howdy"), "string"); + assertEquals(callIndexedNonStrictGet("howdy"), "object"); + + assertEquals(callStrictGet(17 + i), "number"); + assertEquals(callNonStrictGet(17 + i), "object"); + assertEquals(callKeyedStrictGet(17 + i), "number"); + assertEquals(callKeyedNonStrictGet(17 + i), "object"); + assertEquals(callIndexedStrictGet(17 + i), "number"); + assertEquals(callIndexedNonStrictGet(17 + i), "object"); + + assertEquals(callStrictGet(true), "boolean"); + assertEquals(callNonStrictGet(true), "object"); + assertEquals(callKeyedStrictGet(true), "boolean"); + assertEquals(callKeyedNonStrictGet(true), "object"); + assertEquals(callIndexedStrictGet(true), "boolean"); + assertEquals(callIndexedNonStrictGet(true), "object"); + + assertEquals(callStrictGet(false), "boolean"); + 
assertEquals(callNonStrictGet(false), "object"); + assertEquals(callKeyedStrictGet(false), "boolean"); + assertEquals(callKeyedNonStrictGet(false), "object"); + assertEquals(callIndexedStrictGet(false), "boolean"); + assertEquals(callIndexedNonStrictGet(false), "object"); + + } + } finally { + // Cleanup + cleanup(String); + cleanup(Number); + cleanup(Boolean); + } +})(); + + +(function ObjectEnvironment() { + var o = {}; + Object.defineProperty(o, "foo", { value: "FOO", writable: false }); + assertThrows( + function () { + with (o) { + (function() { + "use strict"; + foo = "Hello"; + })(); + } + }, + TypeError); +})(); + + +(function TestSetPropertyWithoutSetter() { + var o = { get foo() { return "Yey"; } }; + assertThrows( + function broken() { + "use strict"; + o.foo = (0xBADBAD00 >> 1); + }, + TypeError); +})(); + + +(function TestSetPropertyNonConfigurable() { + var frozen = Object.freeze({}); + var sealed = Object.seal({}); + + function strict(o) { + "use strict"; + o.property = "value"; + } + + assertThrows(function() { strict(frozen); }, TypeError); + assertThrows(function() { strict(sealed); }, TypeError); +})(); + + +(function TestAssignmentToReadOnlyProperty() { + "use strict"; + + var o = {}; + Object.defineProperty(o, "property", { value: 7 }); + + assertThrows(function() { o.property = "new value"; }, TypeError); + assertThrows(function() { o.property += 10; }, TypeError); + assertThrows(function() { o.property -= 10; }, TypeError); + assertThrows(function() { o.property *= 10; }, TypeError); + assertThrows(function() { o.property /= 10; }, TypeError); + assertThrows(function() { o.property++; }, TypeError); + assertThrows(function() { o.property--; }, TypeError); + assertThrows(function() { ++o.property; }, TypeError); + assertThrows(function() { --o.property; }, TypeError); + + var name = "prop" + "erty"; // to avoid symbol path. + assertThrows(function() { o[name] = "new value"; }, TypeError); + assertThrows(function() { o[name] += 10; }, TypeError); + assertThrows(function() { o[name] -= 10; }, TypeError); + assertThrows(function() { o[name] *= 10; }, TypeError); + assertThrows(function() { o[name] /= 10; }, TypeError); + assertThrows(function() { o[name]++; }, TypeError); + assertThrows(function() { o[name]--; }, TypeError); + assertThrows(function() { ++o[name]; }, TypeError); + assertThrows(function() { --o[name]; }, TypeError); + + assertEquals(o.property, 7); +})(); + + +(function TestAssignmentToReadOnlyLoop() { + var name = "prop" + "erty"; // to avoid symbol path. + var o = {}; + Object.defineProperty(o, "property", { value: 7 }); + + function strict(o, name) { + "use strict"; + o[name] = "new value"; + } + + for (var i = 0; i < 10; i ++) { + try { + strict(o, name); + assertUnreachable(); + } catch(e) { + assertInstanceof(e, TypeError); + } + } +})(); + + +// Specialized KeyedStoreIC experiencing miss. 
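+// (The loop first lets the IC specialize for smi keys, then forces misses
+// with a plain string key, a numeric string, and an out-of-bounds index.)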
+(function testKeyedStoreICStrict() { + var o = [9,8,7,6,5,4,3,2,1]; + + function test(o, i, v) { + "use strict"; + o[i] = v; + } + + for (var i = 0; i < 10; i ++) { + test(o, 5, 17); // start specialized for smi indices + assertEquals(o[5], 17); + test(o, "a", 19); + assertEquals(o["a"], 19); + test(o, "5", 29); + assertEquals(o[5], 29); + test(o, 100000, 31); + assertEquals(o[100000], 31); + } +})(); + + +(function TestSetElementWithoutSetter() { + "use strict"; + + var o = { }; + Object.defineProperty(o, 0, { get : function() { } }); + + var zero_smi = 0; + var zero_number = new Number(0); + var zero_symbol = "0"; + var zero_string = "-0-".substring(1,2); + + assertThrows(function() { o[zero_smi] = "new value"; }, TypeError); + assertThrows(function() { o[zero_number] = "new value"; }, TypeError); + assertThrows(function() { o[zero_symbol] = "new value"; }, TypeError); + assertThrows(function() { o[zero_string] = "new value"; }, TypeError); +})(); + + +(function TestSetElementNonConfigurable() { + "use strict"; + var frozen = Object.freeze({}); + var sealed = Object.seal({}); + + var zero_number = 0; + var zero_symbol = "0"; + var zero_string = "-0-".substring(1,2); + + assertThrows(function() { frozen[zero_number] = "value"; }, TypeError); + assertThrows(function() { sealed[zero_number] = "value"; }, TypeError); + assertThrows(function() { frozen[zero_symbol] = "value"; }, TypeError); + assertThrows(function() { sealed[zero_symbol] = "value"; }, TypeError); + assertThrows(function() { frozen[zero_string] = "value"; }, TypeError); + assertThrows(function() { sealed[zero_string] = "value"; }, TypeError); +})(); + + +(function TestAssignmentToReadOnlyElement() { + "use strict"; + + var o = {}; + Object.defineProperty(o, 7, { value: 17 }); + + var seven_smi = 7; + var seven_number = new Number(7); + var seven_symbol = "7"; + var seven_string = "-7-".substring(1,2); + + // Index with number. 
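+ // (The same read-only element is addressed as a smi, a Number wrapper, a
+ // numeric string, and a computed substring, covering each keyed-store key
+ // representation.)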
+ assertThrows(function() { o[seven_smi] = "value"; }, TypeError); + assertThrows(function() { o[seven_smi] += 10; }, TypeError); + assertThrows(function() { o[seven_smi] -= 10; }, TypeError); + assertThrows(function() { o[seven_smi] *= 10; }, TypeError); + assertThrows(function() { o[seven_smi] /= 10; }, TypeError); + assertThrows(function() { o[seven_smi]++; }, TypeError); + assertThrows(function() { o[seven_smi]--; }, TypeError); + assertThrows(function() { ++o[seven_smi]; }, TypeError); + assertThrows(function() { --o[seven_smi]; }, TypeError); + + assertThrows(function() { o[seven_number] = "value"; }, TypeError); + assertThrows(function() { o[seven_number] += 10; }, TypeError); + assertThrows(function() { o[seven_number] -= 10; }, TypeError); + assertThrows(function() { o[seven_number] *= 10; }, TypeError); + assertThrows(function() { o[seven_number] /= 10; }, TypeError); + assertThrows(function() { o[seven_number]++; }, TypeError); + assertThrows(function() { o[seven_number]--; }, TypeError); + assertThrows(function() { ++o[seven_number]; }, TypeError); + assertThrows(function() { --o[seven_number]; }, TypeError); + + assertThrows(function() { o[seven_symbol] = "value"; }, TypeError); + assertThrows(function() { o[seven_symbol] += 10; }, TypeError); + assertThrows(function() { o[seven_symbol] -= 10; }, TypeError); + assertThrows(function() { o[seven_symbol] *= 10; }, TypeError); + assertThrows(function() { o[seven_symbol] /= 10; }, TypeError); + assertThrows(function() { o[seven_symbol]++; }, TypeError); + assertThrows(function() { o[seven_symbol]--; }, TypeError); + assertThrows(function() { ++o[seven_symbol]; }, TypeError); + assertThrows(function() { --o[seven_symbol]; }, TypeError); + + assertThrows(function() { o[seven_string] = "value"; }, TypeError); + assertThrows(function() { o[seven_string] += 10; }, TypeError); + assertThrows(function() { o[seven_string] -= 10; }, TypeError); + assertThrows(function() { o[seven_string] *= 10; }, TypeError); + assertThrows(function() { o[seven_string] /= 10; }, TypeError); + assertThrows(function() { o[seven_string]++; }, TypeError); + assertThrows(function() { o[seven_string]--; }, TypeError); + assertThrows(function() { ++o[seven_string]; }, TypeError); + assertThrows(function() { --o[seven_string]; }, TypeError); + + assertEquals(o[seven_number], 17); + assertEquals(o[seven_symbol], 17); + assertEquals(o[seven_string], 17); +})(); + + +(function TestAssignmentToReadOnlyLoop() { + "use strict"; + + var o = {}; + Object.defineProperty(o, 7, { value: 17 }); + + var seven_smi = 7; + var seven_number = new Number(7); + var seven_symbol = "7"; + var seven_string = "-7-".substring(1,2); + + for (var i = 0; i < 10; i ++) { + assertThrows(function() { o[seven_smi] = "value" }, TypeError); + assertThrows(function() { o[seven_number] = "value" }, TypeError); + assertThrows(function() { o[seven_symbol] = "value" }, TypeError); + assertThrows(function() { o[seven_string] = "value" }, TypeError); + } + + assertEquals(o[7], 17); +})(); + + +(function TestAssignmentToStringLength() { + "use strict"; + + var str_val = "string"; + var str_obj = new String(str_val); + var str_cat = str_val + str_val + str_obj; + + assertThrows(function() { str_val.length = 1; }, TypeError); + assertThrows(function() { str_obj.length = 1; }, TypeError); + assertThrows(function() { str_cat.length = 1; }, TypeError); +})(); diff --git a/test/mjsunit/tools/tickprocessor-test-func-info.log b/test/mjsunit/tools/tickprocessor-test-func-info.log index 29a12f6f..755fbb2a 100644 
--- a/test/mjsunit/tools/tickprocessor-test-func-info.log +++ b/test/mjsunit/tools/tickprocessor-test-func-info.log @@ -3,11 +3,9 @@ shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000 shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000 profiler,"begin",1 code-creation,Stub,0x424260,348,"CompareStub_GE" -code-creation,LazyCompile,0x2a8100,18535,"DrawQube 3d-cube.js:188" -function-creation,0x2d11b8,0x2a8100 -code-creation,LazyCompile,0x480100,3908,"DrawLine 3d-cube.js:17" -function-creation,0x2d0f7c,0x480100 -tick,0x424284,0xbfffeea0,0x2d0f7c,0,0x2aaaa5 -tick,0x42429f,0xbfffed88,0x2d0f7c,0,0x2aacb4 +code-creation,LazyCompile,0x2a8100,18535,"DrawQube 3d-cube.js:188",0xf43abcac, +code-creation,LazyCompile,0x480100,3908,"DrawLine 3d-cube.js:17",0xf43abc50, +tick,0x424284,0xbfffeea0,0x480600,0,0x2aaaa5 +tick,0x42429f,0xbfffed88,0x480600,0,0x2aacb4 tick,0x48063d,0xbfffec7c,0x2d0f7c,0,0x2aaec6 profiler,"end" diff --git a/test/sputnik/README b/test/sputnik/README index 94c689bd..50d721f3 100644 --- a/test/sputnik/README +++ b/test/sputnik/README @@ -1,6 +1,6 @@ To run the sputniktests you must check out the test suite from googlecode.com. The test expectations are currently relative to -version 28. To get the tests run the following command within +version 94. To get the tests run the following command within v8/test/sputnik/ - svn co http://sputniktests.googlecode.com/svn/trunk/ -r28 sputniktests + svn co http://sputniktests.googlecode.com/svn/trunk/ -r94 sputniktests diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status index 966500d0..6da87eac 100644 --- a/test/sputnik/sputnik.status +++ b/test/sputnik/sputnik.status @@ -102,33 +102,20 @@ S7.8.4_A4.3_T5: FAIL_OK S7.8.4_A7.2_T5: FAIL_OK # We allow some keywords to be used as identifiers -S7.5.3_A1.17: FAIL_OK S7.5.3_A1.26: FAIL_OK S7.5.3_A1.18: FAIL_OK S7.5.3_A1.27: FAIL_OK -S7.5.3_A1.28: FAIL_OK -S7.5.3_A1.19: FAIL_OK -S7.5.3_A1.29: FAIL_OK -S7.5.3_A1.1: FAIL_OK -S7.5.3_A1.2: FAIL_OK -S7.5.3_A1.3: FAIL_OK -S7.5.3_A1.4: FAIL_OK S7.5.3_A1.5: FAIL_OK -S7.5.3_A1.8: FAIL_OK S7.5.3_A1.9: FAIL_OK S7.5.3_A1.10: FAIL_OK S7.5.3_A1.11: FAIL_OK +# native +S7.5.3_A1.20: FAIL_OK S7.5.3_A1.21: FAIL_OK -S7.5.3_A1.12: FAIL_OK -S7.5.3_A1.30: FAIL_OK -S7.5.3_A1.31: FAIL_OK -S7.5.3_A1.13: FAIL_OK S7.5.3_A1.22: FAIL_OK S7.5.3_A1.23: FAIL_OK -S7.5.3_A1.14: FAIL_OK S7.5.3_A1.15: FAIL_OK S7.5.3_A1.24: FAIL_OK -S7.5.3_A1.25: FAIL_OK S7.5.3_A1.16: FAIL_OK # This checks for non-262 behavior @@ -199,10 +186,40 @@ S9.9_A2: FAIL_OK S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug +# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1196 +S8.7_A5_T2: FAIL + +# V8 bugs: http://code.google.com/p/v8/issues/detail?id=1198 +# V8 should not wrap this when calling builtin functions +S15.2.4.3_A12: FAIL +S15.2.4.7_A13: FAIL +# Object.prototype.toString +S15.2.4.2_A12: FAIL +S15.2.4.2_A13: FAIL +# Object.prototype.toLocaleString +S15.2.4.3_A13: FAIL +S15.2.4.4_A13: FAIL +S15.2.4.4_A12: FAIL +# Object.prototype.propertyIsEnumerable +S15.2.4.7_A12: FAIL +# Object.prototype.hasOwnProperty +S15.2.4.5_A12: FAIL +S15.2.4.5_A13: FAIL +# Object.prototype.isPrototypeOf +S15.2.4.6_A13: FAIL +S15.2.4.6_A12: FAIL + +# Invalid test case (recent change adding var changes semantics) +S8.3_A1_T1: FAIL +# Test bug: http://code.google.com/p/sputniktests/issues/detail?id=35 +S15.5.4.8_A1_T1: FAIL +# Invalid test case (recent change adding var changes semantics) +S15.3_A3_T1: FAIL +# Invalid test case (recent change adding var 
changes semantics) +S15.3_A3_T3: FAIL # These tests fail because we had to add bugs to be compatible with JSC. See # http://code.google.com/p/chromium/issues/detail?id=1717 -S15.4.4_A1.1_T2: FAIL_OK S15.5.4.1_A1_T2: FAIL_OK S15.5.4_A1: FAIL_OK S15.5.4_A3: FAIL_OK diff --git a/test/sputnik/testcfg.py b/test/sputnik/testcfg.py index f7a5edcc..31e4b226 100644 --- a/test/sputnik/testcfg.py +++ b/test/sputnik/testcfg.py @@ -88,7 +88,8 @@ class SputnikTestConfiguration(test.TestConfiguration): sys.path.append(modroot) import sputnik globals()['sputnik'] = sputnik - test_suite = sputnik.TestSuite(testroot) + # Do not run strict mode tests yet. TODO(mmaly) + test_suite = sputnik.TestSuite(testroot, False) test_suite.Validate() tests = test_suite.EnumerateTests([]) result = [] diff --git a/tools/disasm.py b/tools/disasm.py new file mode 100644 index 00000000..c326382d --- /dev/null +++ b/tools/disasm.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +# +# Copyright 2011 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import re +import subprocess +import tempfile + + +# Avoid using the slow (google-specific) wrapper around objdump. +OBJDUMP_BIN = "/usr/bin/objdump" +if not os.path.exists(OBJDUMP_BIN): + OBJDUMP_BIN = "objdump" + + +_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"] + +_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$") +_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)") + +# Keys must match constants in Logger::LogCodeInfo. +_ARCH_MAP = { + "ia32": "-m i386", + "x64": "-m i386 -M x86-64", + "arm": "-m arm" # Not supported by our objdump build. +} + + +def GetDisasmLines(filename, offset, size, arch, inplace): + tmp_name = None + if not inplace: + # Create a temporary file containing a copy of the code. 
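+ # (The input here is a raw byte log, not an object file, so the code object
+ # is first copied out with dd and then disassembled as a plain binary blob
+ # via -b binary.)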
+ assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch + arch_flags = _ARCH_MAP[arch] + tmp_name = tempfile.mktemp(".v8code") + command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \ + "%s %s -D -b binary %s %s" % ( + filename, tmp_name, size, offset, + OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags, + tmp_name) + else: + command = "%s %s --start-address=%d --stop-address=%d -d %s " % ( + OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), + offset, + offset + size, + filename) + process = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = process.communicate() + lines = out.split("\n") + header_line = 0 + for i, line in enumerate(lines): + if _DISASM_HEADER_RE.match(line): + header_line = i + break + if tmp_name: + os.unlink(tmp_name) + split_lines = [] + for line in lines[header_line + 1:]: + match = _DISASM_LINE_RE.match(line) + if match: + line_address = int(match.group(1), 16) + split_lines.append((line_address, match.group(2))) + return split_lines diff --git a/tools/grokdump.py b/tools/grokdump.py new file mode 100755 index 00000000..de681b2b --- /dev/null +++ b/tools/grokdump.py @@ -0,0 +1,840 @@ +#!/usr/bin/env python +# +# Copyright 2011 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import ctypes +import mmap +import optparse +import os +import disasm +import sys +import types +import codecs +import re + + +USAGE="""usage: %prog [OPTION]... + +Minidump analyzer. + +Shows the processor state at the point of exception including the +stack of the active thread and the referenced objects in the V8 +heap. Code objects are disassembled and the addresses linked from the +stack (pushed return addresses) are marked with "=>". 
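+Only full minidumps of ia32 processes are currently understood; a non-full
+dump triggers a warning and most addresses will not resolve.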
+ + +Examples: + $ %prog 12345678-1234-1234-1234-123456789abcd-full.dmp +""" + +DEBUG=False + + +def DebugPrint(s): + if not DEBUG: return + print s + + +class Descriptor(object): + """Descriptor of a structure in a memory.""" + + def __init__(self, fields): + self.fields = fields + self.is_flexible = False + for _, type_or_func in fields: + if isinstance(type_or_func, types.FunctionType): + self.is_flexible = True + break + if not self.is_flexible: + self.ctype = Descriptor._GetCtype(fields) + self.size = ctypes.sizeof(self.ctype) + + def Read(self, memory, offset): + if self.is_flexible: + fields_copy = self.fields[:] + last = 0 + for name, type_or_func in fields_copy: + if isinstance(type_or_func, types.FunctionType): + partial_ctype = Descriptor._GetCtype(fields_copy[:last]) + partial_object = partial_ctype.from_buffer(memory, offset) + type = type_or_func(partial_object) + if type is not None: + fields_copy[last] = (name, type) + last += 1 + else: + last += 1 + complete_ctype = Descriptor._GetCtype(fields_copy[:last]) + else: + complete_ctype = self.ctype + return complete_ctype.from_buffer(memory, offset) + + @staticmethod + def _GetCtype(fields): + class Raw(ctypes.Structure): + _fields_ = fields + _pack_ = 1 + + def __str__(self): + return "{" + ", ".join("%s: %s" % (field, self.__getattribute__(field)) + for field, _ in Raw._fields_) + "}" + return Raw + + +# Set of structures and constants that describe the layout of minidump +# files. Based on MSDN and Google Breakpad. + +MINIDUMP_HEADER = Descriptor([ + ("signature", ctypes.c_uint32), + ("version", ctypes.c_uint32), + ("stream_count", ctypes.c_uint32), + ("stream_directories_rva", ctypes.c_uint32), + ("checksum", ctypes.c_uint32), + ("time_date_stampt", ctypes.c_uint32), + ("flags", ctypes.c_uint64) +]) + +MINIDUMP_LOCATION_DESCRIPTOR = Descriptor([ + ("data_size", ctypes.c_uint32), + ("rva", ctypes.c_uint32) +]) + +MINIDUMP_DIRECTORY = Descriptor([ + ("stream_type", ctypes.c_uint32), + ("location", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +MD_EXCEPTION_MAXIMUM_PARAMETERS = 15 + +MINIDUMP_EXCEPTION = Descriptor([ + ("code", ctypes.c_uint32), + ("flags", ctypes.c_uint32), + ("record", ctypes.c_uint64), + ("address", ctypes.c_uint64), + ("parameter_count", ctypes.c_uint32), + ("unused_alignment", ctypes.c_uint32), + ("information", ctypes.c_uint64 * MD_EXCEPTION_MAXIMUM_PARAMETERS) +]) + +MINIDUMP_EXCEPTION_STREAM = Descriptor([ + ("thread_id", ctypes.c_uint32), + ("unused_alignment", ctypes.c_uint32), + ("exception", MINIDUMP_EXCEPTION.ctype), + ("thread_context", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +# Stream types. 
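+# (Values mirror the MINIDUMP_STREAM_TYPE enumeration documented on MSDN and
+# in Google Breakpad.)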
+MD_UNUSED_STREAM = 0 +MD_RESERVED_STREAM_0 = 1 +MD_RESERVED_STREAM_1 = 2 +MD_THREAD_LIST_STREAM = 3 +MD_MODULE_LIST_STREAM = 4 +MD_MEMORY_LIST_STREAM = 5 +MD_EXCEPTION_STREAM = 6 +MD_SYSTEM_INFO_STREAM = 7 +MD_THREAD_EX_LIST_STREAM = 8 +MD_MEMORY_64_LIST_STREAM = 9 +MD_COMMENT_STREAM_A = 10 +MD_COMMENT_STREAM_W = 11 +MD_HANDLE_DATA_STREAM = 12 +MD_FUNCTION_TABLE_STREAM = 13 +MD_UNLOADED_MODULE_LIST_STREAM = 14 +MD_MISC_INFO_STREAM = 15 +MD_MEMORY_INFO_LIST_STREAM = 16 +MD_THREAD_INFO_LIST_STREAM = 17 +MD_HANDLE_OPERATION_LIST_STREAM = 18 + +MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE = 80 + +MINIDUMP_FLOATING_SAVE_AREA_X86 = Descriptor([ + ("control_word", ctypes.c_uint32), + ("status_word", ctypes.c_uint32), + ("tag_word", ctypes.c_uint32), + ("error_offset", ctypes.c_uint32), + ("error_selector", ctypes.c_uint32), + ("data_offset", ctypes.c_uint32), + ("data_selector", ctypes.c_uint32), + ("register_area", ctypes.c_uint8 * MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE), + ("cr0_npx_state", ctypes.c_uint32) +]) + +MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE = 512 + +# Context flags. +MD_CONTEXT_X86 = 0x00010000 +MD_CONTEXT_X86_CONTROL = (MD_CONTEXT_X86 | 0x00000001) +MD_CONTEXT_X86_INTEGER = (MD_CONTEXT_X86 | 0x00000002) +MD_CONTEXT_X86_SEGMENTS = (MD_CONTEXT_X86 | 0x00000004) +MD_CONTEXT_X86_FLOATING_POINT = (MD_CONTEXT_X86 | 0x00000008) +MD_CONTEXT_X86_DEBUG_REGISTERS = (MD_CONTEXT_X86 | 0x00000010) +MD_CONTEXT_X86_EXTENDED_REGISTERS = (MD_CONTEXT_X86 | 0x00000020) + +def EnableOnFlag(type, flag): + return lambda o: [None, type][int((o.context_flags & flag) != 0)] + +MINIDUMP_CONTEXT_X86 = Descriptor([ + ("context_flags", ctypes.c_uint32), + # MD_CONTEXT_X86_DEBUG_REGISTERS. + ("dr0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + # MD_CONTEXT_X86_FLOATING_POINT. + ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_X86.ctype, + MD_CONTEXT_X86_FLOATING_POINT)), + # MD_CONTEXT_X86_SEGMENTS. + ("gs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + ("fs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + ("es", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + ("ds", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + # MD_CONTEXT_X86_INTEGER. + ("edi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("esi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("ebx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("edx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("ecx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("eax", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + # MD_CONTEXT_X86_CONTROL. + ("ebp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("eip", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("cs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("esp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("ss", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + # MD_CONTEXT_X86_EXTENDED_REGISTERS. 
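+ # (A 512-byte FXSAVE-format area holding, among other things, SSE state;
+ # present only when the flag above is set.)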
+ ("extended_registers", + EnableOnFlag(ctypes.c_uint8 * MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE, + MD_CONTEXT_X86_EXTENDED_REGISTERS)) +]) + +MINIDUMP_MEMORY_DESCRIPTOR = Descriptor([ + ("start", ctypes.c_uint64), + ("memory", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +MINIDUMP_MEMORY_DESCRIPTOR64 = Descriptor([ + ("start", ctypes.c_uint64), + ("size", ctypes.c_uint64) +]) + +MINIDUMP_MEMORY_LIST = Descriptor([ + ("range_count", ctypes.c_uint32), + ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count) +]) + +MINIDUMP_MEMORY_LIST64 = Descriptor([ + ("range_count", ctypes.c_uint64), + ("base_rva", ctypes.c_uint64), + ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR64.ctype * m.range_count) +]) + +MINIDUMP_THREAD = Descriptor([ + ("id", ctypes.c_uint32), + ("suspend_count", ctypes.c_uint32), + ("priority_class", ctypes.c_uint32), + ("priority", ctypes.c_uint32), + ("ted", ctypes.c_uint64), + ("stack", MINIDUMP_MEMORY_DESCRIPTOR.ctype), + ("context", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +MINIDUMP_THREAD_LIST = Descriptor([ + ("thread_count", ctypes.c_uint32), + ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count) +]) + + +class MinidumpReader(object): + """Minidump (.dmp) reader.""" + + _HEADER_MAGIC = 0x504d444d + + def __init__(self, options, minidump_name): + self.minidump_name = minidump_name + self.minidump_file = open(minidump_name, "r") + self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE) + self.header = MINIDUMP_HEADER.Read(self.minidump, 0) + if self.header.signature != MinidumpReader._HEADER_MAGIC: + print >>sys.stderr, "Warning: unsupported minidump header magic" + DebugPrint(self.header) + directories = [] + offset = self.header.stream_directories_rva + for _ in xrange(self.header.stream_count): + directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset)) + offset += MINIDUMP_DIRECTORY.size + self.exception = None + self.exception_context = None + self.memory_list = None + self.thread_map = {} + for d in directories: + DebugPrint(d) + # TODO(vitalyr): extract system info including CPU features. 
+ if d.stream_type == MD_EXCEPTION_STREAM: + self.exception = MINIDUMP_EXCEPTION_STREAM.Read( + self.minidump, d.location.rva) + DebugPrint(self.exception) + self.exception_context = MINIDUMP_CONTEXT_X86.Read( + self.minidump, self.exception.thread_context.rva) + DebugPrint(self.exception_context) + elif d.stream_type == MD_THREAD_LIST_STREAM: + thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva) + assert ctypes.sizeof(thread_list) == d.location.data_size + DebugPrint(thread_list) + for thread in thread_list.threads: + DebugPrint(thread) + self.thread_map[thread.id] = thread + elif d.stream_type == MD_MEMORY_LIST_STREAM: + print >>sys.stderr, "Warning: not a full minidump" + ml = MINIDUMP_MEMORY_LIST.Read(self.minidump, d.location.rva) + DebugPrint(ml) + for m in ml.ranges: + DebugPrint(m) + elif d.stream_type == MD_MEMORY_64_LIST_STREAM: + assert self.memory_list is None + self.memory_list = MINIDUMP_MEMORY_LIST64.Read( + self.minidump, d.location.rva) + assert ctypes.sizeof(self.memory_list) == d.location.data_size + DebugPrint(self.memory_list) + + def IsValidAddress(self, address): + return self.FindLocation(address) is not None + + def ReadU8(self, address): + location = self.FindLocation(address) + return ctypes.c_uint8.from_buffer(self.minidump, location).value + + def ReadU32(self, address): + location = self.FindLocation(address) + return ctypes.c_uint32.from_buffer(self.minidump, location).value + + def ReadBytes(self, address, size): + location = self.FindLocation(address) + return self.minidump[location:location + size] + + def FindLocation(self, address): + # TODO(vitalyr): only works for full minidumps (...64 structure variants). + offset = 0 + for r in self.memory_list.ranges: + if r.start <= address < r.start + r.size: + return self.memory_list.base_rva + offset + address - r.start + offset += r.size + return None + + def GetDisasmLines(self, address, size): + location = self.FindLocation(address) + if location is None: return [] + return disasm.GetDisasmLines(self.minidump_name, + location, + size, + "ia32", + False) + + + def Dispose(self): + self.minidump.close() + self.minidump_file.close() + + +# List of V8 instance types. Obtained by adding the code below to any .cc file. 
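+# (These numeric values are specific to this V8 revision and must be
+# regenerated with the snippet below whenever the instance-type list changes.)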
+# +# #define DUMP_TYPE(T) printf("%d: \"%s\",\n", T, #T); +# struct P { +# P() { +# printf("{\n"); +# INSTANCE_TYPE_LIST(DUMP_TYPE) +# printf("}\n"); +# } +# }; +# static P p; +INSTANCE_TYPES = { + 64: "SYMBOL_TYPE", + 68: "ASCII_SYMBOL_TYPE", + 65: "CONS_SYMBOL_TYPE", + 69: "CONS_ASCII_SYMBOL_TYPE", + 66: "EXTERNAL_SYMBOL_TYPE", + 74: "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE", + 70: "EXTERNAL_ASCII_SYMBOL_TYPE", + 0: "STRING_TYPE", + 4: "ASCII_STRING_TYPE", + 1: "CONS_STRING_TYPE", + 5: "CONS_ASCII_STRING_TYPE", + 2: "EXTERNAL_STRING_TYPE", + 10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE", + 6: "EXTERNAL_ASCII_STRING_TYPE", + 6: "PRIVATE_EXTERNAL_ASCII_STRING_TYPE", + 128: "MAP_TYPE", + 129: "CODE_TYPE", + 130: "ODDBALL_TYPE", + 131: "JS_GLOBAL_PROPERTY_CELL_TYPE", + 132: "HEAP_NUMBER_TYPE", + 133: "PROXY_TYPE", + 134: "BYTE_ARRAY_TYPE", + 135: "PIXEL_ARRAY_TYPE", + 136: "EXTERNAL_BYTE_ARRAY_TYPE", + 137: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE", + 138: "EXTERNAL_SHORT_ARRAY_TYPE", + 139: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE", + 140: "EXTERNAL_INT_ARRAY_TYPE", + 141: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE", + 142: "EXTERNAL_FLOAT_ARRAY_TYPE", + 143: "FILLER_TYPE", + 144: "ACCESSOR_INFO_TYPE", + 145: "ACCESS_CHECK_INFO_TYPE", + 146: "INTERCEPTOR_INFO_TYPE", + 147: "CALL_HANDLER_INFO_TYPE", + 148: "FUNCTION_TEMPLATE_INFO_TYPE", + 149: "OBJECT_TEMPLATE_INFO_TYPE", + 150: "SIGNATURE_INFO_TYPE", + 151: "TYPE_SWITCH_INFO_TYPE", + 152: "SCRIPT_TYPE", + 153: "CODE_CACHE_TYPE", + 156: "FIXED_ARRAY_TYPE", + 157: "SHARED_FUNCTION_INFO_TYPE", + 158: "JS_MESSAGE_OBJECT_TYPE", + 159: "JS_VALUE_TYPE", + 160: "JS_OBJECT_TYPE", + 161: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 162: "JS_GLOBAL_OBJECT_TYPE", + 163: "JS_BUILTINS_OBJECT_TYPE", + 164: "JS_GLOBAL_PROXY_TYPE", + 165: "JS_ARRAY_TYPE", + 166: "JS_REGEXP_TYPE", + 167: "JS_FUNCTION_TYPE", + 154: "DEBUG_INFO_TYPE", + 155: "BREAK_POINT_INFO_TYPE", +} + + +class Printer(object): + """Printer with indentation support.""" + + def __init__(self): + self.indent = 0 + + def Indent(self): + self.indent += 2 + + def Dedent(self): + self.indent -= 2 + + def Print(self, string): + print "%s%s" % (self._IndentString(), string) + + def PrintLines(self, lines): + indent = self._IndentString() + print "\n".join("%s%s" % (indent, line) for line in lines) + + def _IndentString(self): + return self.indent * " " + + +ADDRESS_RE = re.compile(r"0x[0-9a-fA-F]+") + + +def FormatDisasmLine(start, heap, line): + line_address = start + line[0] + stack_slot = heap.stack_map.get(line_address) + marker = " " + if stack_slot: + marker = "=>" + code = AnnotateAddresses(heap, line[1]) + return "%s%08x %08x: %s" % (marker, line_address, line[0], code) + + +def AnnotateAddresses(heap, line): + extra = [] + for m in ADDRESS_RE.finditer(line): + maybe_address = int(m.group(0), 16) + object = heap.FindObject(maybe_address) + if not object: continue + extra.append(str(object)) + if len(extra) == 0: return line + return "%s ;; %s" % (line, ", ".join(extra)) + + +class HeapObject(object): + def __init__(self, heap, map, address): + self.heap = heap + self.map = map + self.address = address + + def Is(self, cls): + return isinstance(self, cls) + + def Print(self, p): + p.Print(str(self)) + + def __str__(self): + return "HeapObject(%08x, %s)" % (self.address, + INSTANCE_TYPES[self.map.instance_type]) + + def ObjectField(self, offset): + field_value = self.heap.reader.ReadU32(self.address + offset) + return self.heap.FindObjectOrSmi(field_value) + + def SmiField(self, offset): + field_value = 
self.heap.reader.ReadU32(self.address + offset) + assert (field_value & 1) == 0 + return field_value / 2 + + +class Map(HeapObject): + INSTANCE_TYPE_OFFSET = 8 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.instance_type = \ + heap.reader.ReadU8(self.address + Map.INSTANCE_TYPE_OFFSET) + + +class String(HeapObject): + LENGTH_OFFSET = 4 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.length = self.SmiField(String.LENGTH_OFFSET) + + def GetChars(self): + return "?string?" + + def Print(self, p): + p.Print(str(self)) + + def __str__(self): + return "\"%s\"" % self.GetChars() + + +class SeqString(String): + CHARS_OFFSET = 12 + + def __init__(self, heap, map, address): + String.__init__(self, heap, map, address) + self.chars = heap.reader.ReadBytes(self.address + SeqString.CHARS_OFFSET, + self.length) + + def GetChars(self): + return self.chars + + +class ExternalString(String): + RESOURCE_OFFSET = 12 + + WEBKIT_RESOUCE_STRING_IMPL_OFFSET = 4 + WEBKIT_STRING_IMPL_CHARS_OFFSET = 8 + + def __init__(self, heap, map, address): + String.__init__(self, heap, map, address) + reader = heap.reader + self.resource = \ + reader.ReadU32(self.address + ExternalString.RESOURCE_OFFSET) + self.chars = "?external string?" + if not reader.IsValidAddress(self.resource): return + string_impl_address = self.resource + \ + ExternalString.WEBKIT_RESOUCE_STRING_IMPL_OFFSET + if not reader.IsValidAddress(string_impl_address): return + string_impl = reader.ReadU32(string_impl_address) + chars_ptr_address = string_impl + \ + ExternalString.WEBKIT_STRING_IMPL_CHARS_OFFSET + if not reader.IsValidAddress(chars_ptr_address): return + chars_ptr = reader.ReadU32(chars_ptr_address) + if not reader.IsValidAddress(chars_ptr): return + raw_chars = reader.ReadBytes(chars_ptr, 2 * self.length) + self.chars = codecs.getdecoder("utf16")(raw_chars)[0] + + def GetChars(self): + return self.chars + + +class ConsString(String): + LEFT_OFFSET = 12 + RIGHT_OFFSET = 16 + + def __init__(self, heap, map, address): + String.__init__(self, heap, map, address) + self.left = self.ObjectField(ConsString.LEFT_OFFSET) + self.right = self.ObjectField(ConsString.RIGHT_OFFSET) + + def GetChars(self): + return self.left.GetChars() + self.right.GetChars() + + +class Oddball(HeapObject): + TO_STRING_OFFSET = 4 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.to_string = self.ObjectField(Oddball.TO_STRING_OFFSET) + + def Print(self, p): + p.Print(str(self)) + + def __str__(self): + return "<%s>" % self.to_string.GetChars() + + +class FixedArray(HeapObject): + LENGTH_OFFSET = 4 + ELEMENTS_OFFSET = 8 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.length = self.SmiField(FixedArray.LENGTH_OFFSET) + + def Print(self, p): + p.Print("FixedArray(%08x) {" % self.address) + p.Indent() + p.Print("length: %d" % self.length) + for i in xrange(self.length): + offset = FixedArray.ELEMENTS_OFFSET + 4 * i + p.Print("[%08d] = %s" % (i, self.ObjectField(offset))) + p.Dedent() + p.Print("}") + + def __str__(self): + return "FixedArray(%08x, length=%d)" % (self.address, self.length) + + +class JSFunction(HeapObject): + CODE_ENTRY_OFFSET = 12 + SHARED_OFFSET = 20 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + code_entry = \ + heap.reader.ReadU32(self.address + JSFunction.CODE_ENTRY_OFFSET) + self.code = heap.FindObject(code_entry - 
Code.ENTRY_OFFSET + 1) + self.shared = self.ObjectField(JSFunction.SHARED_OFFSET) + + def Print(self, p): + source = "\n".join(" %s" % line for line in self._GetSource().split("\n")) + p.Print("JSFunction(%08x) {" % self.address) + p.Indent() + p.Print("inferred name: %s" % self.shared.inferred_name) + if self.shared.script.Is(Script) and self.shared.script.name.Is(String): + p.Print("script name: %s" % self.shared.script.name) + p.Print("source:") + p.PrintLines(self._GetSource().split("\n")) + p.Print("code:") + self.code.Print(p) + if self.code != self.shared.code: + p.Print("unoptimized code:") + self.shared.code.Print(p) + p.Dedent() + p.Print("}") + + def __str__(self): + inferred_name = "" + if self.shared.Is(SharedFunctionInfo): + inferred_name = self.shared.inferred_name + return "JSFunction(%08x, %s)" % (self.address, inferred_name) + + def _GetSource(self): + source = "?source?" + start = self.shared.start_position + end = self.shared.end_position + if not self.shared.script.Is(Script): return source + script_source = self.shared.script.source + if not script_source.Is(String): return source + return script_source.GetChars()[start:end] + + +class SharedFunctionInfo(HeapObject): + CODE_OFFSET = 2 * 4 + SCRIPT_OFFSET = 7 * 4 + INFERRED_NAME_OFFSET = 9 * 4 + START_POSITION_AND_TYPE_OFFSET = 17 * 4 + END_POSITION_OFFSET = 18 * 4 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.code = self.ObjectField(SharedFunctionInfo.CODE_OFFSET) + self.script = self.ObjectField(SharedFunctionInfo.SCRIPT_OFFSET) + self.inferred_name = \ + self.ObjectField(SharedFunctionInfo.INFERRED_NAME_OFFSET) + start_position_and_type = \ + self.SmiField(SharedFunctionInfo.START_POSITION_AND_TYPE_OFFSET) + self.start_position = start_position_and_type >> 2 + self.end_position = self.SmiField(SharedFunctionInfo.END_POSITION_OFFSET) + + +class Script(HeapObject): + SOURCE_OFFSET = 4 + NAME_OFFSET = 8 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.source = self.ObjectField(Script.SOURCE_OFFSET) + self.name = self.ObjectField(Script.NAME_OFFSET) + + +class Code(HeapObject): + INSTRUCTION_SIZE_OFFSET = 4 + ENTRY_OFFSET = 32 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.entry = self.address + Code.ENTRY_OFFSET + self.instruction_size = \ + heap.reader.ReadU32(self.address + Code.INSTRUCTION_SIZE_OFFSET) + + def Print(self, p): + lines = self.heap.reader.GetDisasmLines(self.entry, self.instruction_size) + p.Print("Code(%08x) {" % self.address) + p.Indent() + p.Print("instruction_size: %d" % self.instruction_size) + p.PrintLines(self._FormatLine(line) for line in lines) + p.Dedent() + p.Print("}") + + def _FormatLine(self, line): + return FormatDisasmLine(self.entry, self.heap, line) + + +class V8Heap(object): + CLASS_MAP = { + "SYMBOL_TYPE": SeqString, + "ASCII_SYMBOL_TYPE": SeqString, + "CONS_SYMBOL_TYPE": ConsString, + "CONS_ASCII_SYMBOL_TYPE": ConsString, + "EXTERNAL_SYMBOL_TYPE": ExternalString, + "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString, + "EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString, + "STRING_TYPE": SeqString, + "ASCII_STRING_TYPE": SeqString, + "CONS_STRING_TYPE": ConsString, + "CONS_ASCII_STRING_TYPE": ConsString, + "EXTERNAL_STRING_TYPE": ExternalString, + "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE": ExternalString, + "EXTERNAL_ASCII_STRING_TYPE": ExternalString, + + "MAP_TYPE": Map, + "ODDBALL_TYPE": Oddball, + "FIXED_ARRAY_TYPE": FixedArray, + 
"JS_FUNCTION_TYPE": JSFunction, + "SHARED_FUNCTION_INFO_TYPE": SharedFunctionInfo, + "SCRIPT_TYPE": Script, + "CODE_TYPE": Code + } + + def __init__(self, reader, stack_map): + self.reader = reader + self.stack_map = stack_map + self.objects = {} + + def FindObjectOrSmi(self, tagged_address): + if (tagged_address & 1) == 0: return tagged_address / 2 + return self.FindObject(tagged_address) + + def FindObject(self, tagged_address): + if tagged_address in self.objects: + return self.objects[tagged_address] + if (tagged_address & 1) != 1: return None + address = tagged_address - 1 + if not self.reader.IsValidAddress(address): return None + map_tagged_address = self.reader.ReadU32(address) + if tagged_address == map_tagged_address: + # Meta map? + meta_map = Map(self, None, address) + instance_type_name = INSTANCE_TYPES.get(meta_map.instance_type) + if instance_type_name != "MAP_TYPE": return None + meta_map.map = meta_map + object = meta_map + else: + map = self.FindObject(map_tagged_address) + if map is None: return None + instance_type_name = INSTANCE_TYPES.get(map.instance_type) + if instance_type_name is None: return None + cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject) + object = cls(self, map, address) + self.objects[tagged_address] = object + return object + + +EIP_PROXIMITY = 64 + + +def AnalyzeMinidump(options, minidump_name): + reader = MinidumpReader(options, minidump_name) + DebugPrint("========================================") + if reader.exception is None: + print "Minidump has no exception info" + return + print "Exception info:" + exception_thread = reader.thread_map[reader.exception.thread_id] + print " thread id: %d" % exception_thread.id + print " code: %08X" % reader.exception.exception.code + print " context:" + print " eax: %08x" % reader.exception_context.eax + print " ebx: %08x" % reader.exception_context.ebx + print " ecx: %08x" % reader.exception_context.ecx + print " edx: %08x" % reader.exception_context.edx + print " edi: %08x" % reader.exception_context.edi + print " esi: %08x" % reader.exception_context.esi + print " ebp: %08x" % reader.exception_context.ebp + print " esp: %08x" % reader.exception_context.esp + print " eip: %08x" % reader.exception_context.eip + # TODO(vitalyr): decode eflags. 
+ print " eflags: %s" % bin(reader.exception_context.eflags)[2:] + print + + stack_bottom = exception_thread.stack.start + \ + exception_thread.stack.memory.data_size + stack_map = {reader.exception_context.eip: -1} + for slot in xrange(reader.exception_context.esp, stack_bottom, 4): + maybe_address = reader.ReadU32(slot) + if not maybe_address in stack_map: + stack_map[maybe_address] = slot + heap = V8Heap(reader, stack_map) + + print "Disassembly around exception.eip:" + start = reader.exception_context.eip - EIP_PROXIMITY + lines = reader.GetDisasmLines(start, 2 * EIP_PROXIMITY) + for line in lines: + print FormatDisasmLine(start, heap, line) + print + + print "Annotated stack (from exception.esp to bottom):" + for slot in xrange(reader.exception_context.esp, stack_bottom, 4): + maybe_address = reader.ReadU32(slot) + heap_object = heap.FindObject(maybe_address) + print "%08x: %08x" % (slot, maybe_address) + if heap_object: + heap_object.Print(Printer()) + print + + reader.Dispose() + + +if __name__ == "__main__": + parser = optparse.OptionParser(USAGE) + options, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(1) + AnalyzeMinidump(options, args[0]) diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp index 15185671..6dab52d8 100644 --- a/tools/gyp/v8.gyp +++ b/tools/gyp/v8.gyp @@ -598,6 +598,8 @@ '../../src/arm/lithium-arm.h', '../../src/arm/lithium-codegen-arm.cc', '../../src/arm/lithium-codegen-arm.h', + '../../src/arm/lithium-gap-resolver-arm.cc', + '../../src/arm/lithium-gap-resolver-arm.h', '../../src/arm/macro-assembler-arm.cc', '../../src/arm/macro-assembler-arm.h', '../../src/arm/regexp-macro-assembler-arm.cc', diff --git a/tools/linux-tick-processor.py b/tools/linux-tick-processor.py deleted file mode 100755 index 67c3b955..00000000 --- a/tools/linux-tick-processor.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Usage: process-ticks.py <logfile> -# Where <logfile> is the log file name (eg, v8.log). 
- -import subprocess, re, sys, tickprocessor - -class LinuxTickProcessor(tickprocessor.TickProcessor): - - def ParseVMSymbols(self, filename, start, end): - """Extract symbols and add them to the cpp entries.""" - # Extra both dynamic and non-dynamic symbols. - command = 'nm -C -n "%s"; nm -C -n -D "%s"' % (filename, filename) - process = subprocess.Popen(command, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - pipe = process.stdout - try: - for line in pipe: - row = re.match('^([0-9a-fA-F]{8}) . (.*)$', line) - if row: - addr = int(row.group(1), 16) - if addr < start and addr < end - start: - addr += start - self.cpp_entries.Insert(addr, tickprocessor.CodeEntry(addr, row.group(2))) - finally: - pipe.close() - - -class LinuxCmdLineProcessor(tickprocessor.CmdLineProcessor): - - def GetRequiredArgsNames(self): - return 'log_file' - - def ProcessRequiredArgs(self, args): - if len(args) != 1: - self.PrintUsageAndExit() - else: - self.log_file = args[0] - - -def Main(): - cmdline_processor = LinuxCmdLineProcessor() - cmdline_processor.ProcessArguments() - tick_processor = LinuxTickProcessor() - cmdline_processor.RunLogfileProcessing(tick_processor) - tick_processor.PrintResults() - - -if __name__ == '__main__': - Main() diff --git a/tools/ll_prof.py b/tools/ll_prof.py index 8390d4af..7f12c133 100755 --- a/tools/ll_prof.py +++ b/tools/ll_prof.py @@ -30,13 +30,13 @@ import bisect import collections import ctypes +import disasm import mmap import optparse import os import re import subprocess import sys -import tempfile import time @@ -74,27 +74,12 @@ V8_GC_FAKE_MMAP = "/tmp/__v8_gc__" JS_ORIGIN = "js" JS_SNAPSHOT_ORIGIN = "js-snapshot" -# Avoid using the slow (google-specific) wrapper around objdump. -OBJDUMP_BIN = "/usr/bin/objdump" -if not os.path.exists(OBJDUMP_BIN): - OBJDUMP_BIN = "objdump" +OBJDUMP_BIN = disasm.OBJDUMP_BIN class Code(object): """Code object.""" - _COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"] - - _DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$") - _DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):.*") - - # Keys must match constants in Logger::LogCodeInfo. - _ARCH_MAP = { - "ia32": "-m i386", - "x64": "-m i386 -M x86-64", - "arm": "-m arm" # Not supported by our objdump build. - } - _id = 0 def __init__(self, name, start_address, end_address, origin, origin_offset): @@ -150,12 +135,7 @@ class Code(object): ticks_offsets = [t[0] for t in ticks_map] ticks_counts = [t[1] for t in ticks_map] # Get a list of disassembled lines and their addresses. - lines = [] - for line in self._GetDisasmLines(code_info, options): - match = Code._DISASM_LINE_RE.match(line) - if match: - line_address = int(match.group(1), 16) - lines.append((line_address, line)) + lines = self._GetDisasmLines(code_info, options) if len(lines) == 0: return # Print annotated lines. 
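The next hunk replaces the hand-rolled objdump plumbing with a shared
disasm module (imported in place of tempfile above). That module is not
part of this excerpt; the sketch below reconstructs, from the command
lines being deleted, roughly what a GetDisasmLines with the call-site
signature shown in the hunk could do. Treat the details (regex, flag
set, the omitted dd-style slicing for the flat-binary case, and the
missing per-architecture handling) as assumptions:

    import re
    import subprocess

    _DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(.*)")

    def GetDisasmLines(filename, offset, size, arch, inplace):
      # Disassemble [offset, offset + size) and return (address, text)
      # pairs, which is how the annotated-ticks printer consumes them.
      if inplace:
        # The code sits at a real offset inside an object file.
        command = ("objdump -C -d --start-address=%d --stop-address=%d %s"
                   % (offset, offset + size, filename))
      else:
        # Raw JIT code dumped next to the log: disassemble as flat
        # binary. (Per-architecture flags such as "-m i386" and the
        # slicing of the file to the code object, which the deleted
        # code did with dd, are elided here.)
        command = "objdump -C -D -b binary %s" % filename
      out = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT).communicate()[0]
      result = []
      for line in out.split("\n"):
        match = _DISASM_LINE_RE.match(line)
        if match:
          result.append((int(match.group(1), 16), match.group(2)))
      return result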
@@ -179,9 +159,9 @@ class Code(object): total_count += count count = 100.0 * count / self.self_ticks if count >= 0.01: - print "%15.2f %s" % (count, lines[i][1]) + print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1]) else: - print "%s %s" % (" " * 15, lines[i][1]) + print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1]) print assert total_count == self.self_ticks, \ "Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self) @@ -195,39 +175,17 @@ class Code(object): self.origin) def _GetDisasmLines(self, code_info, options): - tmp_name = None if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN: - assert code_info.arch in Code._ARCH_MAP, \ - "Unsupported architecture '%s'" % arch - arch_flags = Code._ARCH_MAP[code_info.arch] - # Create a temporary file just with this code object. - tmp_name = tempfile.mktemp(".v8code") - size = self.end_address - self.start_address - command = "dd if=%s.code of=%s bs=1 count=%d skip=%d && " \ - "%s %s -D -b binary %s %s" % ( - options.log, tmp_name, size, self.origin_offset, - OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS), arch_flags, - tmp_name) + inplace = False + filename = options.log + ".code" else: - command = "%s %s --start-address=%d --stop-address=%d -d %s " % ( - OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS), - self.origin_offset, - self.origin_offset + self.end_address - self.start_address, - self.origin) - process = subprocess.Popen(command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = process.communicate() - lines = out.split("\n") - header_line = 0 - for i, line in enumerate(lines): - if Code._DISASM_HEADER_RE.match(line): - header_line = i - break - if tmp_name: - os.unlink(tmp_name) - return lines[header_line + 1:] + inplace = True + filename = self.origin + return disasm.GetDisasmLines(filename, + self.origin_offset, + self.end_address - self.start_address, + code_info.arch, + inplace) class CodePage(object): @@ -353,7 +311,7 @@ class CodeLogReader(object): r"code-info,([^,]+),(\d+)") _CODE_CREATE_RE = re.compile( - r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\"(?:,(\d+))?") + r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\"(?:,(0x[a-f0-9]+),([~*])?)?(?:,(\d+))?") _CODE_MOVE_RE = re.compile( r"code-move,(0x[a-f0-9]+),(0x[a-f0-9]+)") @@ -400,12 +358,18 @@ class CodeLogReader(object): name = self.address_to_snapshot_name[start_address] origin = JS_SNAPSHOT_ORIGIN else: - name = "%s:%s" % (match.group(1), match.group(4)) + tag = match.group(1) + optimization_status = match.group(6) + func_name = match.group(4) + if optimization_status: + name = "%s:%s%s" % (tag, optimization_status, func_name) + else: + name = "%s:%s" % (tag, func_name) origin = JS_ORIGIN if self.is_snapshot: origin_offset = 0 else: - origin_offset = int(match.group(5)) + origin_offset = int(match.group(7)) code = Code(name, start_address, end_address, origin, origin_offset) conficting_code = self.code_map.Find(start_address) if conficting_code: diff --git a/tools/profile.js b/tools/profile.js index 03bee839..c9c9437e 100644 --- a/tools/profile.js +++ b/tools/profile.js @@ -38,11 +38,6 @@ function Profile() { this.bottomUpTree_ = new CallTree(); }; -/** - * Version of profiler log. - */ -Profile.VERSION = 2; - /** * Returns whether a function with the specified name must be skipped. @@ -69,6 +64,18 @@ Profile.Operation = { /** + * Enum for code state regarding its dynamic optimization. 
+ * + * @enum {number} + */ +Profile.CodeState = { + COMPILED: 0, + OPTIMIZABLE: 1, + OPTIMIZED: 2 +}; + + +/** * Called whenever the specified operation has failed finding a function * containing the specified address. Should be overriden by subclasses. * See the Profile.Operation enum for the list of @@ -134,17 +141,30 @@ Profile.prototype.addCode = function( /** - * Creates an alias entry for a code entry. + * Registers dynamic (JIT-compiled) code entry. * - * @param {number} aliasAddr Alias address. - * @param {number} addr Code entry address. - */ -Profile.prototype.addCodeAlias = function( - aliasAddr, addr) { - var entry = this.codeMap_.findDynamicEntryByStartAddress(addr); - if (entry) { - this.codeMap_.addCode(aliasAddr, entry); + * @param {string} type Code entry type. + * @param {string} name Code entry name. + * @param {number} start Starting address. + * @param {number} size Code entry size. + * @param {number} funcAddr Shared function object address. + * @param {Profile.CodeState} state Optimization state. + */ +Profile.prototype.addFuncCode = function( + type, name, start, size, funcAddr, state) { + // As code and functions are in the same address space, + // it is safe to put them in a single code map. + var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr); + if (!func) { + func = new Profile.FunctionEntry(name); + this.codeMap_.addCode(funcAddr, func); + } else if (func.name !== name) { + // Function object has been overwritten with a new one. + func.name = name; } + var entry = new Profile.DynamicFuncCodeEntry(size, type, func, state); + this.codeMap_.addCode(start, entry); + return entry; }; @@ -183,7 +203,7 @@ Profile.prototype.deleteCode = function(start) { * @param {number} from Current code entry address. * @param {number} to New code entry address. */ -Profile.prototype.safeMoveDynamicCode = function(from, to) { +Profile.prototype.moveFunc = function(from, to) { if (this.codeMap_.findDynamicEntryByStartAddress(from)) { this.codeMap_.moveCode(from, to); } @@ -191,18 +211,6 @@ Profile.prototype.safeMoveDynamicCode = function(from, to) { /** - * Reports about deletion of a dynamic code entry. - * - * @param {number} start Starting address. - */ -Profile.prototype.safeDeleteDynamicCode = function(start) { - if (this.codeMap_.findDynamicEntryByStartAddress(start)) { - this.codeMap_.deleteCode(start); - } -}; - - -/** * Retrieves a code entry by an address. * * @param {number} addr Entry address. @@ -383,14 +391,7 @@ Profile.DynamicCodeEntry = function(size, type, name) { * Returns node name. */ Profile.DynamicCodeEntry.prototype.getName = function() { - var name = this.name; - if (name.length == 0) { - name = '<anonymous>'; - } else if (name.charAt(0) == ' ') { - // An anonymous function with location: " aaa.js:10". - name = '<anonymous>' + name; - } - return this.type + ': ' + name; + return this.type + ': ' + this.name; }; @@ -403,9 +404,73 @@ Profile.DynamicCodeEntry.prototype.getRawName = function() { Profile.DynamicCodeEntry.prototype.isJSFunction = function() { - return this.type == "Function" || - this.type == "LazyCompile" || - this.type == "Script"; + return false; +}; + + +/** + * Creates a dynamic code entry. + * + * @param {number} size Code size. + * @param {string} type Code type. + * @param {Profile.FunctionEntry} func Shared function entry. + * @param {Profile.CodeState} state Code optimization state. 
+ * @constructor + */ +Profile.DynamicFuncCodeEntry = function(size, type, func, state) { + CodeMap.CodeEntry.call(this, size); + this.type = type; + this.func = func; + this.state = state; +}; + +Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"]; + +/** + * Returns node name. + */ +Profile.DynamicFuncCodeEntry.prototype.getName = function() { + var name = this.func.getName(); + return this.type + ': ' + Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state] + name; +}; + + +/** + * Returns raw node name (without type decoration). + */ +Profile.DynamicFuncCodeEntry.prototype.getRawName = function() { + return this.func.getName(); +}; + + +Profile.DynamicFuncCodeEntry.prototype.isJSFunction = function() { + return true; +}; + + +/** + * Creates a shared function object entry. + * + * @param {string} name Function name. + * @constructor + */ +Profile.FunctionEntry = function(name) { + CodeMap.CodeEntry.call(this, 0, name); +}; + + +/** + * Returns node name. + */ +Profile.FunctionEntry.prototype.getName = function() { + var name = this.name; + if (name.length == 0) { + name = '<anonymous>'; + } else if (name.charAt(0) == ' ') { + // An anonymous function with location: " aaa.js:10". + name = '<anonymous>' + name; + } + return name; }; diff --git a/tools/splaytree.py b/tools/splaytree.py deleted file mode 100644 index 8c3c4fe1..00000000 --- a/tools/splaytree.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
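splaytree.py, removed below, implemented the classic top-down splay
tree of Sleator and Tarjan, keyed by code start address; the deleted
Python tick processor used it for its js_entries and cpp_entries
address maps. For reference, a short usage sketch of the API as listed
below (the addresses are invented):

    tree = SplayTree()
    tree.Insert(0x08048000, "v8_shell")
    tree.Insert(0xf7400000, "libv8.so")
    # The entry covering a PC is the one with the greatest start
    # address at or below it.
    node = tree.FindGreatestsLessThan(0x08049abc)  # node.value == "v8_shell"
    tree.Remove(0x08048000)   # raises KeyNotFoundError for absent keys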
- - -class Node(object): - """Nodes in the splay tree.""" - - def __init__(self, key, value): - self.key = key - self.value = value - self.left = None - self.right = None - - -class KeyNotFoundError(Exception): - """KeyNotFoundError is raised when removing a non-existing node.""" - - def __init__(self, key): - self.key = key - - -class SplayTree(object): - """The splay tree itself is just a reference to the root of the tree.""" - - def __init__(self): - """Create a new SplayTree.""" - self.root = None - - def IsEmpty(self): - """Is the SplayTree empty?""" - return not self.root - - def Insert(self, key, value): - """Insert a new node in the SplayTree.""" - # If the tree is empty, insert the new node. - if self.IsEmpty(): - self.root = Node(key, value) - return - # Splay on the key to move the last node on the search path for - # the key to the root of the tree. - self.Splay(key) - # Ignore repeated insertions with the same key. - if self.root.key == key: - return - # Insert the new node. - node = Node(key, value) - if key > self.root.key: - node.left = self.root - node.right = self.root.right - self.root.right = None - else: - node.right = self.root - node.left = self.root.left - self.root.left = None - self.root = node - - def Remove(self, key): - """Remove the node with the given key from the SplayTree.""" - # Raise exception for key that is not found if the tree is empty. - if self.IsEmpty(): - raise KeyNotFoundError(key) - # Splay on the key to move the node with the given key to the top. - self.Splay(key) - # Raise exception for key that is not found. - if self.root.key != key: - raise KeyNotFoundError(key) - removed = self.root - # Link out the root node. - if not self.root.left: - # No left child, so the new tree is just the right child. - self.root = self.root.right - else: - # Left child exists. - right = self.root.right - # Make the original left child the new root. - self.root = self.root.left - # Splay to make sure that the new root has an empty right child. - self.Splay(key) - # Insert the original right child as the right child of the new - # root. - self.root.right = right - return removed - - def Find(self, key): - """Returns the node with the given key or None if no such node exists.""" - if self.IsEmpty(): - return None - self.Splay(key) - if self.root.key == key: - return self.root - return None - - def FindMax(self): - """Returns the node with the largest key value.""" - if self.IsEmpty(): - return None - current = self.root - while current.right != None: - current = current.right - return current - - # Returns the node with the smallest key value. - def FindMin(self): - if self.IsEmpty(): - return None - current = self.root - while current.left != None: - current = current.left - return current - - def FindGreatestsLessThan(self, key): - """Returns node with greatest key less than or equal to the given key.""" - if self.IsEmpty(): - return None - # Splay on the key to move the node with the given key or the last - # node on the search path to the top of the tree. - self.Splay(key) - # Now the result is either the root node or the greatest node in - # the left subtree. 
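# (The Splay(key) call above is what makes this cheap: it moves the
# closest key on the search path to the root, so the answer is either
# the root itself or the maximum of its left subtree.)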
- if self.root.key <= key: - return self.root - else: - tmp = self.root - self.root = self.root.left - result = self.FindMax() - self.root = tmp - return result - - def ExportValueList(self): - """Returns a list containing all the values of the nodes in the tree.""" - result = [] - nodes_to_visit = [self.root] - while len(nodes_to_visit) > 0: - node = nodes_to_visit.pop() - if not node: - continue - result.append(node.value) - nodes_to_visit.append(node.left) - nodes_to_visit.append(node.right) - return result - - def Splay(self, key): - """Perform splay operation. - - Perform the splay operation for the given key. Moves the node with - the given key to the top of the tree. If no node has the given - key, the last node on the search path is moved to the top of the - tree. - - This uses the simplified top-down splaying algorithm from: - - "Self-adjusting Binary Search Trees" by Sleator and Tarjan - - """ - if self.IsEmpty(): - return - # Create a dummy node. The use of the dummy node is a bit - # counter-intuitive: The right child of the dummy node will hold - # the L tree of the algorithm. The left child of the dummy node - # will hold the R tree of the algorithm. Using a dummy node, left - # and right will always be nodes and we avoid special cases. - dummy = left = right = Node(None, None) - current = self.root - while True: - if key < current.key: - if not current.left: - break - if key < current.left.key: - # Rotate right. - tmp = current.left - current.left = tmp.right - tmp.right = current - current = tmp - if not current.left: - break - # Link right. - right.left = current - right = current - current = current.left - elif key > current.key: - if not current.right: - break - if key > current.right.key: - # Rotate left. - tmp = current.right - current.right = tmp.left - tmp.left = current - current = tmp - if not current.right: - break - # Link left. - left.right = current - left = current - current = current.right - else: - break - # Assemble. - left.right = current.left - right.left = current.right - current.left = dummy.right - current.right = dummy.left - self.root = current diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js index db2f3c9b..f105a21c 100644 --- a/tools/tickprocessor.js +++ b/tools/tickprocessor.js @@ -57,10 +57,23 @@ function readFile(fileName) { } +/** + * Parser for dynamic code optimization state. 
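 * (The markers come straight from the log: "" means COMPILED, "~"
 * OPTIMIZABLE, "*" OPTIMIZED, matching the switch below and the
 * STATE_PREFIX table in profile.js above.)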
+ */ +function parseState(s) { + switch (s) { + case "": return Profile.CodeState.COMPILED; + case "~": return Profile.CodeState.OPTIMIZABLE; + case "*": return Profile.CodeState.OPTIMIZED; + } + throw new Error("unknown code state: " + s); +} + + function SnapshotLogProcessor() { LogReader.call(this, { 'code-creation': { - parsers: [null, parseInt, parseInt, null], + parsers: [null, parseInt, parseInt, null, 'var-args'], processor: this.processCodeCreation }, 'code-move': { parsers: [parseInt, parseInt], processor: this.processCodeMove }, @@ -69,6 +82,7 @@ function SnapshotLogProcessor() { 'function-creation': null, 'function-move': null, 'function-delete': null, + 'sfi-move': null, 'snapshot-pos': { parsers: [parseInt, parseInt], processor: this.processSnapshotPosition }}); @@ -93,8 +107,14 @@ inherits(SnapshotLogProcessor, LogReader); SnapshotLogProcessor.prototype.processCodeCreation = function( - type, start, size, name) { - var entry = this.profile_.addCode(type, name, start, size); + type, start, size, name, maybe_func) { + if (maybe_func.length) { + var funcAddr = parseInt(maybe_func[0]); + var state = parseState(maybe_func[1]); + this.profile_.addFuncCode(type, name, start, size, funcAddr, state); + } else { + this.profile_.addCode(type, name, start, size); + } }; @@ -131,18 +151,14 @@ function TickProcessor( 'shared-library': { parsers: [null, parseInt, parseInt], processor: this.processSharedLibrary }, 'code-creation': { - parsers: [null, parseInt, parseInt, null], + parsers: [null, parseInt, parseInt, null, 'var-args'], processor: this.processCodeCreation }, 'code-move': { parsers: [parseInt, parseInt], processor: this.processCodeMove }, 'code-delete': { parsers: [parseInt], processor: this.processCodeDelete }, - 'function-creation': { parsers: [parseInt, parseInt], - processor: this.processFunctionCreation }, - 'function-move': { parsers: [parseInt, parseInt], + 'sfi-move': { parsers: [parseInt, parseInt], processor: this.processFunctionMove }, - 'function-delete': { parsers: [parseInt], - processor: this.processFunctionDelete }, 'snapshot-pos': { parsers: [parseInt, parseInt], processor: this.processSnapshotPosition }, 'tick': { parsers: [parseInt, parseInt, parseInt, parseInt, 'var-args'], @@ -155,6 +171,9 @@ function TickProcessor( processor: this.processJSProducer }, // Ignored events. 
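// (function-creation/move/delete appear superseded by the sfi-move
// handler above now that code-creation events carry the shared
// function address themselves; they only need to be skipped here.)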
'profiler': null, + 'function-creation': null, + 'function-move': null, + 'function-delete': null, 'heap-sample-stats': null, 'heap-sample-item': null, 'heap-js-cons-item': null, @@ -285,9 +304,15 @@ TickProcessor.prototype.processSharedLibrary = function( TickProcessor.prototype.processCodeCreation = function( - type, start, size, name) { + type, start, size, name, maybe_func) { name = this.deserializedEntriesNames_[start] || name; - var entry = this.profile_.addCode(type, name, start, size); + if (maybe_func.length) { + var funcAddr = parseInt(maybe_func[0]); + var state = parseState(maybe_func[1]); + this.profile_.addFuncCode(type, name, start, size, funcAddr, state); + } else { + this.profile_.addCode(type, name, start, size); + } }; @@ -301,19 +326,8 @@ TickProcessor.prototype.processCodeDelete = function(start) { }; -TickProcessor.prototype.processFunctionCreation = function( - functionAddr, codeAddr) { - this.profile_.addCodeAlias(functionAddr, codeAddr); -}; - - TickProcessor.prototype.processFunctionMove = function(from, to) { - this.profile_.safeMoveDynamicCode(from, to); -}; - - -TickProcessor.prototype.processFunctionDelete = function(start) { - this.profile_.safeDeleteDynamicCode(start); + this.profile_.moveFunc(from, to); }; @@ -330,7 +344,7 @@ TickProcessor.prototype.includeTick = function(vmState) { }; -TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) { +TickProcessor.prototype.processTick = function(pc, sp, tos, vmState, stack) { this.ticks_.total++; if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++; if (!this.includeTick(vmState)) { @@ -338,19 +352,14 @@ TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) { return; } - if (func) { - var funcEntry = this.profile_.findEntry(func); + if (tos) { + var funcEntry = this.profile_.findEntry(tos); if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) { - func = 0; - } else { - var currEntry = this.profile_.findEntry(pc); - if (!currEntry || !currEntry.isJSFunction || currEntry.isJSFunction()) { - func = 0; - } + tos = 0; } } - this.profile_.recordTick(this.processStack(pc, func, stack)); + this.profile_.recordTick(this.processStack(pc, tos, stack)); }; diff --git a/tools/tickprocessor.py b/tools/tickprocessor.py deleted file mode 100644 index c932e3fc..00000000 --- a/tools/tickprocessor.py +++ /dev/null @@ -1,571 +0,0 @@ -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import csv, splaytree, sys, re -from operator import itemgetter -import getopt, os, string - -class CodeEntry(object): - - def __init__(self, start_addr, name): - self.start_addr = start_addr - self.tick_count = 0 - self.name = name - self.stacks = {} - - def Tick(self, pc, stack): - self.tick_count += 1 - if len(stack) > 0: - stack.insert(0, self.ToString()) - stack_key = tuple(stack) - self.stacks[stack_key] = self.stacks.setdefault(stack_key, 0) + 1 - - def RegionTicks(self): - return None - - def SetStartAddress(self, start_addr): - self.start_addr = start_addr - - def ToString(self): - return self.name - - def IsSharedLibraryEntry(self): - return False - - def IsICEntry(self): - return False - - def IsJSFunction(self): - return False - -class SharedLibraryEntry(CodeEntry): - - def __init__(self, start_addr, name): - CodeEntry.__init__(self, start_addr, name) - - def IsSharedLibraryEntry(self): - return True - - -class JSCodeEntry(CodeEntry): - - def __init__(self, start_addr, name, type, size, assembler): - CodeEntry.__init__(self, start_addr, name) - self.type = type - self.size = size - self.assembler = assembler - self.region_ticks = None - self.builtin_ic_re = re.compile('^(Keyed)?(Call|Load|Store)IC_') - - def Tick(self, pc, stack): - super(JSCodeEntry, self).Tick(pc, stack) - if not pc is None: - offset = pc - self.start_addr - seen = [] - narrowest = None - narrowest_width = None - for region in self.Regions(): - if region.Contains(offset): - if (not region.name in seen): - seen.append(region.name) - if narrowest is None or region.Width() < narrowest.Width(): - narrowest = region - if len(seen) == 0: - return - if self.region_ticks is None: - self.region_ticks = {} - for name in seen: - if not name in self.region_ticks: - self.region_ticks[name] = [0, 0] - self.region_ticks[name][0] += 1 - if name == narrowest.name: - self.region_ticks[name][1] += 1 - - def RegionTicks(self): - return self.region_ticks - - def Regions(self): - if self.assembler: - return self.assembler.regions - else: - return [] - - def ToString(self): - name = self.name - if name == '': - name = '<anonymous>' - elif name.startswith(' '): - name = '<anonymous>' + name - return self.type + ': ' + name - - def IsICEntry(self): - return self.type in ('CallIC', 'LoadIC', 'StoreIC') or \ - (self.type == 'Builtin' and self.builtin_ic_re.match(self.name)) - - def IsJSFunction(self): - return self.type in ('Function', 'LazyCompile', 'Script') - -class CodeRegion(object): - - def __init__(self, start_offset, name): - self.start_offset = start_offset - self.name = name - self.end_offset = None - - def Contains(self, pc): - return (self.start_offset <= pc) and (pc <= self.end_offset) - - def Width(self): - return self.end_offset - self.start_offset - - -class Assembler(object): - - def __init__(self): - # Mapping from region ids to open regions - self.pending_regions = {} - self.regions = [] - - -class FunctionEnumerator(object): - - def __init__(self): - self.known_funcs = {} - 
self.next_func_id = 0 - - def GetFunctionId(self, name): - if not self.known_funcs.has_key(name): - self.known_funcs[name] = self.next_func_id - self.next_func_id += 1 - return self.known_funcs[name] - - def GetKnownFunctions(self): - known_funcs_items = self.known_funcs.items(); - known_funcs_items.sort(key = itemgetter(1)) - result = [] - for func, id_not_used in known_funcs_items: - result.append(func) - return result - - -VMStates = { 'JS': 0, 'GC': 1, 'COMPILER': 2, 'OTHER': 3, 'EXTERNAL' : 4 } - - -class TickProcessor(object): - - def __init__(self): - self.log_file = '' - self.deleted_code = [] - self.vm_extent = {} - # Map from assembler ids to the pending assembler objects - self.pending_assemblers = {} - # Map from code addresses the have been allocated but not yet officially - # created to their assemblers. - self.assemblers = {} - self.js_entries = splaytree.SplayTree() - self.cpp_entries = splaytree.SplayTree() - self.total_number_of_ticks = 0 - self.number_of_library_ticks = 0 - self.unaccounted_number_of_ticks = 0 - self.excluded_number_of_ticks = 0 - self.number_of_gc_ticks = 0 - # Flag indicating whether to ignore unaccounted ticks in the report - self.ignore_unknown = False - self.func_enum = FunctionEnumerator() - self.packed_stacks = [] - - def ProcessLogfile(self, filename, included_state = None, ignore_unknown = False, separate_ic = False, call_graph_json = False): - self.log_file = filename - self.included_state = included_state - self.ignore_unknown = ignore_unknown - self.separate_ic = separate_ic - self.call_graph_json = call_graph_json - - try: - logfile = open(filename, 'rb') - except IOError: - sys.exit("Could not open logfile: " + filename) - try: - try: - logreader = csv.reader(logfile) - row_num = 1 - for row in logreader: - row_num += 1 - if row[0] == 'tick': - self.ProcessTick(int(row[1], 16), int(row[2], 16), int(row[3], 16), int(row[4]), self.PreprocessStack(row[5:])) - elif row[0] == 'code-creation': - self.ProcessCodeCreation(row[1], int(row[2], 16), int(row[3]), row[4]) - elif row[0] == 'code-move': - self.ProcessCodeMove(int(row[1], 16), int(row[2], 16)) - elif row[0] == 'code-delete': - self.ProcessCodeDelete(int(row[1], 16)) - elif row[0] == 'function-creation': - self.ProcessFunctionCreation(int(row[1], 16), int(row[2], 16)) - elif row[0] == 'function-move': - self.ProcessFunctionMove(int(row[1], 16), int(row[2], 16)) - elif row[0] == 'function-delete': - self.ProcessFunctionDelete(int(row[1], 16)) - elif row[0] == 'shared-library': - self.AddSharedLibraryEntry(row[1], int(row[2], 16), int(row[3], 16)) - self.ParseVMSymbols(row[1], int(row[2], 16), int(row[3], 16)) - elif row[0] == 'begin-code-region': - self.ProcessBeginCodeRegion(int(row[1], 16), int(row[2], 16), int(row[3], 16), row[4]) - elif row[0] == 'end-code-region': - self.ProcessEndCodeRegion(int(row[1], 16), int(row[2], 16), int(row[3], 16)) - elif row[0] == 'code-allocate': - self.ProcessCodeAllocate(int(row[1], 16), int(row[2], 16)) - except csv.Error: - print("parse error in line " + str(row_num)) - raise - finally: - logfile.close() - - def AddSharedLibraryEntry(self, filename, start, end): - # Mark the pages used by this library. - i = start - while i < end: - page = i >> 12 - self.vm_extent[page] = 1 - i += 4096 - # Add the library to the entries so that ticks for which we do not - # have symbol information is reported as belonging to the library. 
- self.cpp_entries.Insert(start, SharedLibraryEntry(start, filename)) - - def ParseVMSymbols(self, filename, start, end): - return - - def ProcessCodeAllocate(self, addr, assem): - if assem in self.pending_assemblers: - assembler = self.pending_assemblers.pop(assem) - self.assemblers[addr] = assembler - - def ProcessCodeCreation(self, type, addr, size, name): - if addr in self.assemblers: - assembler = self.assemblers.pop(addr) - else: - assembler = None - self.js_entries.Insert(addr, JSCodeEntry(addr, name, type, size, assembler)) - - def ProcessCodeMove(self, from_addr, to_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - removed_node.value.SetStartAddress(to_addr); - self.js_entries.Insert(to_addr, removed_node.value) - except splaytree.KeyNotFoundError: - print('Code move event for unknown code: 0x%x' % from_addr) - - def ProcessCodeDelete(self, from_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - self.deleted_code.append(removed_node.value) - except splaytree.KeyNotFoundError: - print('Code delete event for unknown code: 0x%x' % from_addr) - - def ProcessFunctionCreation(self, func_addr, code_addr): - js_entry_node = self.js_entries.Find(code_addr) - if js_entry_node: - js_entry = js_entry_node.value - self.js_entries.Insert(func_addr, JSCodeEntry(func_addr, js_entry.name, js_entry.type, 1, None)) - - def ProcessFunctionMove(self, from_addr, to_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - removed_node.value.SetStartAddress(to_addr); - self.js_entries.Insert(to_addr, removed_node.value) - except splaytree.KeyNotFoundError: - return - - def ProcessFunctionDelete(self, from_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - self.deleted_code.append(removed_node.value) - except splaytree.KeyNotFoundError: - return - - def ProcessBeginCodeRegion(self, id, assm, start, name): - if not assm in self.pending_assemblers: - self.pending_assemblers[assm] = Assembler() - assembler = self.pending_assemblers[assm] - assembler.pending_regions[id] = CodeRegion(start, name) - - def ProcessEndCodeRegion(self, id, assm, end): - assm = self.pending_assemblers[assm] - region = assm.pending_regions.pop(id) - region.end_offset = end - assm.regions.append(region) - - def IncludeTick(self, pc, sp, state): - return (self.included_state is None) or (self.included_state == state) - - def FindEntry(self, pc): - page = pc >> 12 - if page in self.vm_extent: - entry = self.cpp_entries.FindGreatestsLessThan(pc) - if entry != None: - return entry.value - else: - return entry - max = self.js_entries.FindMax() - min = self.js_entries.FindMin() - if max != None and pc < (max.key + max.value.size) and pc > min.key: - return self.js_entries.FindGreatestsLessThan(pc).value - return None - - def PreprocessStack(self, stack): - # remove all non-addresses (e.g. 
'overflow') and convert to int - result = [] - for frame in stack: - if frame.startswith('0x'): - result.append(int(frame, 16)) - return result - - def ProcessStack(self, stack): - result = [] - for frame in stack: - entry = self.FindEntry(frame) - if entry != None: - result.append(entry.ToString()) - return result - - def ProcessTick(self, pc, sp, func, state, stack): - if state == VMStates['GC']: - self.number_of_gc_ticks += 1 - if not self.IncludeTick(pc, sp, state): - self.excluded_number_of_ticks += 1; - return - self.total_number_of_ticks += 1 - entry = self.FindEntry(pc) - if entry == None: - self.unaccounted_number_of_ticks += 1 - return - if entry.IsSharedLibraryEntry(): - self.number_of_library_ticks += 1 - if entry.IsICEntry() and not self.separate_ic: - if len(stack) > 0: - caller_pc = stack.pop(0) - self.total_number_of_ticks -= 1 - self.ProcessTick(caller_pc, sp, func, state, stack) - else: - self.unaccounted_number_of_ticks += 1 - else: - processed_stack = self.ProcessStack(stack) - if not entry.IsSharedLibraryEntry() and not entry.IsJSFunction(): - func_entry_node = self.js_entries.Find(func) - if func_entry_node and func_entry_node.value.IsJSFunction(): - processed_stack.insert(0, func_entry_node.value.ToString()) - entry.Tick(pc, processed_stack) - if self.call_graph_json: - self.AddToPackedStacks(pc, stack) - - def AddToPackedStacks(self, pc, stack): - full_stack = stack - full_stack.insert(0, pc) - func_names = self.ProcessStack(full_stack) - func_ids = [] - for func in func_names: - func_ids.append(self.func_enum.GetFunctionId(func)) - self.packed_stacks.append(func_ids) - - def PrintResults(self): - if not self.call_graph_json: - self.PrintStatistics() - else: - self.PrintCallGraphJSON() - - def PrintStatistics(self): - print('Statistical profiling result from %s, (%d ticks, %d unaccounted, %d excluded).' % - (self.log_file, - self.total_number_of_ticks, - self.unaccounted_number_of_ticks, - self.excluded_number_of_ticks)) - if self.total_number_of_ticks > 0: - js_entries = self.js_entries.ExportValueList() - js_entries.extend(self.deleted_code) - cpp_entries = self.cpp_entries.ExportValueList() - # Print the unknown ticks percentage if they are not ignored. - if not self.ignore_unknown and self.unaccounted_number_of_ticks > 0: - self.PrintHeader('Unknown') - self.PrintCounter(self.unaccounted_number_of_ticks) - # Print the library ticks. - self.PrintHeader('Shared libraries') - self.PrintEntries(cpp_entries, lambda e:e.IsSharedLibraryEntry()) - # Print the JavaScript ticks. - self.PrintHeader('JavaScript') - self.PrintEntries(js_entries, lambda e:not e.IsSharedLibraryEntry()) - # Print the C++ ticks. - self.PrintHeader('C++') - self.PrintEntries(cpp_entries, lambda e:not e.IsSharedLibraryEntry()) - # Print the GC ticks. - self.PrintHeader('GC') - self.PrintCounter(self.number_of_gc_ticks) - # Print call profile. 
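# (Each entry's stacks dict maps a tuple of frame names to a tick
# count; PrintCallProfile below merges those dicts across all entries
# and lists the call paths by descending tick count.)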
- print('\n [Call profile]:') - print(' total call path') - js_entries.extend(cpp_entries) - self.PrintCallProfile(js_entries) - - def PrintHeader(self, header_title): - print('\n [%s]:' % header_title) - print(' ticks total nonlib name') - - def PrintCounter(self, ticks_count): - percentage = ticks_count * 100.0 / self.total_number_of_ticks - print(' %(ticks)5d %(total)5.1f%%' % { - 'ticks' : ticks_count, - 'total' : percentage, - }) - - def PrintEntries(self, entries, condition): - # If ignoring unaccounted ticks don't include these in percentage - # calculations - number_of_accounted_ticks = self.total_number_of_ticks - if self.ignore_unknown: - number_of_accounted_ticks -= self.unaccounted_number_of_ticks - - number_of_non_library_ticks = number_of_accounted_ticks - self.number_of_library_ticks - entries.sort(key=lambda e: (e.tick_count, e.ToString()), reverse=True) - for entry in entries: - if entry.tick_count > 0 and condition(entry): - total_percentage = entry.tick_count * 100.0 / number_of_accounted_ticks - if entry.IsSharedLibraryEntry(): - non_library_percentage = 0 - else: - non_library_percentage = entry.tick_count * 100.0 / number_of_non_library_ticks - print(' %(ticks)5d %(total)5.1f%% %(nonlib)6.1f%% %(name)s' % { - 'ticks' : entry.tick_count, - 'total' : total_percentage, - 'nonlib' : non_library_percentage, - 'name' : entry.ToString() - }) - region_ticks = entry.RegionTicks() - if not region_ticks is None: - items = region_ticks.items() - items.sort(key=lambda e: e[1][1], reverse=True) - for (name, ticks) in items: - print(' flat cum') - print(' %(flat)5.1f%% %(accum)5.1f%% %(name)s' % { - 'flat' : ticks[1] * 100.0 / entry.tick_count, - 'accum' : ticks[0] * 100.0 / entry.tick_count, - 'name': name - }) - - def PrintCallProfile(self, entries): - all_stacks = {} - total_stacks = 0 - for entry in entries: - all_stacks.update(entry.stacks) - for count in entry.stacks.itervalues(): - total_stacks += count - all_stacks_items = all_stacks.items(); - all_stacks_items.sort(key = itemgetter(1), reverse=True) - missing_percentage = (self.total_number_of_ticks - total_stacks) * 100.0 / self.total_number_of_ticks - print(' %(ticks)5d %(total)5.1f%% <no call path information>' % { - 'ticks' : self.total_number_of_ticks - total_stacks, - 'total' : missing_percentage - }) - for stack, count in all_stacks_items: - total_percentage = count * 100.0 / self.total_number_of_ticks - print(' %(ticks)5d %(total)5.1f%% %(call_path)s' % { - 'ticks' : count, - 'total' : total_percentage, - 'call_path' : stack[0] + ' <- ' + stack[1] - }) - - def PrintCallGraphJSON(self): - print('\nvar __profile_funcs = ["' + - '",\n"'.join(self.func_enum.GetKnownFunctions()) + - '"];') - print('var __profile_ticks = [') - str_packed_stacks = [] - for stack in self.packed_stacks: - str_packed_stacks.append('[' + ','.join(map(str, stack)) + ']') - print(',\n'.join(str_packed_stacks)) - print('];') - -class CmdLineProcessor(object): - - def __init__(self): - self.options = ["js", - "gc", - "compiler", - "other", - "external", - "ignore-unknown", - "separate-ic", - "call-graph-json"] - # default values - self.state = None - self.ignore_unknown = False - self.log_file = None - self.separate_ic = False - self.call_graph_json = False - - def ProcessArguments(self): - try: - opts, args = getopt.getopt(sys.argv[1:], "jgcoe", self.options) - except getopt.GetoptError: - self.PrintUsageAndExit() - for key, value in opts: - if key in ("-j", "--js"): - self.state = VMStates['JS'] - if key in ("-g", "--gc"): - self.state = 
VMStates['GC'] - if key in ("-c", "--compiler"): - self.state = VMStates['COMPILER'] - if key in ("-o", "--other"): - self.state = VMStates['OTHER'] - if key in ("-e", "--external"): - self.state = VMStates['EXTERNAL'] - if key in ("--ignore-unknown"): - self.ignore_unknown = True - if key in ("--separate-ic"): - self.separate_ic = True - if key in ("--call-graph-json"): - self.call_graph_json = True - self.ProcessRequiredArgs(args) - - def ProcessRequiredArgs(self, args): - return - - def GetRequiredArgsNames(self): - return - - def PrintUsageAndExit(self): - print('Usage: %(script_name)s --{%(opts)s} %(req_opts)s' % { - 'script_name': os.path.basename(sys.argv[0]), - 'opts': string.join(self.options, ','), - 'req_opts': self.GetRequiredArgsNames() - }) - sys.exit(2) - - def RunLogfileProcessing(self, tick_processor): - tick_processor.ProcessLogfile(self.log_file, self.state, self.ignore_unknown, - self.separate_ic, self.call_graph_json) - - -if __name__ == '__main__': - sys.exit('You probably want to run windows-tick-processor.py or linux-tick-processor.py.') diff --git a/tools/utils.py b/tools/utils.py index 8083091b..fb94d141 100644 --- a/tools/utils.py +++ b/tools/utils.py @@ -49,6 +49,8 @@ def GuessOS(): return 'linux' elif id == 'Darwin': return 'macos' + elif id.find('CYGWIN') >= 0: + return 'cygwin' elif id == 'Windows' or id == 'Microsoft': # On Windows Vista platform.system() can return 'Microsoft' with some # versions of Python, see http://bugs.python.org/issue1082 diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj index 24321e52..10fbc58a 100644 --- a/tools/v8.xcodeproj/project.pbxproj +++ b/tools/v8.xcodeproj/project.pbxproj @@ -211,6 +211,7 @@ 895692A512D4ED240072C313 /* objects-printer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8946827412C26EB700C914BC /* objects-printer.cc */; }; 8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; }; 895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895FA748107FFE73006F39D4 /* constants-arm.cc */; }; + 896FA1E5130F93D300042054 /* lithium-gap-resolver-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */; }; 896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; }; 897C77D012B68E3D000767A8 /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; }; 897C77D112B68E3D000767A8 /* d8-js.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988320F2A3B8B007D5254 /* d8-js.cc */; }; @@ -647,6 +648,8 @@ 895FA751107FFEAE006F39D4 /* register-allocator-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-arm.h"; path = "arm/register-allocator-arm.h"; sourceTree = "<group>"; }; 8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32.h"; path = "ia32/codegen-ia32.h"; sourceTree = "<group>"; }; 896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm.h"; path = "arm/codegen-arm.h"; sourceTree = "<group>"; }; + 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = 
"lithium-gap-resolver-arm.cc"; path = "arm/lithium-gap-resolver-arm.cc"; sourceTree = "<group>"; }; + 896FA1E4130F93D300042054 /* lithium-gap-resolver-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-arm.h"; path = "arm/lithium-gap-resolver-arm.h"; sourceTree = "<group>"; }; 8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; }; 897C77D912B68E3D000767A8 /* d8-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-arm"; sourceTree = BUILT_PRODUCTS_DIR; }; 897F767A0E71B4CC007ACF34 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -1538,6 +1541,8 @@ 893E24C812B14B510083370F /* lithium-arm.h */, 893E24C912B14B520083370F /* lithium-codegen-arm.cc */, 893E24CA12B14B520083370F /* lithium-codegen-arm.h */, + 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */, + 896FA1E4130F93D300042054 /* lithium-gap-resolver-arm.h */, 897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */, 897FF1550E719B8F00D62E90 /* macro-assembler-arm.h */, 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */, @@ -2290,6 +2295,7 @@ 894A59EA12D777E80000766D /* lithium.cc in Sources */, 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */, 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */, + 896FA1E5130F93D300042054 /* lithium-gap-resolver-arm.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/tools/visual_studio/x64.vsprops b/tools/visual_studio/x64.vsprops index 79904403..04d9c655 100644 --- a/tools/visual_studio/x64.vsprops +++ b/tools/visual_studio/x64.vsprops @@ -12,6 +12,7 @@ /> <Tool Name="VCLinkerTool" + StackReserveSize="2091752" TargetMachine="17" /> </VisualStudioPropertySheet> diff --git a/tools/windows-tick-processor.py b/tools/windows-tick-processor.py deleted file mode 100755 index ade2bf27..00000000 --- a/tools/windows-tick-processor.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -# Usage: process-ticks.py <binary> <logfile> -# -# Where <binary> is the binary program name (eg, v8_shell.exe) and -# <logfile> is the log file name (eg, v8.log). -# -# This tick processor expects to find a map file for the binary named -# binary.map if the binary is named binary.exe. The tick processor -# only works for statically linked executables - no information about -# shared libraries is logged from v8 on Windows. - -import os, re, sys, tickprocessor - -class WindowsTickProcessor(tickprocessor.TickProcessor): - - def Unmangle(self, name): - """Performs very simple unmangling of C++ names. - - Does not handle arguments and template arguments. The mangled names have - the form: - - ?LookupInDescriptor@JSObject@internal@v8@@...arguments info... - - """ - # Name is mangled if it starts with a question mark. - is_mangled = re.match("^\?(.*)", name) - if is_mangled: - substrings = is_mangled.group(1).split('@') - try: - # The function name is terminated by two @s in a row. Find the - # substrings that are part of the function name. - index = substrings.index('') - substrings = substrings[0:index] - except ValueError: - # If we did not find two @s in a row, the mangled name is not in - # the format we expect and we give up. - return name - substrings.reverse() - function_name = "::".join(substrings) - return function_name - return name - - - def ParseMapFile(self, filename): - """Parse map file and add symbol information to the cpp entries.""" - # Locate map file. - has_dot = re.match('^([a-zA-F0-9_-]*)[\.]?.*$', filename) - if has_dot: - map_file_name = has_dot.group(1) + '.map' - try: - map_file = open(map_file_name, 'rb') - except IOError: - sys.exit("Could not open map file: " + map_file_name) - else: - sys.exit("Could not find map file for executable: " + filename) - try: - max_addr = 0 - min_addr = 2**30 - # Process map file and search for function entries. - row_regexp = re.compile(' 0001:[0-9a-fA-F]{8}\s*([_\?@$0-9a-zA-Z]*)\s*([0-9a-fA-F]{8}).*') - for line in map_file: - row = re.match(row_regexp, line) - if row: - addr = int(row.group(2), 16) - if addr > max_addr: - max_addr = addr - if addr < min_addr: - min_addr = addr - mangled_name = row.group(1) - name = self.Unmangle(mangled_name) - self.cpp_entries.Insert(addr, tickprocessor.CodeEntry(addr, name)); - i = min_addr - # Mark the pages for which there are functions in the map file. 
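# (vm_extent is a page-granularity map: addr >> 12 names a 4 KB page,
# and FindEntry consults cpp_entries only for PCs whose page is marked
# here, falling back to the JS code map otherwise.)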
- while i < max_addr: - page = i >> 12 - self.vm_extent[page] = 1 - i += 4096 - finally: - map_file.close() - - -class WindowsCmdLineProcessor(tickprocessor.CmdLineProcessor): - - def __init__(self): - super(WindowsCmdLineProcessor, self).__init__() - self.binary_file = None - - def GetRequiredArgsNames(self): - return 'binary log_file' - - def ProcessRequiredArgs(self, args): - if len(args) != 2: - self.PrintUsageAndExit() - else: - self.binary_file = args[0] - self.log_file = args[1] - - -def Main(): - cmdline_processor = WindowsCmdLineProcessor() - cmdline_processor.ProcessArguments() - tickprocessor = WindowsTickProcessor() - tickprocessor.ParseMapFile(cmdline_processor.binary_file) - cmdline_processor.RunLogfileProcessing(tickprocessor) - tickprocessor.PrintResults() - -if __name__ == '__main__': - Main()
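Taken together, the tick-processor changes above follow V8's updated
profiler log format: code-creation events may now carry the shared
function address plus an optimization marker, and sfi-move replaces the
old function-move events. A sketch of parsing one such line in Python,
mirroring _CODE_CREATE_RE from ll_prof.py above (the address, size and
function name in the sample line are invented):

    import re

    CODE_CREATE_RE = re.compile(
        r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\""
        r"(?:,(0x[a-f0-9]+),([~*])?)?(?:,(\d+))?")

    # Marker semantics as in tickprocessor.js's parseState.
    STATES = {None: "compiled", "~": "optimizable", "*": "optimized"}

    line = 'code-creation,LazyCompile,0x2905a7c0,1152,"foo bar.js:10",0x44556680,*'
    match = CODE_CREATE_RE.match(line)
    tag, start, size, name = (match.group(1), int(match.group(2), 16),
                              int(match.group(3)), match.group(4))
    func_addr = int(match.group(5), 16)  # shared function info address
    state = STATES[match.group(6)]       # optimization marker, may be absent
    print("%s %s at 0x%x (%d bytes, %s, sfi 0x%x)"
          % (tag, name, start, size, state, func_addr))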