-rw-r--r--  ChangeLog | 50
-rw-r--r--  SConstruct | 24
-rw-r--r--  V8_MERGE_REVISION | 4
-rw-r--r--  include/v8.h | 126
-rw-r--r--  src/api.cc | 35
-rw-r--r--  src/arm/assembler-arm-inl.h | 19
-rw-r--r--  src/arm/assembler-arm.cc | 162
-rw-r--r--  src/arm/assembler-arm.h | 47
-rw-r--r--  src/arm/builtins-arm.cc | 45
-rw-r--r--  src/arm/codegen-arm-inl.h | 24
-rw-r--r--  src/arm/codegen-arm.cc | 1633
-rw-r--r--  src/arm/codegen-arm.h | 169
-rw-r--r--  src/arm/constants-arm.h | 7
-rw-r--r--  src/arm/debug-arm.cc | 71
-rw-r--r--  src/arm/disasm-arm.cc | 44
-rw-r--r--  src/arm/full-codegen-arm.cc | 217
-rw-r--r--  src/arm/ic-arm.cc | 340
-rw-r--r--  src/arm/jump-target-arm.cc | 22
-rw-r--r--  src/arm/macro-assembler-arm.cc | 97
-rw-r--r--  src/arm/macro-assembler-arm.h | 35
-rw-r--r--  src/arm/simulator-arm.cc | 45
-rw-r--r--  src/arm/stub-cache-arm.cc | 416
-rw-r--r--  src/arm/virtual-frame-arm-inl.h | 6
-rw-r--r--  src/arm/virtual-frame-arm.cc | 141
-rw-r--r--  src/arm/virtual-frame-arm.h | 67
-rw-r--r--  src/assembler.cc | 6
-rw-r--r--  src/assembler.h | 14
-rw-r--r--  src/ast.h | 13
-rw-r--r--  src/builtins.cc | 13
-rw-r--r--  src/builtins.h | 1
-rw-r--r--  src/checks.h | 6
-rw-r--r--  src/codegen.cc | 39
-rw-r--r--  src/codegen.h | 286
-rw-r--r--  src/compilation-cache.cc | 28
-rw-r--r--  src/compilation-cache.h | 3
-rwxr-xr-x  src/compiler.cc | 1
-rw-r--r--  src/cpu-profiler.cc | 54
-rw-r--r--  src/cpu-profiler.h | 11
-rw-r--r--  src/d8.cc | 3
-rw-r--r--  src/data-flow.cc | 3
-rw-r--r--  src/debug.cc | 128
-rw-r--r--  src/debug.h | 22
-rw-r--r--  src/disassembler.cc | 2
-rw-r--r--  src/flag-definitions.h | 5
-rw-r--r--  src/full-codegen.cc | 370
-rw-r--r--  src/full-codegen.h | 34
-rw-r--r--  src/globals.h | 13
-rw-r--r--  src/heap-inl.h | 70
-rw-r--r--  src/heap.cc | 733
-rw-r--r--  src/heap.h | 117
-rw-r--r--  src/ia32/assembler-ia32-inl.h | 22
-rw-r--r--  src/ia32/assembler-ia32.cc | 61
-rw-r--r--  src/ia32/assembler-ia32.h | 24
-rw-r--r--  src/ia32/builtins-ia32.cc | 43
-rw-r--r--  src/ia32/codegen-ia32.cc | 1118
-rw-r--r--  src/ia32/codegen-ia32.h | 92
-rw-r--r--  src/ia32/debug-ia32.cc | 42
-rw-r--r--  src/ia32/disasm-ia32.cc | 37
-rw-r--r--  src/ia32/full-codegen-ia32.cc | 265
-rw-r--r--  src/ia32/ic-ia32.cc | 571
-rw-r--r--  src/ia32/macro-assembler-ia32.cc | 125
-rw-r--r--  src/ia32/macro-assembler-ia32.h | 18
-rw-r--r--  src/ia32/stub-cache-ia32.cc | 576
-rw-r--r--  src/ia32/virtual-frame-ia32.cc | 18
-rw-r--r--  src/ia32/virtual-frame-ia32.h | 5
-rw-r--r--  src/ic.cc | 154
-rw-r--r--  src/ic.h | 55
-rw-r--r--  src/jump-target-heavy.cc | 29
-rw-r--r--  src/jump-target-light.cc | 5
-rw-r--r--  src/liveedit.cc | 4
-rw-r--r--  src/log.cc | 4
-rw-r--r--  src/log.h | 12
-rw-r--r--  src/macros.py | 10
-rw-r--r--  src/mark-compact.cc | 211
-rw-r--r--  src/mark-compact.h | 62
-rw-r--r--  src/mips/assembler-mips.cc | 9
-rw-r--r--  src/mips/assembler-mips.h | 5
-rw-r--r--  src/mips/full-codegen-mips.cc | 2
-rw-r--r--  src/mirror-debugger.js | 50
-rw-r--r--  src/objects-debug.cc | 3
-rw-r--r--  src/objects-inl.h | 121
-rw-r--r--  src/objects.cc | 59
-rw-r--r--  src/objects.h | 254
-rw-r--r--  src/parser.cc | 5
-rw-r--r--  src/platform-freebsd.cc | 6
-rw-r--r--  src/platform-linux.cc | 3
-rw-r--r--  src/platform-solaris.cc | 6
-rw-r--r--  src/profile-generator-inl.h | 11
-rw-r--r--  src/profile-generator.cc | 895
-rw-r--r--  src/profile-generator.h | 348
-rw-r--r--  src/regexp.js | 24
-rw-r--r--  src/runtime.cc | 295
-rw-r--r--  src/runtime.h | 3
-rw-r--r--  src/serialize.cc | 4
-rw-r--r--  src/spaces-inl.h | 251
-rw-r--r--  src/spaces.cc | 445
-rw-r--r--  src/spaces.h | 279
-rw-r--r--  src/string.js | 39
-rw-r--r--  src/stub-cache.cc | 155
-rw-r--r--  src/stub-cache.h | 67
-rw-r--r--  src/type-info.h | 2
-rw-r--r--  src/unbound-queue-inl.h | 8
-rw-r--r--  src/unbound-queue.h | 1
-rw-r--r--  src/utils.h | 46
-rw-r--r--  src/v8-counters.h | 12
-rw-r--r--  src/v8.cc | 4
-rw-r--r--  src/v8natives.js | 20
-rw-r--r--  src/v8threads.cc | 2
-rw-r--r--  src/v8threads.h | 2
-rw-r--r--  src/version.cc | 4
-rw-r--r--  src/virtual-frame-light-inl.h | 30
-rw-r--r--  src/virtual-frame-light.cc | 2
-rw-r--r--  src/x64/assembler-x64-inl.h | 15
-rw-r--r--  src/x64/assembler-x64.cc | 16
-rw-r--r--  src/x64/assembler-x64.h | 18
-rw-r--r--  src/x64/builtins-x64.cc | 51
-rw-r--r--  src/x64/codegen-x64.cc | 903
-rw-r--r--  src/x64/codegen-x64.h | 61
-rw-r--r--  src/x64/debug-x64.cc | 51
-rw-r--r--  src/x64/full-codegen-x64.cc | 244
-rw-r--r--  src/x64/ic-x64.cc | 426
-rw-r--r--  src/x64/macro-assembler-x64.cc | 169
-rw-r--r--  src/x64/macro-assembler-x64.h | 16
-rw-r--r--  src/x64/stub-cache-x64.cc | 535
-rw-r--r--  src/x64/virtual-frame-x64.cc | 17
-rw-r--r--  src/x64/virtual-frame-x64.h | 2
-rw-r--r--  test/cctest/test-api.cc | 300
-rw-r--r--  test/cctest/test-assembler-arm.cc | 36
-rw-r--r--  test/cctest/test-cpu-profiler.cc | 5
-rw-r--r--  test/cctest/test-debug.cc | 424
-rw-r--r--  test/cctest/test-decls.cc | 68
-rw-r--r--  test/cctest/test-disasm-arm.cc | 46
-rw-r--r--  test/cctest/test-disasm-ia32.cc | 3
-rw-r--r--  test/cctest/test-heap.cc | 67
-rw-r--r--  test/cctest/test-profile-generator.cc | 185
-rw-r--r--  test/cctest/test-spaces.cc | 22
-rw-r--r--  test/cctest/test-strings.cc | 48
-rw-r--r--  test/cctest/test-utils.cc | 52
-rw-r--r--  test/es5conform/es5conform.status | 8
-rw-r--r--  test/mjsunit/const-eval-init.js | 2
-rw-r--r--  test/mjsunit/debug-conditional-breakpoints.js | 12
-rw-r--r--  test/mjsunit/debug-return-value.js | 163
-rw-r--r--  test/mjsunit/debug-step.js | 7
-rw-r--r--  test/mjsunit/delete.js | 27
-rw-r--r--  test/mjsunit/eval.js | 2
-rw-r--r--  test/mjsunit/get-own-property-descriptor.js | 64
-rw-r--r--  test/mjsunit/keyed-call-generic.js | 96
-rw-r--r--  test/mjsunit/keyed-call-ic.js | 205
-rw-r--r--  test/mjsunit/regress/regress-728.js | 42
-rw-r--r--  test/mjsunit/regress/regress-732.js | 46
-rw-r--r--  test/mjsunit/samevalue.js | 204
-rw-r--r--  test/mjsunit/string-charat.js | 235
-rw-r--r--  test/mjsunit/string-charcodeat.js | 4
-rw-r--r--  test/mjsunit/string-index.js | 24
154 files changed, 12351 insertions, 5426 deletions
diff --git a/ChangeLog b/ChangeLog
index 3c7003a6..941c314a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,53 @@
+2010-06-14: Version 2.2.17
+
+ Improved debugger support for stepping out of functions.
+
+ Incremental performance improvements.
+
+
+2010-06-09: Version 2.2.16
+
+ Removed the SetExternalStringDiposeCallback API. Changed the
+ disposal of external string resources to call a virtual Dispose
+ method on the resource.
+
+ Added support for more precise break points when debugging and
+ stepping.
+
+ Memory usage improvements on all platforms.
+
+
+2010-06-07: Version 2.2.15
+
+ Add an API to control the disposal of external string resources.
+
+        Add missing initialization of a couple of variables which make
+        some compilers complain when compiling with -Werror.
+
+ Improve performance on all platforms.
+
+
+2010-06-02: Version 2.2.14
+
+ Fixed a crash in code generated for String.charCodeAt.
+
+ Fixed a compilation issue with some GCC versions (issue 727).
+
+ Performance optimizations on x64 and ARM platforms.
+
+
+2010-05-31: Version 2.2.13
+
+ Implement Object.getOwnPropertyDescriptor for element indices and
+ strings (issue 599).
+
+        Fix bug with Windows 64-bit C calls from generated code.
+
+        Add new scons flag unalignedaccesses for ARM builds.
+
+ Performance improvements on all platforms.
+
+
2010-05-26: Version 2.2.12
Allowed accessors to be defined on objects rather than just object
diff --git a/SConstruct b/SConstruct
index cf6b57d7..53d845c2 100644
--- a/SConstruct
+++ b/SConstruct
@@ -204,10 +204,16 @@ LIBRARY_FLAGS = {
'LINKFLAGS': ['-m32']
},
'arch:arm': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
+ 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
+ 'unalignedaccesses:on' : {
+ 'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=1']
+ },
+ 'unalignedaccesses:off' : {
+ 'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=0']
+ }
},
'simulator:arm': {
- 'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'],
+ 'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:mips': {
@@ -734,6 +740,11 @@ SIMPLE_OPTIONS = {
'default': 'none',
'help': 'build with simulator'
},
+ 'unalignedaccesses': {
+ 'values': ['default', 'on', 'off'],
+ 'default': 'default',
+ 'help': 'set whether the ARM target supports unaligned accesses'
+ },
'disassembler': {
'values': ['on', 'off'],
'default': 'off',
@@ -771,6 +782,7 @@ def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
result.Add('sample', 'build sample (shell, process, lineprocessor)', '')
+ result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
@@ -852,6 +864,12 @@ def VerifyOptions(env):
Abort("Shared Object soname not applicable for static library.")
if env['os'] != 'win32' and env['pgo'] != 'off':
Abort("Profile guided optimization only supported on Windows.")
+ if env['cache'] and not os.path.isdir(env['cache']):
+ Abort("The specified cache directory does not exist.")
+ if not (env['arch'] == 'arm' or env['simulator'] == 'arm') and ('unalignedaccesses' in ARGUMENTS):
+ print env['arch']
+ print env['simulator']
+ Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
@@ -1116,6 +1134,8 @@ def Build():
else:
env.Default('library')
+ if env['cache']:
+ CacheDir(env['cache'])
# We disable deprecation warnings because we need to be able to use
# env.Copy without getting warnings for compatibility with older
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index cb51e93c..1a569dd3 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/6.0.423.0/DEPS
-http://v8.googlecode.com/svn/trunk@4730
+http://src.chromium.org/svn/releases/6.0.436.0/DEPS
+http://v8.googlecode.com/svn/trunk@4851
diff --git a/include/v8.h b/include/v8.h
index 5b5dabe3..24b4cbe3 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -134,6 +134,7 @@ namespace internal {
class Arguments;
class Object;
+class Heap;
class Top;
}
@@ -513,6 +514,7 @@ class V8EXPORT Data {
class V8EXPORT ScriptData { // NOLINT
public:
virtual ~ScriptData() { }
+
/**
* Pre-compiles the specified script (context-independent).
*
@@ -522,6 +524,16 @@ class V8EXPORT ScriptData { // NOLINT
static ScriptData* PreCompile(const char* input, int length);
/**
+ * Pre-compiles the specified script (context-independent).
+ *
+ * NOTE: Pre-compilation using this method cannot happen on another thread
+ * without using Lockers.
+ *
+ * \param source Script source code.
+ */
+ static ScriptData* PreCompile(Handle<String> source);
+
+ /**
* Load previous pre-compilation data.
*
* \param data Pointer to data returned by a call to Data() of a previous
@@ -1026,12 +1038,24 @@ class V8EXPORT String : public Primitive {
class V8EXPORT ExternalStringResourceBase {
public:
virtual ~ExternalStringResourceBase() {}
+
protected:
ExternalStringResourceBase() {}
+
+ /**
+ * Internally V8 will call this Dispose method when the external string
+ * resource is no longer needed. The default implementation will use the
+ * delete operator. This method can be overridden in subclasses to
+ * control how allocated external string resources are disposed.
+ */
+ virtual void Dispose() { delete this; }
+
private:
// Disallow copying and assigning.
ExternalStringResourceBase(const ExternalStringResourceBase&);
void operator=(const ExternalStringResourceBase&);
+
+ friend class v8::internal::Heap;
};
/**
@@ -1048,10 +1072,17 @@ class V8EXPORT String : public Primitive {
* buffer.
*/
virtual ~ExternalStringResource() {}
- /** The string data from the underlying buffer.*/
+
+ /**
+ * The string data from the underlying buffer.
+ */
virtual const uint16_t* data() const = 0;
- /** The length of the string. That is, the number of two-byte characters.*/
+
+ /**
+ * The length of the string. That is, the number of two-byte characters.
+ */
virtual size_t length() const = 0;
+
protected:
ExternalStringResource() {}
};
@@ -1122,11 +1153,11 @@ class V8EXPORT String : public Primitive {
/**
* Creates a new external string using the data defined in the given
- * resource. The resource is deleted when the external string is no
- * longer live on V8's heap. The caller of this function should not
- * delete or modify the resource. Neither should the underlying buffer be
- * deallocated or modified except through the destructor of the
- * external string resource.
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
*/
static Local<String> NewExternal(ExternalStringResource* resource);
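
To make the new contract concrete, here is a hedged sketch (not code from this patch) of an embedder-side resource that overrides Dispose to recycle its buffer instead of relying on the default delete-this behavior; ReturnBufferToPool is a hypothetical embedder function.

    class PooledStringResource : public v8::String::ExternalStringResource {
     public:
      PooledStringResource(const uint16_t* data, size_t length)
          : data_(data), length_(length) {}
      virtual const uint16_t* data() const { return data_; }
      virtual size_t length() const { return length_; }
      // V8, not the embedder, calls this once the external string is dead.
      virtual void Dispose() {
        ReturnBufferToPool(data_);  // hypothetical pooling hook
        delete this;
      }
     private:
      const uint16_t* data_;
      size_t length_;
    };

A resource like this is handed to NewExternal exactly as before; only the teardown path changes.
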
@@ -1136,17 +1167,18 @@ class V8EXPORT String : public Primitive {
* will use the external string resource. The external string resource's
* character contents needs to be equivalent to this string.
* Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails.
+ * The string is not modified if the operation fails. See NewExternal for
+ * information on the lifetime of the resource.
*/
bool MakeExternal(ExternalStringResource* resource);
/**
* Creates a new external string using the ascii data defined in the given
- * resource. The resource is deleted when the external string is no
- * longer live on V8's heap. The caller of this function should not
- * delete or modify the resource. Neither should the underlying buffer be
- * deallocated or modified except through the destructor of the
- * external string resource.
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
*/
static Local<String> NewExternal(ExternalAsciiStringResource* resource);
@@ -1156,7 +1188,8 @@ class V8EXPORT String : public Primitive {
* will use the external string resource. The external string resource's
* character contents needs to be equivalent to this string.
* Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails.
+ * The string is not modified if the operation fails. See NewExternal for
+ * information on the lifetime of the resource.
*/
bool MakeExternal(ExternalAsciiStringResource* resource);
@@ -1726,13 +1759,22 @@ typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
-
/**
* Returns a non-empty handle if the interceptor intercepts the request.
- * The result is true if the property exists and false otherwise.
+ * The result is either boolean (true if property exists and false
+ * otherwise) or an integer encoding property attributes.
*/
+#ifdef USE_NEW_QUERY_CALLBACKS
+typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
+ const AccessorInfo& info);
+#else
typedef Handle<Boolean> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
+#endif
+
+typedef Handle<Value> (*NamedPropertyQueryImpl)(Local<String> property,
+ const AccessorInfo& info);
+
/**
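
A hedged illustration of the new query signature (not from the patch): defining USE_NEW_QUERY_CALLBACKS before including v8.h switches NamedPropertyQuery to the Integer-returning form, so an interceptor can report property attributes rather than a bare boolean, which is what Object.getOwnPropertyDescriptor needs.

    #define USE_NEW_QUERY_CALLBACKS  // opt in to the Integer-returning form
    #include <v8.h>

    // Reports every intercepted name as present and read-only; returning an
    // empty handle would mean "not intercepted".
    static v8::Handle<v8::Integer> QueryInterceptor(
        v8::Local<v8::String> property, const v8::AccessorInfo& info) {
      return v8::Integer::New(v8::ReadOnly);
    }
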
@@ -1984,7 +2026,16 @@ class V8EXPORT FunctionTemplate : public Template {
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
- Handle<Value> data);
+ Handle<Value> data) {
+ NamedPropertyQueryImpl casted =
+ reinterpret_cast<NamedPropertyQueryImpl>(query);
+ SetNamedInstancePropertyHandlerImpl(getter,
+ setter,
+ casted,
+ remover,
+ enumerator,
+ data);
+ }
void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
IndexedPropertySetter setter,
IndexedPropertyQuery query,
@@ -1996,6 +2047,13 @@ class V8EXPORT FunctionTemplate : public Template {
friend class Context;
friend class ObjectTemplate;
+ private:
+ void SetNamedInstancePropertyHandlerImpl(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQueryImpl query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data);
};
@@ -2053,7 +2111,7 @@ class V8EXPORT ObjectTemplate : public Template {
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check is an object has a property.
+ * \param query The callback to invoke to check if an object has a property.
* \param deleter The callback to invoke when deleting a property.
* \param enumerator The callback to invoke to enumerate all the named
* properties of an object.
@@ -2065,7 +2123,26 @@ class V8EXPORT ObjectTemplate : public Template {
NamedPropertyQuery query = 0,
NamedPropertyDeleter deleter = 0,
NamedPropertyEnumerator enumerator = 0,
- Handle<Value> data = Handle<Value>());
+ Handle<Value> data = Handle<Value>()) {
+ NamedPropertyQueryImpl casted =
+ reinterpret_cast<NamedPropertyQueryImpl>(query);
+ SetNamedPropertyHandlerImpl(getter,
+ setter,
+ casted,
+ deleter,
+ enumerator,
+ data);
+ }
+
+ private:
+ void SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQueryImpl query,
+ NamedPropertyDeleter deleter,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data);
+
+ public:
/**
* Sets an indexed property handler on the object template.
@@ -2335,15 +2412,6 @@ typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCCallback)();
-// --- C o n t e x t G e n e r a t o r ---
-
-/**
- * Applications must provide a callback function which is called to generate
- * a context if a context was not deserialized from the snapshot.
- */
-typedef Persistent<Context> (*ContextGenerator)();
-
-
/**
* Profiler modules.
*
@@ -3177,7 +3245,7 @@ class Internals {
static const int kProxyProxyOffset = sizeof(void*);
static const int kJSObjectHeaderSize = 3 * sizeof(void*);
static const int kFullStringRepresentationMask = 0x07;
- static const int kExternalTwoByteRepresentationTag = 0x03;
+ static const int kExternalTwoByteRepresentationTag = 0x02;
// These constants are compiler dependent so their values must be
// defined within the implementation.
diff --git a/src/api.cc b/src/api.cc
index a7948aeb..cb5e96df 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -853,10 +853,10 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
}
-void FunctionTemplate::SetNamedInstancePropertyHandler(
+void FunctionTemplate::SetNamedInstancePropertyHandlerImpl(
NamedPropertyGetter getter,
NamedPropertySetter setter,
- NamedPropertyQuery query,
+ NamedPropertyQueryImpl query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
@@ -987,12 +987,13 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
}
-void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
+void ObjectTemplate::SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQueryImpl query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator
+ enumerator,
+ Handle<Value> data) {
if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
ENTER_V8;
HandleScope scope;
@@ -1000,12 +1001,12 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
+ Utils::ToLocal(cons)->SetNamedInstancePropertyHandlerImpl(getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
}
@@ -1119,6 +1120,12 @@ ScriptData* ScriptData::PreCompile(const char* input, int length) {
}
+ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ return i::PreParse(str, NULL, NULL);
+}
+
+
ScriptData* ScriptData::New(const char* data, int length) {
// Return an empty ScriptData if the length is obviously invalid.
if (length % sizeof(unsigned) != 0) {
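
For context, a minimal usage sketch of the new overload (not part of the patch). It assumes an entered Context and an open HandleScope; Script::New's pre_data parameter is the usual way to consume the result in this API version.

    v8::HandleScope scope;
    v8::Handle<v8::String> source =
        v8::String::New("function add(a, b) { return a + b; }");
    v8::ScriptData* pre = v8::ScriptData::PreCompile(source);  // new overload
    v8::Handle<v8::Script> script = v8::Script::New(source, NULL, pre);
    delete pre;  // The embedder owns the returned ScriptData.
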
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index e292cefa..8ca91265 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -116,9 +116,10 @@ Address* RelocInfo::target_reference_address() {
Address RelocInfo::call_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
+ // The 2 instructions offset assumes patched debug break slot or return
+ // sequence.
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
@@ -168,6 +169,12 @@ bool RelocInfo::IsPatchedReturnSequence() {
}
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, 2);
+}
+
+
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
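
The IsNop(current_instr, 2) test above relies on ARM's marker-nop convention: a "typed" nop is encoded as a register-to-itself move, type 2 being mov r2, r2. A sketch of the idea, with the helper name invented for illustration:

    // An unpatched debug break slot starts with the type-2 marker nop, so
    // the slot counts as patched as soon as its first instruction is
    // anything else.
    static bool SlotLooksPatched(Instr first_instr_of_slot) {
      return !Assembler::IsNop(first_instr_of_slot, 2);
    }
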
@@ -178,8 +185,10 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
- RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) {
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 050e15bc..025f28e5 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -903,20 +903,6 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
// Data-processing instructions.
-// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
-// Instruction details available in ARM DDI 0406A, A8-464.
-// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
-// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
-void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
- const Operand& src3, Condition cond) {
- ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
- ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
- ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
- emit(cond | 0x3F*B21 | src3.imm32_*B16 |
- dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
-}
-
-
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -1106,6 +1092,82 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
+// Bitfield manipulation instructions.
+
+// Unsigned bit field extract.
+// Extracts #width adjacent bits from position #lsb in a register, and
+// writes them to the low bits of a destination register.
+// ubfx dst, src, #lsb, #width
+void Assembler::ubfx(Register dst,
+ Register src,
+ int lsb,
+ int width,
+ Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
+ lsb*B7 | B6 | B4 | src.code());
+}
+
+
+// Signed bit field extract.
+// Extracts #width adjacent bits from position #lsb in a register, and
+// writes them to the low bits of a destination register. The extracted
+// value is sign extended to fill the destination register.
+// sbfx dst, src, #lsb, #width
+void Assembler::sbfx(Register dst,
+ Register src,
+ int lsb,
+ int width,
+ Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
+ lsb*B7 | B6 | B4 | src.code());
+}
+
+
+// Bit field clear.
+// Sets #width adjacent bits at position #lsb in the destination register
+// to zero, preserving the value of the other bits.
+// bfc dst, #lsb, #width
+void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ int msb = lsb + width - 1;
+ emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
+}
+
+
+// Bit field insert.
+// Inserts #width adjacent bits from the low bits of the source register
+// into position #lsb of the destination register.
+// bfi dst, src, #lsb, #width
+void Assembler::bfi(Register dst,
+ Register src,
+ int lsb,
+ int width,
+ Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ int msb = lsb + width - 1;
+ emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
+ src.code());
+}
+
+
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
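
As a cross-check on the encodings above, the intended behavior of the four bitfield instructions can be written as plain C++ over 32-bit words. This is a reference sketch, not patch code; it assumes width < 32 so the mask shifts stay well-defined, and an arithmetic right shift on int32_t for the signed case.

    #include <stdint.h>

    uint32_t ubfx_ref(uint32_t src, int lsb, int width) {  // unsigned extract
      return (src >> lsb) & ((1u << width) - 1u);
    }

    int32_t sbfx_ref(uint32_t src, int lsb, int width) {   // signed extract
      int32_t aligned = static_cast<int32_t>(src << (32 - lsb - width));
      return aligned >> (32 - width);  // arithmetic shift sign-extends
    }

    uint32_t bfc_ref(uint32_t dst, int lsb, int width) {   // bit field clear
      return dst & ~(((1u << width) - 1u) << lsb);
    }

    uint32_t bfi_ref(uint32_t dst, uint32_t src, int lsb, int width) {
      uint32_t mask = ((1u << width) - 1u) << lsb;         // bit field insert
      return (dst & ~mask) | ((src << lsb) & mask);
    }
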
@@ -1151,31 +1213,32 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
// Both instructions can be eliminated if ry = rx.
// If ry != rx, a register copy from ry to rx is inserted
// after eliminating the push and the pop instructions.
- Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
-
- if (can_peephole_optimize(2) &&
- IsPush(push_instr) &&
- IsPop(pop_instr)) {
- if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
- // For consecutive push and pop on different registers,
- // we delete both the push & pop and insert a register move.
- // push ry, pop rx --> mov rx, ry
- Register reg_pushed, reg_popped;
- reg_pushed = GetRd(push_instr);
- reg_popped = GetRd(pop_instr);
- pc_ -= 2 * kInstrSize;
- // Insert a mov instruction, which is better than a pair of push & pop
- mov(reg_popped, reg_pushed);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
- }
- } else {
- // For consecutive push and pop on the same register,
- // both the push and the pop can be deleted.
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ if (can_peephole_optimize(2)) {
+ Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
+
+ if (IsPush(push_instr) && IsPop(pop_instr)) {
+ if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+ // For consecutive push and pop on different registers,
+ // we delete both the push & pop and insert a register move.
+ // push ry, pop rx --> mov rx, ry
+ Register reg_pushed, reg_popped;
+ reg_pushed = GetRd(push_instr);
+ reg_popped = GetRd(pop_instr);
+ pc_ -= 2 * kInstrSize;
+ // Insert a mov instruction, which is better than a pair of push & pop
+ mov(reg_popped, reg_pushed);
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (diff reg) replaced by a reg move\n",
+ pc_offset());
+ }
+ } else {
+ // For consecutive push and pop on the same register,
+ // both the push and the pop can be deleted.
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
}
}
}
@@ -1977,6 +2040,13 @@ void Assembler::RecordJSReturn() {
}
+void Assembler::RecordDebugBreakSlot() {
+ WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
void Assembler::RecordComment(const char* msg) {
if (FLAG_debug_code) {
CheckBuffer();
@@ -1999,13 +2069,16 @@ void Assembler::RecordStatementPosition(int pos) {
}
-void Assembler::WriteRecordedPositions() {
+bool Assembler::WriteRecordedPositions() {
+ bool written = false;
+
// Write the statement position if it is different from what was written last
// time.
if (current_statement_position_ != written_statement_position_) {
CheckBuffer();
RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
written_statement_position_ = current_statement_position_;
+ written = true;
}
// Write the position if it is different from what was written last time and
@@ -2015,7 +2088,11 @@ void Assembler::WriteRecordedPositions() {
CheckBuffer();
RecordRelocInfo(RelocInfo::POSITION, current_position_);
written_position_ = current_position_;
+ written = true;
}
+
+ // Return whether something was written.
+ return written;
}
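
The new bool result feeds the precise-break-point support elsewhere in this patch: when recording actually wrote a new position, the caller can emit a patchable debug break slot there. A simplified sketch of that call-site pattern (modeled on the full code generator changes in this patch, not copied from them):

    // Record the statement position; if a new position was written, emit a
    // debug break slot so the statement is breakable.
    bool position_recorded =
        CodeGenerator::RecordPositions(masm_, stmt->statement_pos(), true);
    if (position_recorded) {
      Debug::GenerateSlot(masm_);
    }
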
@@ -2072,9 +2149,10 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
- ASSERT(RelocInfo::IsJSReturn(rmode)
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index a1b98f67..e5d42f9a 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -629,22 +629,39 @@ class Assembler : public Malloced {
// Distance between start of patched return sequence and the emitted address
// to jump to.
#ifdef USE_BLX
- // Return sequence is:
+ // Patched return sequence is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
#else
- // Return sequence is:
+ // Patched return sequence is:
// mov lr, pc @ start of sequence
// ldr pc, [pc, #-4] @ emitted address
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
#endif
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+#ifdef USE_BLX
+ // Patched debug break slot code is:
+ // ldr ip, [pc, #0] @ emitted address and start
+ // blx ip
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+#else
+ // Patched debug break slot code is:
+ // mov lr, pc @ start of sequence
+ // ldr pc, [pc, #-4] @ emitted address
+ static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
+#endif
+
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
- static const int kJSReturnSequenceLength = 4;
+ static const int kJSReturnSequenceInstructions = 4;
+ static const int kDebugBreakSlotInstructions = 3;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstrSize;
// ---------------------------------------------------------------------------
// Code generation
@@ -671,8 +688,6 @@ class Assembler : public Malloced {
void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
// Data-processing instructions
- void ubfx(Register dst, Register src1, const Operand& src2,
- const Operand& src3, Condition cond = al);
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -692,6 +707,10 @@ class Assembler : public Malloced {
void add(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
+ void add(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ add(dst, src1, Operand(src2), s, cond);
+ }
void adc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -759,6 +778,19 @@ class Assembler : public Malloced {
void clz(Register dst, Register src, Condition cond = al); // v5 and above
+ // Bitfield manipulation instructions. v7 and above.
+
+ void ubfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
+ void sbfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
+ void bfc(Register dst, int lsb, int width, Condition cond = al);
+
+ void bfi(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
@@ -966,13 +998,16 @@ class Assembler : public Malloced {
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
void RecordComment(const char* msg);
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
- void WriteRecordedPositions();
+ bool WriteRecordedPositions();
int pc_offset() const { return pc_ - buffer_; }
int current_position() const { return current_position_; }
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 1f776562..ddbb9777 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -138,7 +138,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
- // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)
@@ -146,7 +146,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(initial_capacity));
+ __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
@@ -243,23 +243,23 @@ static void AllocateJSArray(MacroAssembler* masm,
__ and_(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
- // Initialize the fixed array and fill it with holes. FixedArray length is not
+ // Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
- ASSERT(kSmiTag == 0);
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- // Convert array_size from smi to value.
- __ mov(array_size,
- Operand(array_size, ASR, kSmiTagSize));
+ ASSERT(kSmiTag == 0);
__ tst(array_size, array_size);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is not stored as a smi.
- __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+ // JSArrays. The length of a FixedArray is stored as a smi.
+ __ mov(array_size,
+ Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
+ LeaveCC,
+ eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -267,10 +267,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
- // array_size: size of elements array
+ // array_size: smi-tagged size of elements array
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(elements_array_end,
elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2));
+ Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@@ -543,7 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
- // r7: undefined
+ // r7: undefined value
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
@@ -555,14 +556,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
- // r7: undefined
+ // r7: undefined value
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
- // r7: undefined
+ // r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
@@ -572,7 +573,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r3: object size
// r4: JSObject (not tagged)
- // r7: undefined
+ // r7: undefined value
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -588,7 +589,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- // r7: undefined
+ // r7: undefined value
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry;
@@ -611,7 +612,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
- // r7: undefined
+ // r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
@@ -633,7 +634,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
- // r7: undefined
+ // r7: undefined value
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(
r0,
@@ -648,13 +649,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
- // r7: undefined
+ // r7: undefined value
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
- __ str(r3, MemOperand(r2, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
// r1: constructor function
@@ -1047,6 +1049,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 6edec4d7..264498db 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -36,30 +36,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control) {
- LoadCondition(expression, true_target, false_target, force_control);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(VirtualFrame::SpilledScope::is_spilled());
- Load(expression);
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- Visit(statement);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- VisitStatements(statements);
-}
-
-
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 64ed425a..1ca236d1 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -49,8 +49,6 @@ namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc,
@@ -68,33 +66,41 @@ static void MultiplyByKnownInt(MacroAssembler* masm,
static bool IsEasyToMultiplyBy(int x);
+#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
void DeferredCode::SaveRegisters() {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
- }
- }
+ // On ARM you either have a completely spilled frame or you
+ // handle it yourself, but at the moment there's no automation
+ // of registers and deferred code.
}
void DeferredCode::RestoreRegisters() {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
- }
- }
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->frame()->AssertIsSpilled();
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
}
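
These helpers let shared code generators (such as the string charAt/charCodeAt generators elsewhere in this patch) stay agnostic about frame bookkeeping: the virtual-frame variant only asserts the frame is spilled, while the IC variant brackets the call with an internal frame. A hedged sketch of the slow-path shape a generator follows; the function and register names are invented for illustration:

    static void GenerateSlowCase(MacroAssembler* masm,
                                 const RuntimeCallHelper& call_helper,
                                 Register arg) {
      call_helper.BeforeCall(masm);  // IC variant enters an internal frame.
      masm->push(arg);
      masm->CallRuntime(Runtime::kCharFromCode, 1);  // result arrives in r0
      call_helper.AfterCall(masm);   // IC variant leaves the internal frame.
    }
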
@@ -103,21 +109,28 @@ void DeferredCode::RestoreRegisters() {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- true_target_(NULL),
- false_target_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
+ previous_(owner->state()) {
+ owner->set_state(this);
}
-CodeGenState::CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : owner_(owner),
+ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target)
+ : CodeGenState(owner),
true_target_(true_target),
- false_target_(false_target),
- previous_(owner->state()) {
- owner_->set_state(this);
+ false_target_(false_target) {
+ owner->set_state(this);
+}
+
+
+TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
+ Slot* slot,
+ TypeInfo type_info)
+ : CodeGenState(owner),
+ slot_(slot) {
+ owner->set_state(this);
+ old_type_info_ = owner->set_type_info(slot, type_info);
}
@@ -127,6 +140,10 @@ CodeGenState::~CodeGenState() {
}
+TypeInfoCodeGenState::~TypeInfoCodeGenState() {
+ owner()->set_type_info(slot_, old_type_info_);
+}
+
// -------------------------------------------------------------------------
// CodeGenerator implementation
@@ -139,6 +156,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
cc_reg_(al),
state_(NULL),
loop_nesting_(0),
+ type_info_(NULL),
function_return_is_shadowed_(false) {
}
@@ -156,6 +174,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize state.
info_ = info;
+
+ int slots = scope()->num_parameters() + scope()->num_stack_slots();
+ ScopedVector<TypeInfo> type_info_array(slots);
+ type_info_ = &type_info_array;
+
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
@@ -315,7 +338,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Ignore the return value.
}
#endif
- VisitStatementsAndSpill(info->function()->body());
+ VisitStatements(info->function()->body());
}
}
@@ -363,8 +386,10 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// the add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
- CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
- return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
+ CHECK(return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions ||
+ return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions + 1);
#endif
}
}
@@ -387,6 +412,21 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
allocator_ = NULL;
+ type_info_ = NULL;
+}
+
+
+int CodeGenerator::NumberOfSlot(Slot* slot) {
+ if (slot == NULL) return kInvalidSlotNumber;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return slot->index();
+ case Slot::LOCAL:
+ return slot->index() + scope()->num_parameters();
+ default:
+ break;
+ }
+ return kInvalidSlotNumber;
}
@@ -484,7 +524,7 @@ void CodeGenerator::LoadCondition(Expression* x,
ASSERT(!has_cc());
int original_height = frame_->height();
- { CodeGenState new_state(this, true_target, false_target);
+ { ConditionCodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -652,7 +692,6 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// Special handling of identifiers as subexpressions of typeof.
- VirtualFrame::SpilledScope spilled_scope(frame_);
Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// For a global variable we build the property reference
@@ -667,10 +706,9 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
- frame_->SpillAll();
} else {
// Anything else can be handled normally.
- LoadAndSpill(expr);
+ Load(expr);
}
}
@@ -719,8 +757,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
}
} else {
// Anything else is a runtime error.
- VirtualFrame::SpilledScope spilled_scope(frame_);
- LoadAndSpill(e);
+ Load(e);
frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
@@ -788,73 +825,100 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
+ GenerateInlineSmi inline_smi,
int constant_rhs) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // sp[0] : y
- // sp[1] : x
- // result : r0
+ // top of virtual frame: y
+ // 2nd elt. on virtual frame : x
+ // result : top of virtual frame
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
case Token::ADD:
case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
+ if (inline_smi) {
+ JumpTarget done;
+ Register rhs = frame_->PopToRegister();
+ Register lhs = frame_->PopToRegister(rhs);
+ Register scratch = VirtualFrame::scratch0();
+ __ orr(scratch, rhs, Operand(lhs));
+ // Check they are both small and positive.
+ __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
+ ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
+ ASSERT_EQ(0, kSmiTag);
+ if (op == Token::ADD) {
+ __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
+ } else {
+ __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
+ }
+ done.Branch(eq);
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+ frame_->SpillAll();
+ frame_->CallStub(&stub, 0);
+ done.Bind();
+ frame_->EmitPush(r0);
+ break;
+ } else {
+ // Fall through!
+ }
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- frame_->EmitPop(r0); // r0 : y
- frame_->EmitPop(r1); // r1 : x
- GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
- frame_->CallStub(&stub, 0);
- break;
- }
-
- case Token::COMMA:
- frame_->EmitPop(r0);
- // Simply discard left value.
- frame_->Drop();
- break;
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-}
-
-
-void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- int constant_rhs) {
- // top of virtual frame: y
- // 2nd elt. on virtual frame : x
- // result : top of virtual frame
-
- // Stub is entered with a call: 'return address' is in lr.
- switch (op) {
- case Token::ADD: // fall through.
- case Token::SUB: // fall through.
+ if (inline_smi) {
+ bool rhs_is_smi = frame_->KnownSmiAt(0);
+ bool lhs_is_smi = frame_->KnownSmiAt(1);
+ Register rhs = frame_->PopToRegister();
+ Register lhs = frame_->PopToRegister(rhs);
+ Register smi_test_reg;
+ Condition cond;
+ if (!rhs_is_smi || !lhs_is_smi) {
+ if (rhs_is_smi) {
+ smi_test_reg = lhs;
+ } else if (lhs_is_smi) {
+ smi_test_reg = rhs;
+ } else {
+ smi_test_reg = VirtualFrame::scratch0();
+ __ orr(smi_test_reg, rhs, Operand(lhs));
+ }
+ // Check they are both Smis.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ cond = eq;
+ } else {
+ cond = al;
+ }
+ ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
+ if (op == Token::BIT_OR) {
+ __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
+ } else if (op == Token::BIT_AND) {
+ __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
+ } else {
+ ASSERT(op == Token::BIT_XOR);
+ ASSERT_EQ(0, kSmiTag);
+ __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
+ }
+ if (cond != al) {
+ JumpTarget done;
+ done.Branch(cond);
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+ frame_->SpillAll();
+ frame_->CallStub(&stub, 0);
+ done.Bind();
+ }
+ frame_->EmitPush(r0);
+ break;
+ } else {
+ // Fall through!
+ }
case Token::MUL:
case Token::DIV:
case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
Register rhs = frame_->PopToRegister();
Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
- {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->CallStub(&stub, 0);
- }
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+ frame_->SpillAll();
+ frame_->CallStub(&stub, 0);
frame_->EmitPush(r0);
break;
}
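
The tst against kSmiTagMask | 0xc0000000 above packs two checks into one: both operands are smis (low tag bit clear) and both are small positive values, so adding or subtracting the raw tagged words cannot leave smi range. A sketch of the invariant in plain C++ (smis here are 31-bit integers shifted left past a zero tag bit):

    #include <stdint.h>

    static const uint32_t kSmiTagMaskSketch = 1;  // low bit is the smi tag

    // True when both tagged words are positive smis below 2^30; in that
    // case the inline add/sub of the raw tagged words is itself a valid
    // smi (no overflow, and the tag bit stays zero).
    inline bool BothSmallPositiveSmis(uint32_t a, uint32_t b) {
      return ((a | b) & (kSmiTagMaskSketch | 0xc0000000u)) == 0;
    }
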
@@ -971,7 +1035,8 @@ void DeferredInlineSmiOperation::Generate() {
rhs = r1;
}
} else {
- UNREACHABLE(); // Should have been handled in SmiOperation.
+ ASSERT(op_ == Token::SHL);
+ __ mov(r1, Operand(Smi::FromInt(value_)));
}
break;
}
@@ -1019,6 +1084,8 @@ void CodeGenerator::SmiOperation(Token::Value op,
OverwriteMode mode) {
int int_value = Smi::cast(*value)->value();
+ bool both_sides_are_smi = frame_->KnownSmiAt(0);
+
bool something_to_inline;
switch (op) {
case Token::ADD:
@@ -1029,7 +1096,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
something_to_inline = true;
break;
}
- case Token::SHL:
+ case Token::SHL: {
+ something_to_inline = (both_sides_are_smi || !reversed);
+ break;
+ }
case Token::SHR:
case Token::SAR: {
if (reversed) {
@@ -1066,17 +1136,18 @@ void CodeGenerator::SmiOperation(Token::Value op,
// Push the rhs onto the virtual frame by putting it in a TOS register.
Register rhs = frame_->GetTOSRegister();
__ mov(rhs, Operand(value));
- frame_->EmitPush(rhs);
- VirtualFrameBinaryOperation(op, mode, int_value);
+ frame_->EmitPush(rhs, TypeInfo::Smi());
+ GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
} else {
// Pop the rhs, then push lhs and rhs in the right order. Only performs
// at most one pop, the rest takes place in TOS registers.
Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
__ mov(lhs, Operand(value));
- frame_->EmitPush(lhs);
- frame_->EmitPush(rhs);
- VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
+ frame_->EmitPush(lhs, TypeInfo::Smi());
+ TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
+ frame_->EmitPush(rhs, t);
+ GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
}
return;
}
@@ -1096,8 +1167,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ add(tos, tos, Operand(value), SetCC);
deferred->Branch(vs);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!both_sides_are_smi) {
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1113,8 +1186,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ sub(tos, tos, Operand(value), SetCC);
}
deferred->Branch(vs);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!both_sides_are_smi) {
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1124,25 +1199,65 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
+ if (both_sides_are_smi) {
+ switch (op) {
+ case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
+ case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ default: UNREACHABLE();
+ }
+ frame_->EmitPush(tos, TypeInfo::Smi());
+ } else {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ switch (op) {
+ case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
+ case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ default: UNREACHABLE();
+ }
+ deferred->BindExit();
+ TypeInfo result_type =
+ (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
+ frame_->EmitPush(tos, result_type);
}
- deferred->BindExit();
- frame_->EmitPush(tos);
break;
}
case Token::SHL:
+ if (reversed) {
+ ASSERT(both_sides_are_smi);
+ int max_shift = 0;
+ int max_result = int_value == 0 ? 1 : int_value;
+ while (Smi::IsValid(max_result << 1)) {
+ max_shift++;
+ max_result <<= 1;
+ }
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
+ // Mask off the last 5 bits of the shift operand (rhs). This is part
+ // of the definition of shift in JS and we know we have a Smi so we
+ // can safely do this. The masked version gets passed to the
+ // deferred code, but that makes no difference.
+ __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
+ __ cmp(tos, Operand(Smi::FromInt(max_shift)));
+ deferred->Branch(ge);
+ Register scratch = VirtualFrame::scratch0();
+ __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
+ __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
+ __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
+ deferred->BindExit();
+ TypeInfo result = TypeInfo::Integer32();
+ frame_->EmitPush(tos, result);
+ break;
+ }
+ // Fall through!
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
+ TypeInfo result = TypeInfo::Integer32();
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
@@ -1150,9 +1265,15 @@ void CodeGenerator::SmiOperation(Token::Value op,
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
uint32_t problematic_mask = kSmiTagMask;
// For unsigned shift by zero all negative smis are problematic.
- if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
- __ tst(tos, Operand(problematic_mask));
- deferred->Branch(ne); // Go slow for problematic input.
+ bool skip_smi_test = both_sides_are_smi;
+ if (shift_value == 0 && op == Token::SHR) {
+ problematic_mask |= 0x80000000;
+ skip_smi_test = false;
+ }
+ if (!skip_smi_test) {
+ __ tst(tos, Operand(problematic_mask));
+ deferred->Branch(ne); // Go slow for problematic input.
+ }
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
@@ -1187,6 +1308,9 @@ void CodeGenerator::SmiOperation(Token::Value op,
// by 0 or 1 when handed a valid smi
__ tst(scratch, Operand(0xc0000000));
deferred->Branch(ne);
+ } else {
+ ASSERT(shift_value >= 2);
+ result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi.
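+        // A 32-bit value shifted right by two or more fits in 30 bits,
+        // which is always in Smi range.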
}
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
}
@@ -1203,13 +1327,15 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
// Put tag back.
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
+ // SAR by at least 1 gives a Smi.
+ result = TypeInfo::Smi();
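+          // Arithmetic shift right never increases the magnitude, so the
+          // result of shifting a valid smi stays in smi range.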
}
break;
}
default: UNREACHABLE();
}
deferred->BindExit();
- frame_->EmitPush(tos);
+ frame_->EmitPush(tos, result);
break;
}
@@ -1218,21 +1344,24 @@ void CodeGenerator::SmiOperation(Token::Value op,
ASSERT(int_value >= 2);
ASSERT(IsPowerOf2(int_value));
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
unsigned mask = (0x80000000u | kSmiTagMask);
__ tst(tos, Operand(mask));
deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
mask = (int_value << kSmiTagSize) - 1;
__ and_(tos, tos, Operand(mask));
deferred->BindExit();
- frame_->EmitPush(tos);
+      // Mod by a positive power-of-2 smi gives a smi if the lhs is known
+      // to be a smi, and a number otherwise.
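+      // Negative inputs were diverted to the deferred code by the sign-bit
+      // test above, so masking the tagged value is safe here.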
+ frame_->EmitPush(
+ tos,
+ both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
break;
}
case Token::MUL: {
ASSERT(IsEasyToMultiplyBy(int_value));
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
max_smi_that_wont_overflow <<= kSmiTagSize;
unsigned mask = 0x80000000u;
@@ -1278,45 +1407,66 @@ void CodeGenerator::Comparison(Condition cc,
Register lhs;
Register rhs;
+ bool lhs_is_smi;
+ bool rhs_is_smi;
+
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
+ lhs_is_smi = frame_->KnownSmiAt(0);
+ rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
} else {
+ rhs_is_smi = frame_->KnownSmiAt(0);
+ lhs_is_smi = frame_->KnownSmiAt(1);
rhs = frame_->PopToRegister();
lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
}
+ bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
+
ASSERT(rhs.is(r0) || rhs.is(r1));
ASSERT(lhs.is(r0) || lhs.is(r1));
- // Now we have the two sides in r0 and r1. We flush any other registers
- // because the stub doesn't know about register allocation.
- frame_->SpillAll();
- Register scratch = VirtualFrame::scratch0();
- __ orr(scratch, lhs, Operand(rhs));
- __ tst(scratch, Operand(kSmiTagMask));
- JumpTarget smi;
- smi.Branch(eq);
+ JumpTarget exit;
- // Perform non-smi comparison by stub.
- // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
- // We call with 0 args because there are 0 on the stack.
- if (!rhs.is(r0)) {
- __ Swap(rhs, lhs, ip);
- }
+ if (!both_sides_are_smi) {
+ // Now we have the two sides in r0 and r1. We flush any other registers
+ // because the stub doesn't know about register allocation.
+ frame_->SpillAll();
+ Register scratch = VirtualFrame::scratch0();
+ Register smi_test_reg;
+ if (lhs_is_smi) {
+ smi_test_reg = rhs;
+ } else if (rhs_is_smi) {
+ smi_test_reg = lhs;
+ } else {
+ __ orr(scratch, lhs, Operand(rhs));
+ smi_test_reg = scratch;
+ }
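+    // When neither side is a known smi, the OR of the two tagged words has
+    // a zero tag bit only if both are smis, so one test covers both sides.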
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ JumpTarget smi;
+ smi.Branch(eq);
- CompareStub stub(cc, strict);
- frame_->CallStub(&stub, 0);
- __ cmp(r0, Operand(0));
- JumpTarget exit;
- exit.Jump();
+ // Perform non-smi comparison by stub.
+ // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
+ // We call with 0 args because there are 0 on the stack.
+ if (!rhs.is(r0)) {
+ __ Swap(rhs, lhs, ip);
+ }
+
+ CompareStub stub(cc, strict);
+ frame_->CallStub(&stub, 0);
+ __ cmp(r0, Operand(0));
+ exit.Jump();
+
+ smi.Bind();
+ }
// Do smi comparisons by pointer comparison.
- smi.Bind();
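+  // Tagged smis compare in the same order as their untagged values, so a
+  // raw word comparison is sufficient.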
__ cmp(lhs, Operand(rhs));
exit.Bind();
@@ -1328,11 +1478,12 @@ void CodeGenerator::Comparison(Condition cc,
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CallFunctionFlags flags,
int position) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->AssertIsSpilled();
+
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
// Record the position for debugging purposes.
@@ -1368,7 +1519,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- LoadAndSpill(applicand);
+ Load(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame_->Dup();
frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
@@ -1376,7 +1527,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
- LoadAndSpill(receiver);
+ Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
@@ -1564,7 +1715,7 @@ void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- VisitAndSpill(statements->at(i));
+ Visit(statements->at(i));
}
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@@ -1578,7 +1729,7 @@ void CodeGenerator::VisitBlock(Block* node) {
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
- VisitStatementsAndSpill(node->statements());
+ VisitStatements(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
@@ -1668,12 +1819,11 @@ void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ExpressionStatement");
CodeForStatementPosition(node);
Expression* expression = node->expression();
expression->MarkAsStatement();
- LoadAndSpill(expression);
+ Load(expression);
frame_->Drop();
ASSERT(frame_->height() == original_height);
}
@@ -1683,7 +1833,6 @@ void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "// EmptyStatement");
CodeForStatementPosition(node);
// nothing to do
@@ -1695,7 +1844,6 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which parts of the if statement
// are present or not.
@@ -1710,14 +1858,14 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
// if (cond)
- LoadConditionAndSpill(node->condition(), &then, &else_, true);
+ LoadCondition(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
// then
if (frame_ != NULL || then.is_linked()) {
then.Bind();
- VisitAndSpill(node->then_statement());
+ Visit(node->then_statement());
}
if (frame_ != NULL) {
exit.Jump();
@@ -1725,7 +1873,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
// else
if (else_.is_linked()) {
else_.Bind();
- VisitAndSpill(node->else_statement());
+ Visit(node->else_statement());
}
} else if (has_then_stm) {
@@ -1733,14 +1881,14 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
- LoadConditionAndSpill(node->condition(), &then, &exit, true);
+ LoadCondition(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
// then
if (frame_ != NULL || then.is_linked()) {
then.Bind();
- VisitAndSpill(node->then_statement());
+ Visit(node->then_statement());
}
} else if (has_else_stm) {
@@ -1748,21 +1896,21 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
- LoadConditionAndSpill(node->condition(), &exit, &else_, true);
+ LoadCondition(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
// else
if (frame_ != NULL || else_.is_linked()) {
else_.Bind();
- VisitAndSpill(node->else_statement());
+ Visit(node->else_statement());
}
} else {
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadConditionAndSpill(node->condition(), &exit, &exit, false);
+ LoadCondition(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1801,7 +1949,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
+ Load(node->expression());
if (function_return_is_shadowed_) {
frame_->EmitPop(r0);
function_return_.Jump();
@@ -1823,7 +1971,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
+ Load(node->expression());
if (node->is_catch_block()) {
frame_->CallRuntime(Runtime::kPushCatchContext, 1);
} else {
@@ -1866,7 +2014,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
- LoadAndSpill(node->tag());
+ Load(node->tag());
JumpTarget next_test;
JumpTarget fall_through;
@@ -1905,7 +2053,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
fall_through.Bind();
fall_through.Unuse();
}
- VisitStatementsAndSpill(clause->statements());
+ VisitStatements(clause->statements());
// If control flow can fall through from the body, jump to the next body
// or the end of the statement.
@@ -1926,7 +2074,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
if (default_clause != NULL) {
Comment cmnt(masm_, "[ Default clause");
default_entry.Bind();
- VisitStatementsAndSpill(default_clause->statements());
+ VisitStatements(default_clause->statements());
// If control flow can fall out of the default and there is a case after
  // it, jump to that case's body.
if (frame_ != NULL && default_exit.is_bound()) {
@@ -1976,7 +2124,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
}
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// Compile the test.
switch (info) {
@@ -2003,7 +2151,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
if (has_valid_frame()) {
Comment cmnt(masm_, "[ DoWhileCondition");
CodeForDoWhileConditionPosition(node);
- LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
+ LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
  // An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -2044,7 +2192,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
+ LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -2057,7 +2205,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (has_valid_frame()) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// If control flow can fall out of the body, jump back to the top.
if (has_valid_frame()) {
@@ -2080,7 +2228,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
if (node->init() != NULL) {
- VisitAndSpill(node->init());
+ Visit(node->init());
}
// If the test is never true there is no need to compile the test or
@@ -2091,6 +2239,17 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
+ // We know that the loop index is a smi if it is not modified in the
+ // loop body and it is checked against a constant limit in the loop
+ // condition. In this case, we reset the static type information of the
+ // loop index to smi before compiling the body, the update expression, and
+ // the bottom check of the loop condition.
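+  // A typical example is: for (var i = 0; i < 100; i++) { ... } where the
+  // body does not assign to i.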
+ TypeInfoCodeGenState type_info_scope(this,
+ node->is_fast_smi_loop() ?
+ node->loop_variable()->slot() :
+ NULL,
+ TypeInfo::Smi());
+
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
JumpTarget loop(JumpTarget::BIDIRECTIONAL);
@@ -2105,7 +2264,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
+ LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -2116,7 +2275,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
if (has_valid_frame()) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
if (node->next() == NULL) {
// If there is no update statement and control flow can fall out
@@ -2136,7 +2295,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// after the code for the body actually belongs to the loop
// statement and not the body.
CodeForStatementPosition(node);
- VisitAndSpill(node->next());
+ Visit(node->next());
loop.Jump();
}
}
@@ -2165,7 +2324,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
JumpTarget exit;
// Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
+ Load(node->enumerable());
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
@@ -2276,7 +2435,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(r0); // map
frame_->EmitPush(r2); // enum cache bridge cache
__ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
@@ -2289,7 +2447,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0))); // init index
frame_->EmitPush(r0);
@@ -2359,7 +2516,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
}
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// Next. Reestablish a spilled frame in case we are coming here via
// a continue in the body.
@@ -2406,7 +2563,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Remove the exception from the stack.
frame_->Drop();
- VisitStatementsAndSpill(node->catch_block()->statements());
+ VisitStatements(node->catch_block()->statements());
if (frame_ != NULL) {
exit.Jump();
}
@@ -2441,7 +2598,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
+ VisitStatements(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2555,7 +2712,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
+ VisitStatements(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2645,7 +2802,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// and the state - while evaluating the finally block.
//
// Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
+ VisitStatements(node->finally_block()->statements());
if (has_valid_frame()) {
// Restore state and return value or faked TOS.
@@ -2692,7 +2849,6 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2705,19 +2861,18 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateFunction(
Handle<SharedFunctionInfo> function_info) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ mov(r0, Operand(function_info));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && function_info->num_literals() == 0) {
FastNewClosureStub stub;
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(function_info));
+ frame_->SpillAll();
frame_->CallStub(&stub, 1);
frame_->EmitPush(r0);
} else {
// Create a new closure.
frame_->EmitPush(cp);
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(function_info));
frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->EmitPush(r0);
}
@@ -2762,19 +2917,19 @@ void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- LoadConditionAndSpill(node->condition(), &then, &else_, true);
+ LoadCondition(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
- LoadAndSpill(node->then_expression());
+ Load(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
- LoadAndSpill(node->else_expression());
+ Load(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT_EQ(original_height + 1, frame_->height());
@@ -2815,7 +2970,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
} else {
Register scratch = VirtualFrame::scratch0();
- frame_->EmitPush(SlotOperand(slot, scratch));
+ TypeInfo info = type_info(slot);
+ frame_->EmitPush(SlotOperand(slot, scratch), info);
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
@@ -3105,8 +3261,9 @@ void CodeGenerator::VisitLiteral(Literal* node) {
#endif
Comment cmnt(masm_, "[ Literal");
Register reg = frame_->GetTOSRegister();
+ bool is_smi = node->handle()->IsSmi();
__ mov(reg, Operand(node->handle()));
- frame_->EmitPush(reg);
+ frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3194,7 +3351,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- LoadAndSpill(value);
+ Load(value);
frame_->EmitPop(r0);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, frame_->Top()); // Load the receiver.
@@ -3205,28 +3362,28 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::PROTOTYPE: {
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0); // dup the result
- LoadAndSpill(key);
- LoadAndSpill(value);
+ Load(key);
+ Load(value);
frame_->CallRuntime(Runtime::kSetProperty, 3);
break;
}
case ObjectLiteral::Property::SETTER: {
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
- LoadAndSpill(key);
+ Load(key);
__ mov(r0, Operand(Smi::FromInt(1)));
frame_->EmitPush(r0);
- LoadAndSpill(value);
+ Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
case ObjectLiteral::Property::GETTER: {
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
- LoadAndSpill(key);
+ Load(key);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
- LoadAndSpill(value);
+ Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
@@ -3275,7 +3432,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
if (CompileTimeValue::IsCompileTimeValue(value)) continue;
// The property must be set by generated code.
- LoadAndSpill(value);
+ Load(value);
frame_->EmitPop(r0);
// Fetch the object literal.
@@ -3299,12 +3456,11 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
- LoadAndSpill(node->key());
- LoadAndSpill(node->value());
+ Load(node->key());
+ Load(node->value());
frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
frame_->EmitPush(r0);
ASSERT_EQ(original_height + 1, frame_->height());
@@ -3338,9 +3494,16 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (literal != NULL) {
+ ASSERT(!literal->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
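+        // A non-smi literal operand means both operands can never be smis
+        // at the same time, so the inline smi code would be dead.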
+ }
Load(node->value());
- VirtualFrameBinaryOperation(
- node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ GenericBinaryOperation(node->binary_op(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+ inline_smi);
}
} else {
Load(node->value());
@@ -3419,7 +3582,6 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame_->Dup();
}
EmitNamedLoad(name, var != NULL);
- frame_->EmitPush(r0);
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
@@ -3432,9 +3594,16 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (literal != NULL) {
+ ASSERT(!literal->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
Load(node->value());
- VirtualFrameBinaryOperation(
- node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ GenericBinaryOperation(node->binary_op(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+ inline_smi);
}
} else {
// For non-compound assignment just load the right-hand side.
@@ -3539,9 +3708,16 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (literal != NULL) {
+ ASSERT(!literal->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
Load(node->value());
- VirtualFrameBinaryOperation(
- node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ GenericBinaryOperation(node->binary_op(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+ inline_smi);
}
} else {
// For non-compound assignment just load the right-hand side.
@@ -3624,10 +3800,9 @@ void CodeGenerator::VisitThrow(Throw* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Throw");
- LoadAndSpill(node->exception());
+ Load(node->exception());
CodeForSourcePosition(node->position());
frame_->CallRuntime(Runtime::kThrow, 1);
frame_->EmitPush(r0);
@@ -3652,7 +3827,6 @@ void CodeGenerator::VisitCall(Call* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Call");
Expression* function = node->expression();
@@ -3673,6 +3847,7 @@ void CodeGenerator::VisitCall(Call* node) {
// ------------------------------------------------------------------------
if (var != NULL && var->is_possibly_eval()) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript example: 'eval(arg)' // eval is not known to be shadowed
// ----------------------------------
@@ -3681,16 +3856,54 @@ void CodeGenerator::VisitCall(Call* node) {
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
+
// Prepare stack for call to resolved function.
- LoadAndSpill(function);
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2); // Slot for receiver
+ frame_->EmitPush(r2);
+
+ // Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
- // Prepare stack for call to ResolvePossiblyDirectEval.
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ JumpTarget done;
+ if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->slot()->type() == Slot::LOOKUP);
+ JumpTarget slow;
+ // Prepare the stack for the call to
+ // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
+ // function, the first argument to the eval call and the
+ // receiver.
+ LoadFromGlobalSlotCheckExtensions(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ frame_->EmitPush(r0);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
+ } else {
+ frame_->EmitPush(r2);
+ }
+ __ ldr(r1, frame_->Receiver());
+ frame_->EmitPush(r1);
+
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+
+ done.Jump();
+ slow.Bind();
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval by
+ // pushing the loaded function, the first argument to the eval
+ // call and the receiver.
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
frame_->EmitPush(r1);
if (arg_count > 0) {
@@ -3699,14 +3912,16 @@ void CodeGenerator::VisitCall(Call* node) {
} else {
frame_->EmitPush(r2);
}
-
- // Push the receiver.
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
// Resolve the call.
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+  // If we generated fast-case code, bind the jump target where the
+  // fast and slow cases merge.
+ if (done.is_linked()) done.Bind();
+
// Touch up stack with the right values for the function and the receiver.
__ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ str(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -3735,9 +3950,10 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Setup the name register and call the IC initialization code.
__ mov(r2, Operand(var->name()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -3750,6 +3966,7 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript examples:
//
@@ -3827,13 +4044,14 @@ void CodeGenerator::VisitCall(Call* node) {
node->position());
} else {
- LoadAndSpill(property->obj()); // Receiver.
+ Load(property->obj()); // Receiver.
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Set the name register and call the IC initialization code.
__ mov(r2, Operand(name));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -3848,14 +4066,15 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
+ VirtualFrame::SpilledScope spilled_scope(frame_);
- LoadAndSpill(property->obj());
+ Load(property->obj());
if (!property->is_synthetic()) {
// Duplicate receiver for later use.
__ ldr(r0, MemOperand(sp, 0));
frame_->EmitPush(r0);
}
- LoadAndSpill(property->key());
+ Load(property->key());
EmitKeyedLoad();
// Put the function below the receiver.
if (property->is_synthetic()) {
@@ -3880,7 +4099,9 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// Load the function.
- LoadAndSpill(function);
+ Load(function);
+
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Pass the global proxy as the receiver.
LoadGlobalReceiver(r0);
@@ -3897,7 +4118,6 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -3909,16 +4129,18 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// Compute function to call and use the global object as the
// receiver. There is no need to use the global proxy here because
// it will always be replaced with a newly allocated object.
- LoadAndSpill(node->expression());
+ Load(node->expression());
LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
// r0: the number of arguments.
__ mov(r0, Operand(arg_count));
// Load the function into r1 as per calling convention.
@@ -3942,7 +4164,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
JumpTarget leave, null, function, non_function_constructor;
// Load the object into r0.
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
frame_->EmitPop(r0);
// If the object is a smi, we return null.
@@ -4000,7 +4222,7 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
JumpTarget leave;
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
frame_->EmitPop(r0); // r0 contains object.
// if (object->IsSmi()) return the object.
__ tst(r0, Operand(kSmiTagMask));
@@ -4019,8 +4241,8 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
JumpTarget leave;
- LoadAndSpill(args->at(0)); // Load the object.
- LoadAndSpill(args->at(1)); // Load the value.
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
frame_->EmitPop(r0); // r0 contains value
frame_->EmitPop(r1); // r1 contains object
// if (object->IsSmi()) return object.
@@ -4056,9 +4278,7 @@ void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
if (ShouldGenerateLog(args->at(0))) {
Load(args->at(1));
Load(args->at(2));
- frame_->SpillAll();
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ CallRuntime(Runtime::kLog, 2);
+ frame_->CallRuntime(Runtime::kLog, 2);
}
#endif
frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
@@ -4093,99 +4313,240 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
}
-// This generates code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat, 8 and 16 bit characters and cons strings where the
-// answer is found in the left hand branch of the cons. The slow case will
-// flatten the string, which will ensure that the answer is in the left hand
-// side the next time around.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result_, Heap::kNanValueRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns undefined in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+ Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
- Comment(masm_, "[ GenerateFastCharCodeAt");
Load(args->at(0));
Load(args->at(1));
- Register index = frame_->PopToRegister(); // Index.
- Register string = frame_->PopToRegister(index); // String.
- Register result = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- Label slow_case;
- Label exit;
- StringHelper::GenerateFastCharCodeAt(masm_,
- string,
- index,
- scratch,
- result,
- &slow_case,
- &slow_case,
- &slow_case,
- &slow_case);
- __ jmp(&exit);
+ Register index = r1;
+ Register object = r2;
- __ bind(&slow_case);
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPop(r1);
+ frame_->EmitPop(r2);
- __ bind(&exit);
+ // We need two extra registers.
+ Register scratch = r3;
+ Register result = r0;
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object,
+ index,
+ scratch,
+ result);
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->EmitPush(result);
}
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateCharFromCode");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+ Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
+ Load(args->at(0));
+
Register code = r1;
- Register scratch = ip;
Register result = r0;
- LoadAndSpill(args->at(0));
frame_->EmitPop(code);
- StringHelper::GenerateCharFromCode(masm_,
- code,
- scratch,
- result,
- CALL_FUNCTION);
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code, result);
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->EmitPush(result);
}
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ mov(result_, Operand(Smi::FromInt(0)));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ Register index = r1;
+ Register object = r2;
+
+ frame_->EmitPop(r1);
+ frame_->EmitPop(r2);
+
+ // We need three extra registers.
+ Register scratch1 = r3;
+ Register scratch2 = r4;
+ Register result = r0;
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object,
+ index,
+ scratch1,
+ scratch2,
+ result);
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->EmitPush(result);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
JumpTarget answer;
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
- frame_->EmitPop(r0);
- __ and_(r1, r0, Operand(kSmiTagMask));
- __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
+ Register possible_array = frame_->PopToRegister();
+ Register scratch = VirtualFrame::scratch0();
+ __ and_(scratch, possible_array, Operand(kSmiTagMask));
+ __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
answer.Branch(ne);
// It is a heap object - get the map. Check if the object is a JS array.
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+ __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
answer.Bind();
cc_reg_ = eq;
}
void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
JumpTarget answer;
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
- frame_->EmitPop(r0);
- __ and_(r1, r0, Operand(kSmiTagMask));
- __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
+ Register possible_regexp = frame_->PopToRegister();
+ Register scratch = VirtualFrame::scratch0();
+ __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
+ __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
answer.Branch(ne);
// It is a heap object - get the map. Check if the object is a regexp.
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+ __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
answer.Bind();
cc_reg_ = eq;
}
@@ -4194,28 +4555,27 @@ void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r1);
- __ tst(r1, Operand(kSmiTagMask));
+ Load(args->at(0));
+ Register possible_object = frame_->PopToRegister();
+ __ tst(possible_object, Operand(kSmiTagMask));
false_target()->Branch(eq);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r1, ip);
+ __ cmp(possible_object, ip);
true_target()->Branch(eq);
- Register map_reg = r2;
- __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
+ Register map_reg = VirtualFrame::scratch0();
+ __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(ne);
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
}
@@ -4223,28 +4583,29 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask));
+ Load(args->at(0));
+ Register possible_function = frame_->PopToRegister();
+ __ tst(possible_function, Operand(kSmiTagMask));
false_target()->Branch(eq);
- Register map_reg = r2;
- __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
+ Register map_reg = VirtualFrame::scratch0();
+ Register scratch = VirtualFrame::scratch1();
+ __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
cc_reg_ = eq;
}
void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask));
+ Load(args->at(0));
+ Register possible_undetectable = frame_->PopToRegister();
+ __ tst(possible_undetectable, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ Register scratch = VirtualFrame::scratch0();
+ __ ldr(scratch,
+ FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
cc_reg_ = ne;
}
@@ -4305,7 +4666,7 @@ void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
// Satisfy contract with ArgumentsAccessStub:
// Load the key into r1 and the formal parameters count into r0.
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
frame_->EmitPop(r1);
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
@@ -4377,6 +4738,7 @@ void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
Load(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
+ frame_->SpillAll();
frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -4390,6 +4752,7 @@ void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
Load(args->at(2));
SubStringStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
}
@@ -4402,6 +4765,7 @@ void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
Load(args->at(1));
StringCompareStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -4415,6 +4779,7 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
Load(args->at(2));
Load(args->at(3));
RegExpExecStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 4);
frame_->EmitPush(r0);
}
@@ -4488,7 +4853,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(r2, Operand(Factory::fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
- __ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
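+  // The length field of a FixedArray is stored as a smi, so tag the length
+  // before storing it.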
+ __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+ __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
__ mov(r2, Operand(Factory::the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4548,12 +4914,14 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
Top::global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
return;
}
Load(args->at(1));
+
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
frame_->EmitPop(r2);
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
@@ -4589,6 +4957,7 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
Load(args->at(0));
NumberToStringStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 1);
frame_->EmitPush(r0);
}
@@ -4625,6 +4994,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
Register index2 = r2;
Register index1 = r1;
Register object = r0;
@@ -4716,18 +5087,28 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
- frame_->CallRuntime(Runtime::kMath_sin, 1);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ frame_->SpillAllButCopyTOSToR0();
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kMath_sin, 1);
+ }
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
- frame_->CallRuntime(Runtime::kMath_cos, 1);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ frame_->SpillAllButCopyTOSToR0();
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kMath_cos, 1);
+ }
frame_->EmitPush(r0);
}
@@ -4749,7 +5130,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
if (CheckForInlineRuntimeCall(node)) {
ASSERT((has_cc() && frame_->height() == original_height) ||
(!has_cc() && frame_->height() == original_height + 1));
@@ -4763,17 +5143,21 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Prepare stack for calling JS runtime function.
// Push the builtins object found in the current global object.
- __ ldr(r1, GlobalObject());
- __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
- frame_->EmitPush(r0);
+ Register scratch = VirtualFrame::scratch0();
+ __ ldr(scratch, GlobalObject());
+ Register builtins = frame_->GetTOSRegister();
+ __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
+ frame_->EmitPush(builtins);
}
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
if (function == NULL) {
// Call the JS runtime function.
__ mov(r2, Operand(node->name()));
@@ -4801,10 +5185,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Token::Value op = node->op();
if (op == Token::NOT) {
- LoadConditionAndSpill(node->expression(),
- false_target(),
- true_target(),
- true);
+ LoadCondition(node->expression(), false_target(), true_target(), true);
// LoadCondition may (and usually does) leave a test and branch to
// be emitted by the caller. In that case, negate the condition.
if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
@@ -4813,43 +5194,42 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Property* property = node->expression()->AsProperty();
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (property != NULL) {
- LoadAndSpill(property->obj());
- LoadAndSpill(property->key());
+ Load(property->obj());
+ Load(property->key());
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(r0);
} else if (variable != NULL) {
Slot* slot = variable->slot();
if (variable->is_global()) {
LoadGlobal();
- __ mov(r0, Operand(variable->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
frame_->EmitPush(cp);
- __ mov(r0, Operand(variable->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(variable->name()));
frame_->CallRuntime(Runtime::kLookupContext, 2);
// r0: context
frame_->EmitPush(r0);
- __ mov(r0, Operand(variable->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(r0);
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
}
} else {
// Default: Result of deleting expressions is true.
- LoadAndSpill(node->expression()); // may have side-effects
+ Load(node->expression()); // may have side-effects
frame_->Drop();
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
}
- frame_->EmitPush(r0);
} else if (op == Token::TYPEOF) {
// Special case for loading the typeof expression; see comment on
@@ -4862,8 +5242,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- LoadAndSpill(node->expression());
- frame_->EmitPop(r0);
+ Load(node->expression());
switch (op) {
case Token::NOT:
case Token::DELETE:
@@ -4872,13 +5251,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
+ VirtualFrame::SpilledScope spilled(frame_);
+ frame_->EmitPop(r0);
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0); // r0 has result
break;
}
case Token::BIT_NOT: {
// smi check
+ VirtualFrame::SpilledScope spilled(frame_);
+ frame_->EmitPop(r0);
JumpTarget smi_label;
JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
@@ -4892,16 +5276,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
continue_label.Bind();
+ frame_->EmitPush(r0); // r0 has result
break;
}
case Token::VOID:
- // since the stack top is cached in r0, popping and then
- // pushing a value can be done by just writing to r0.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ frame_->Drop();
+ frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
break;
case Token::ADD: {
+ VirtualFrame::SpilledScope spilled(frame_);
+ frame_->EmitPop(r0);
// Smi check.
JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
@@ -4909,12 +5295,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
+ frame_->EmitPush(r0); // r0 has result
break;
}
default:
UNREACHABLE();
}
- frame_->EmitPush(r0); // r0 has result
}
ASSERT(!has_valid_frame() ||
(has_cc() && frame_->height() == original_height) ||
@@ -4933,9 +5319,36 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
+ bool is_slot = (var != NULL && var->mode() == Variable::VAR);
+
+ if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
+ // The type info declares that this variable is always a Smi. That
+ // means it is a Smi both before and after the increment/decrement.
+  // Let's use that to generate minimal code for the count operation.
+ Reference target(this, node->expression(), !is_const);
+ ASSERT(!target.is_illegal());
+ target.GetValue(); // Pushes the value.
+ Register value = frame_->PopToRegister();
+ if (is_postfix) frame_->EmitPush(value);
+ if (is_increment) {
+ __ add(value, value, Operand(Smi::FromInt(1)));
+ } else {
+ __ sub(value, value, Operand(Smi::FromInt(1)));
+ }
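+    // No overflow check is needed: the type info guarantees that the value
+    // is a smi both before and after the operation.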
+ frame_->EmitPush(value);
+ target.SetValue(NOT_CONST_INIT);
+ if (is_postfix) frame_->Pop();
+ ASSERT_EQ(original_height + 1, frame_->height());
+ return;
+ }
- if (is_postfix) {
+ // If it's a postfix expression and its result is not ignored and the
+ // reference is non-trivial, then push a placeholder on the stack now
+ // to hold the result of the expression.
+ bool placeholder_pushed = false;
+ if (!is_slot && is_postfix) {
frame_->EmitPush(Operand(Smi::FromInt(0)));
+ placeholder_pushed = true;
}
// A constant reference is not saved to, so a constant reference is not a
@@ -4944,12 +5357,11 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
- if (!is_postfix) {
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- }
+ if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
ASSERT_EQ(original_height + 1, frame_->height());
return;
}
+
// This pushes 0, 1 or 2 words on the object to be used later when updating
// the target. It also pushes the current value of the target.
target.GetValue();
@@ -4957,16 +5369,21 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
JumpTarget slow;
JumpTarget exit;
- // Check for smi operand.
Register value = frame_->PopToRegister();
- __ tst(value, Operand(kSmiTagMask));
- slow.Branch(ne);
// Postfix: Store the old value as the result.
- if (is_postfix) {
+ if (placeholder_pushed) {
frame_->SetElementAt(value, target.size());
+ } else if (is_postfix) {
+ frame_->EmitPush(value);
+ __ mov(VirtualFrame::scratch0(), value);
+ value = VirtualFrame::scratch0();
}
+ // Check for smi operand.
+ __ tst(value, Operand(kSmiTagMask));
+ slow.Branch(ne);
+
// Perform optimistic increment/decrement.
if (is_increment) {
__ add(value, value, Operand(Smi::FromInt(1)), SetCC);
@@ -5042,10 +5459,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
if (node->op() == Token::AND) {
JumpTarget is_true;
- LoadConditionAndSpill(node->left(),
- &is_true,
- false_target(),
- false);
+ LoadCondition(node->left(), &is_true, false_target(), false);
if (has_valid_frame() && !has_cc()) {
// The left-hand side result is on top of the virtual frame.
JumpTarget pop_and_continue;
@@ -5064,7 +5478,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// Evaluate right side expression.
is_true.Bind();
- LoadAndSpill(node->right());
+ Load(node->right());
// Exit (always with a materialized value).
exit.Bind();
@@ -5076,10 +5490,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
Branch(false, false_target());
}
is_true.Bind();
- LoadConditionAndSpill(node->right(),
- true_target(),
- false_target(),
- false);
+ LoadCondition(node->right(), true_target(), false_target(), false);
} else {
// Nothing to do.
ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
@@ -5088,10 +5499,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
} else {
ASSERT(node->op() == Token::OR);
JumpTarget is_false;
- LoadConditionAndSpill(node->left(),
- true_target(),
- &is_false,
- false);
+ LoadCondition(node->left(), true_target(), &is_false, false);
if (has_valid_frame() && !has_cc()) {
// The left-hand side result is on top of the virtual frame.
JumpTarget pop_and_continue;
@@ -5110,7 +5518,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// Evaluate right side expression.
is_false.Bind();
- LoadAndSpill(node->right());
+ Load(node->right());
// Exit (always with a materialized value).
exit.Bind();
@@ -5122,10 +5530,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
Branch(true, true_target());
}
is_false.Bind();
- LoadConditionAndSpill(node->right(),
- true_target(),
- false_target(),
- false);
+ LoadCondition(node->right(), true_target(), false_target(), false);
} else {
// Nothing to do.
ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
@@ -5159,18 +5564,30 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->left());
+ if (frame_->KnownSmiAt(0)) overwrite_left = false;
SmiOperation(node->op(),
rliteral->handle(),
false,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->right());
+ if (frame_->KnownSmiAt(0)) overwrite_right = false;
SmiOperation(node->op(),
lliteral->handle(),
true,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
+ overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (lliteral != NULL) {
+ ASSERT(!lliteral->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
+ if (rliteral != NULL) {
+ ASSERT(!rliteral->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
VirtualFrame::RegisterAllocationScope scope(this);
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
@@ -5180,7 +5597,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
Load(node->left());
Load(node->right());
- VirtualFrameBinaryOperation(node->op(), overwrite_mode);
+ GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
}
}
ASSERT(!has_valid_frame() ||
@@ -5392,8 +5809,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
VirtualFrame::SpilledScope scope(frame_);
- LoadAndSpill(left);
- LoadAndSpill(right);
+ Load(left);
+ Load(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
@@ -5401,8 +5818,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::INSTANCEOF: {
VirtualFrame::SpilledScope scope(frame_);
- LoadAndSpill(left);
- LoadAndSpill(right);
+ Load(left);
+ Load(right);
InstanceofStub stub;
frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
@@ -5435,11 +5852,19 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
};
+// The convention here is that on entry the receiver is in a register that
+// is not in use by the virtual frame. On exit the answer is found in that
+// same register and the stack has the same height.
void DeferredReferenceGetNamedValue::Generate() {
- ASSERT(receiver_.is(r0) || receiver_.is(r1));
+#ifdef DEBUG
+ int expected_height = frame_state()->frame()->height();
+#endif
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
+ ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
__ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
@@ -5455,11 +5880,23 @@ void DeferredReferenceGetNamedValue::Generate() {
// in-object has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
+ // At this point the answer is in r0. We move it to the expected register
+ // if necessary.
+ __ Move(receiver_, r0);
+
+ // Now go back to the frame that we entered with. This will not overwrite
+ // the receiver register since that register was not in use when we came
+ // in. The instructions emitted by this merge are skipped over by the
+ // inline load patching mechanism when looking for the branch instruction
+ // that tells it where the code to patch is.
+ copied_frame.MergeTo(frame_state()->frame());
+
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
+ ASSERT_EQ(expected_height, frame_state()->frame()->height());
}
@@ -5560,6 +5997,7 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
+// Consumes the top of stack (the receiver) and pushes the result instead.
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
@@ -5568,6 +6006,7 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
+ frame_->EmitPush(r0); // Push answer.
} else {
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
@@ -5584,7 +6023,6 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// Load the receiver from the stack.
Register receiver = frame_->PopToRegister();
- VirtualFrame::SpilledScope spilled(frame_);
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(receiver, name);
@@ -5600,16 +6038,19 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
+ Register scratch = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+
// Check the map. The null map used below is patched by the inline cache
- // code.
- __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(r3, Operand(Factory::null_value()));
- __ cmp(r2, r3);
+ // code. Therefore we can't use a LoadRoot call.
+ __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ mov(scratch2, Operand(Factory::null_value()));
+ __ cmp(scratch, scratch2);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
- __ ldr(r0, MemOperand(receiver, 0));
+ __ ldr(receiver, MemOperand(receiver, 0));
// Make sure that the expected number of instructions is generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
@@ -5617,6 +6058,9 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
}
deferred->BindExit();
+ // At this point the receiver register has the result, either from the
+ // deferred code or from the inlined code.
+ frame_->EmitPush(receiver);
}
}
@@ -5645,6 +6089,7 @@ void CodeGenerator::EmitKeyedLoad() {
frame_->scratch0(), frame_->scratch1());
// Load the key and receiver from the stack.
+ bool key_is_known_smi = frame_->KnownSmiAt(0);
Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key);
VirtualFrame::SpilledScope spilled(frame_);
@@ -5667,18 +6112,21 @@ void CodeGenerator::EmitKeyedLoad() {
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the key is a smi.
+ if (!key_is_known_smi) {
+ __ tst(key, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
+
#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
#endif
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
-
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -5690,7 +6138,7 @@ void CodeGenerator::EmitKeyedLoad() {
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
__ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
+ __ cmp(scratch2, key);
deferred->Branch(ls); // Unsigned less equal.
// Load and check that the result is not the hole (key is a smi).
@@ -5832,6 +6280,27 @@ Handle<String> Reference::GetName() {
}
+void Reference::DupIfPersist() {
+ if (persist_after_get_) {
+ switch (type_) {
+ case KEYED:
+ cgen_->frame()->Dup2();
+ break;
+ case NAMED:
+ cgen_->frame()->Dup();
+ // Fall through.
+ case UNLOADED:
+ case ILLEGAL:
+ case SLOT:
+ // Do nothing.
+ ;
+ }
+ } else {
+ set_unloaded();
+ }
+}
+
+
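// A minimal sketch of the dup protocol implemented above: the number of
// stack words a reference occupies determines how much DupIfPersist has to
// duplicate when the reference survives the get.
//   KEYED                   -> receiver and key on the stack -> Dup2()
//   NAMED                   -> receiver on the stack         -> Dup()
//   SLOT, UNLOADED, ILLEGAL -> nothing on the stack          -> no dup
// A reference that is not needed after the get is simply marked unloaded.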
void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -5847,10 +6316,8 @@ void Reference::GetValue() {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
+ DupIfPersist();
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
break;
}
@@ -5858,23 +6325,17 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- if (persist_after_get_) {
- cgen_->frame()->Dup();
- }
- cgen_->EmitNamedLoad(GetName(), is_global);
- cgen_->frame()->EmitPush(r0);
- if (!persist_after_get_) set_unloaded();
+ Handle<String> name = GetName();
+ DupIfPersist();
+ cgen_->EmitNamedLoad(name, is_global);
break;
}
case KEYED: {
ASSERT(property != NULL);
- if (persist_after_get_) {
- cgen_->frame()->Dup2();
- }
+ DupIfPersist();
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
- if (!persist_after_get_) set_unloaded();
break;
}
@@ -5991,8 +6452,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(length));
- __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
@@ -6623,8 +7084,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is not a smi).
- __ mov(mask, Operand(mask, ASR, 1));
+ // Divide length by two (length is a smi).
+ __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
__ sub(mask, mask, Operand(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
@@ -6639,7 +7100,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
scratch1,
- Factory::heap_number_map(),
+ Heap::kHeapNumberMapRootIndex,
not_found,
true);
@@ -7785,6 +8246,110 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
}
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // The argument is a number and is on the stack and in r0.
+ Label runtime_call;
+ Label input_not_smi;
+ Label loaded;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Load argument and check if it is a smi.
+ __ BranchOnNotSmi(r0, &input_not_smi);
+
+ CpuFeatures::Scope scope(VFP3);
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &runtime_call,
+ true);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+
+ __ bind(&loaded);
+ // r2 = low 32 bits of double value
+ // r3 = high 32 bits of double value
+ // Compute hash:
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ eor(r1, r2, Operand(r3));
+ __ eor(r1, r1, Operand(r1, LSR, 16));
+ __ eor(r1, r1, Operand(r1, LSR, 8));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ const int kTranscendentalCacheSizeBits = 9;
+ ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
+ TranscendentalCache::kCacheSize);
+ __ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
+ } else {
+ __ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
+ }
+
+ // r2 = low 32 bits of double value.
+ // r3 = high 32 bits of double value.
+ // r1 = TranscendentalCache::hash(double value).
+ __ mov(r0,
+ Operand(ExternalReference::transcendental_cache_array_address()));
+ // r0 points to cache array.
+ __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &runtime_call);
+
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+ // Find the address of entry r1 in the cache, i.e., &r0[r1 * 12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(r0, r0, Operand(r1, LSL, 2));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ ldm(ia, r0, r4.bit() | r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ b(ne, &runtime_call);
+ __ cmp(r3, r5);
+ __ b(ne, &runtime_call);
+ // Cache hit. Load result, pop argument and return.
+ __ mov(r0, Operand(r6));
+ __ pop();
+ __ Ret();
+ }
+
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
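// A host-side sketch of the cache index computation emitted above, assuming
// IEEE-754 doubles and the 512-entry cache asserted by the ARMv7 path; the
// function name and layout here are illustrative, not V8 API.
#include <stdint.h>
#include <string.h>

static const int kCacheSize = 512;  // 1 << kTranscendentalCacheSizeBits

int TranscendentalCacheIndex(double v) {
  uint32_t w[2];
  memcpy(w, &v, sizeof(w));      // w[0] = low word (r2), w[1] = high word (r3)
  uint32_t h = w[0] ^ w[1];      // h = low ^ high
  h ^= h >> 16;                  // h ^= h >> 16
  h ^= h >> 8;                   // h ^= h >> 8
  return h & (kCacheSize - 1);   // the ubfx/and at the end of the hash
}
// Each element is 12 bytes (two uint32_t inputs plus an output pointer), so
// the entry address is cache + index * 12, computed above as (i + i * 2) * 4.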
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
void StackCheckStub::Generate(MacroAssembler* masm) {
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
@@ -8515,9 +9080,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ cmp(r1, Operand(0));
__ b(eq, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -8526,6 +9090,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
// Copy the fixed array slots.
Label loop;
@@ -8676,7 +9241,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, r0);
+ __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
__ b(gt, &runtime);
// subject: Subject string
@@ -9009,142 +9574,200 @@ int CompareStub::MinorKey() {
}
-void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
- __ BranchOnSmi(object, receiver_not_string);
+ __ BranchOnSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
- __ tst(result, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string);
+ __ tst(result_, Operand(kIsNotStringMask));
+ __ b(ne, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
- __ BranchOnNotSmi(index, index_not_smi);
+ __ BranchOnNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
// Check for index out of range.
- __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset));
- // Now scratch has the length of the string. Compare with the index.
- __ cmp(scratch, Operand(index));
- __ b(ls, index_out_of_range);
-
- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
+ __ cmp(ip, Operand(scratch_));
+ __ b(ls, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT_EQ(0, kSeqStringTag);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, &not_a_flat_string);
-
- // Check for 1-byte or 2-byte string.
- ASSERT_EQ(0, kTwoByteStringTag);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string. We can add without shifting since the Smi tag size is the
- // log2 of the number of bytes in a two-byte character.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiShiftSize);
- __ add(scratch, object, Operand(index));
- __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
+ ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
// Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, result, Operand(kStringRepresentationMask));
- __ cmp(result, Operand(kConsStringTag));
- __ b(ne, slow_case);
+ __ tst(result_, Operand(kIsConsStringMask));
+ __ b(eq, &call_runtime_);
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
- __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset));
- __ LoadRoot(scratch, Heap::kEmptyStringRootIndex);
- __ cmp(result, Operand(scratch));
- __ b(ne, slow_case);
-
+ __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(ne, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset));
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(nz, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ ASSERT(kAsciiStringTag != 0);
+ __ tst(result_, Operand(kStringEncodingMask));
+ __ b(nz, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ add(scratch_, object_, Operand(scratch_));
+ __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
// ASCII string.
+ // Load the byte into the result register.
__ bind(&ascii_string);
- __ add(scratch, object, Operand(index, LSR, kSmiTagSize));
- __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize));
+ __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
+ __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- __ mov(result, Operand(result, LSL, kSmiTagSize));
+ __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ bind(&exit_);
+}
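// A host-side C++ sketch (illustrative, not V8 code) of the index arithmetic
// above: with kSmiTag == 0 and kSmiTagSize == 1 a tagged index i is the word
// 2 * i, which is already the byte offset of character i in a two-byte
// string, while an ASCII string shifts the tag away instead.
#include <stdint.h>
#include <string.h>

uint16_t TwoByteCharAt(const uint8_t* chars, int32_t tagged_index) {
  // Mirrors: add scratch_, object_, Operand(scratch_); ldrh ...
  uint16_t c;
  memcpy(&c, chars + tagged_index, sizeof(c));
  return c;
}

uint8_t AsciiCharAt(const uint8_t* chars, int32_t tagged_index) {
  // Mirrors: add scratch_, object_, Operand(scratch_, LSR, kSmiTagSize); ldrb
  return chars[tagged_index >> 1];
}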
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ true);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(r0)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, r0);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ BranchOnNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(r0)) {
+ __ mov(result_, r0);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}
-void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register scratch,
- Register result,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
-
- Label slow_case;
- Label exit;
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(code, Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case);
+ __ tst(code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ b(nz, &slow_case_);
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point the code register contains the smi-tagged ASCII char code.
ASSERT(kSmiTag == 0);
- __ mov(result, Operand(Factory::single_character_string_cache()));
- __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(result, scratch);
- __ b(eq, &slow_case);
- __ b(&exit);
+ __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(eq, &slow_case_);
+ __ bind(&exit_);
+}
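// A simplified model (names invented for illustration) of the fast path
// above: ASCII char codes index a cache of pre-built one-character strings,
// and a non-smi code, a code above String::kMaxAsciiCharCode, or an undefined
// cache entry all fall through to the slow case.
static const int kMaxAsciiCharCode = 127;

// Returns nullptr to signal "take the slow case".
const void* LookupSingleCharacterString(const void* const* cache,
                                        const void* undefined_sentinel,
                                        int code) {
  if (code < 0 || code > kMaxAsciiCharCode) return nullptr;
  const void* result = cache[code];
  return (result == undefined_sentinel) ? nullptr : result;
}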
- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(r0)) {
- __ mov(result, r0);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(r0));
- __ push(code);
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }
- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(r0));
- __ Ret();
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(r0)) {
+ __ mov(result_, r0);
}
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 361ea131..91adff0f 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -43,6 +43,7 @@ class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
// -------------------------------------------------------------------------
@@ -101,6 +102,11 @@ class Reference BASE_EMBEDDED {
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
+ // This is in preparation for something that uses the reference on the stack.
+ // If we need this reference after the get, then dup it now. Otherwise mark
+ // it as unloaded.
+ inline void DupIfPersist();
+
private:
CodeGenerator* cgen_;
Expression* expression_;
@@ -124,24 +130,55 @@ class CodeGenState BASE_EMBEDDED {
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
// Destroy a code generator state and restore the owning code generator's
// previous state.
- ~CodeGenState();
+ virtual ~CodeGenState();
+
+ virtual JumpTarget* true_target() const { return NULL; }
+ virtual JumpTarget* false_target() const { return NULL; }
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
+ protected:
+ inline CodeGenerator* owner() { return owner_; }
+ inline CodeGenState* previous() const { return previous_; }
private:
CodeGenerator* owner_;
+ CodeGenState* previous_;
+};
+
+
+class ConditionCodeGenState : public CodeGenState {
+ public:
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own pair of branch labels.
+ ConditionCodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target);
+
+ virtual JumpTarget* true_target() const { return true_target_; }
+ virtual JumpTarget* false_target() const { return false_target_; }
+
+ private:
JumpTarget* true_target_;
JumpTarget* false_target_;
- CodeGenState* previous_;
+};
+
+
+class TypeInfoCodeGenState : public CodeGenState {
+ public:
+ TypeInfoCodeGenState(CodeGenerator* owner,
+ Slot* slot_number,
+ TypeInfo info);
+ ~TypeInfoCodeGenState();
+
+ virtual JumpTarget* true_target() const { return previous()->true_target(); }
+ virtual JumpTarget* false_target() const {
+ return previous()->false_target();
+ }
+
+ private:
+ Slot* slot_;
+ TypeInfo old_type_info_;
};
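// A hypothetical usage sketch of the state classes above (the call site is
// assumed): states link to their predecessor and restore it on destruction,
// so type information can be narrowed for the duration of a subtree visit.
//
//   {
//     TypeInfoCodeGenState state(this, slot, TypeInfo::Smi());
//     Visit(body);  // inside the body, type_info(slot).IsSmi() holds
//   }  // the destructor restores the slot's previous TypeInfo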
@@ -189,7 +226,9 @@ class CodeGenerator: public AstVisitor {
bool is_toplevel,
Handle<Script> script);
- static void RecordPositions(MacroAssembler* masm, int pos);
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
@@ -211,6 +250,23 @@ class CodeGenerator: public AstVisitor {
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
+ TypeInfo type_info(Slot* slot) {
+ int index = NumberOfSlot(slot);
+ if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
+ return (*type_info_)[index];
+ }
+
+ TypeInfo set_type_info(Slot* slot, TypeInfo info) {
+ int index = NumberOfSlot(slot);
+ ASSERT(index >= kInvalidSlotNumber);
+ if (index != kInvalidSlotNumber) {
+ TypeInfo previous_value = (*type_info_)[index];
+ (*type_info_)[index] = info;
+ return previous_value;
+ }
+ return TypeInfo::Unknown();
+ }
+
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
static const int kUnknownIntValue = -1;
@@ -220,7 +276,7 @@ class CodeGenerator: public AstVisitor {
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
- static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
+ static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
private:
@@ -234,6 +290,10 @@ class CodeGenerator: public AstVisitor {
// Generating deferred code.
void ProcessDeferred();
+ static const int kInvalidSlotNumber = -1;
+
+ int NumberOfSlot(Slot* slot);
+
// State
bool has_cc() const { return cc_reg_ != al; }
JumpTarget* true_target() const { return state_->true_target(); }
@@ -252,16 +312,6 @@ class CodeGenerator: public AstVisitor {
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- inline void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
// Main code generation function
void Generate(CompilationInfo* info);
@@ -299,19 +349,6 @@ class CodeGenerator: public AstVisitor {
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression);
-
- // Call LoadCondition and then spill the virtual frame unless control flow
- // cannot reach the end of the expression (ie, by emitting only
- // unconditional jumps to the control targets).
- inline void LoadConditionAndSpill(Expression* expression,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control);
-
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
@@ -369,10 +406,8 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
+ GenerateInlineSmi inline_smi,
int known_rhs = kUnknownIntValue);
- void VirtualFrameBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- int known_rhs = kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@@ -416,6 +451,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
@@ -445,10 +482,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for String.fromCharCode(n).
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -526,6 +566,8 @@ class CodeGenerator: public AstVisitor {
CodeGenState* state_;
int loop_nesting_;
+ Vector<TypeInfo>* type_info_;
+
// Jump targets
BreakTarget function_return_;
@@ -547,6 +589,21 @@ class CodeGenerator: public AstVisitor {
};
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
@@ -693,38 +750,6 @@ class GenericBinaryOpStub : public CodeStub {
class StringHelper : public AllStatic {
public:
- // Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for four reasons (in the
- // listed order):
- // * Receiver is not a string (receiver_not_string label).
- // * Index is not a smi (index_not_smi label).
- // * Index is out of range (index_out_of_range).
- // * Some other reason (slow_case label). In this case it's
- // guaranteed that the above conditions are not violated,
- // e.g. it's safe to assume the receiver is a string and the
- // index is a non-negative smi < length.
- // When successful, object, index, and scratch are clobbered.
- // Otherwise, scratch and result are clobbered.
- static void GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case);
-
- // Generates code for creating a one-char string from the given char
- // code. May do a runtime call, so any register can be clobbered
- // and, if the given invoke flag specifies a call, an internal frame
- // is required. In tail call mode the result must be r0 register.
- static void GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register scratch,
- Register result,
- InvokeFlag flag);
-
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 57c5c1c0..e36f595c 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -66,10 +66,15 @@
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
-// Simulator should support ARM5 instructions.
+// Simulator should support ARM5 instructions and unaligned access by default.
#if !defined(__arm__)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
+
+# ifndef CAN_USE_UNALIGNED_ACCESSES
+# define CAN_USE_UNALIGNED_ACCESSES 1
+# endif
+
#endif
#if CAN_USE_UNALIGNED_ACCESSES
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 69fc504e..65f5eeaf 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -57,7 +57,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// #endif
// <debug break return code entry point address>
// bkpt 0
- CodePatcher patcher(rinfo()->pc(), 4);
+ CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
@@ -73,17 +73,59 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
+ Assembler::kJSReturnSequenceInstructions);
}
-// A debug break in the exit code is identified by a call.
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Patch the code changing the debug break slot code from
+ // mov r2, r2
+ // mov r2, r2
+ // mov r2, r2
+ // to a call to the debug break slot code.
+ // #if USE_BLX
+ // ldr ip, [pc, #0]
+ // blx ip
+ // #else
+ // mov lr, pc
+ // ldr pc, [pc, #-4]
+ // #endif
+ // <debug break slot code entry point address>
+ CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+#ifdef USE_BLX
+ patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
+ patcher.masm()->blx(v8::internal::ip);
+#else
+ patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
+ patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
+#endif
+ patcher.Emit(Debug::debug_break_slot()->entry());
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -220,10 +262,33 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(2);
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_codesize));
+}
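// Shape of the emitted slot, as a sketch: it is kDebugBreakSlotInstructions
// no-ops (nop(2), i.e. mov r2, r2) emitted with the constant pool blocked,
// sized so that SetDebugBreakAtSlot() can later overwrite the slot in place
// with the equally long call sequence shown further up.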
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // At the points where a debug break slot is inserted, no registers can
+ // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0);
+}
+
+
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on arm");
}
+
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on arm");
}
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 0ac7d19f..1c05bc3a 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -401,6 +401,20 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
PrintCondition(instr);
return 4;
}
+ case 'f': { // 'f: bitfield instructions - v7 and above.
+ uint32_t lsbit = instr->Bits(11, 7);
+ uint32_t width = instr->Bits(20, 16) + 1;
+ if (instr->Bit(21) == 0) {
+ // BFC/BFI:
+ // Bits 20-16 represent the most-significant bit. Convert to width.
+ width -= lsbit;
+ ASSERT(width > 0);
+ }
+ ASSERT((width + lsbit) <= 32);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
+ return 1;
+ }
case 'h': { // 'h: halfword operation for extra loads and stores
if (instr->HasH()) {
Print("h");
@@ -446,16 +460,6 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Field());
return 5;
- } else if ((format[3] == '1') && (format[4] == '6')) {
- ASSERT(STRING_STARTS_WITH(format, "off16to20"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Bits(20, 16) +1);
- return 9;
- } else if (format[3] == '7') {
- ASSERT(STRING_STARTS_WITH(format, "off7to11"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->ShiftAmountField());
- return 8;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
@@ -882,10 +886,26 @@ void Decoder::DecodeType3(Instr* instr) {
case 3: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
- Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
+ if (instr->Bit(22)) {
+ Format(instr, "ubfx'cond 'rd, 'rm, 'f");
+ } else {
+ Format(instr, "sbfx'cond 'rd, 'rm, 'f");
+ }
+ } else {
+ UNREACHABLE();
+ }
+ } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
+ if (msbit >= lsbit) {
+ if (instr->RmField() == 15) {
+ Format(instr, "bfc'cond 'rd, 'f");
+ } else {
+ Format(instr, "bfi'cond 'rd, 'rm, 'f");
+ }
} else {
UNREACHABLE();
}
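// A host-side sketch of the operand decoding used by the 'f formatter and
// the DecodeType3 cases above; the function name is illustrative, not V8 API.
#include <stdint.h>

void DecodeBitfieldOperands(uint32_t instr, uint32_t* lsbit, uint32_t* width) {
  *lsbit = (instr >> 7) & 0x1F;           // bits 11-7
  uint32_t field = (instr >> 16) & 0x1F;  // bits 20-16
  if ((instr >> 21) & 1) {
    *width = field + 1;                   // UBFX/SBFX: field holds width - 1
  } else {
    *width = field + 1 - *lsbit;          // BFC/BFI: field holds the msbit
  }
}
// Example: for ubfx r0, r1, #3, #5 the encoding has field == 4 (width - 1)
// and lsbit == 3, so the decoder prints "#3, #5".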
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index fecc2137..e6196639 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -196,11 +196,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
- EmitReturnSequence(function()->end_position());
+ EmitReturnSequence();
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -224,7 +224,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- CodeGenerator::RecordPositions(masm_, position);
+ CodeGenerator::RecordPositions(masm_, function()->end_position());
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -238,8 +238,10 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
// add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
- CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
- return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
+ CHECK(return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions ||
+ return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions + 1);
#endif
}
}
@@ -917,7 +919,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Setup the four remaining stack slots.
__ push(r0); // Map.
__ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSL, kSmiTagSize));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
@@ -928,7 +929,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
__ Push(r1, r0);
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSL, kSmiTagSize));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
@@ -1829,76 +1829,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
- EmitCharFromCode(expr->arguments());
- } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
- EmitFastCharCodeAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else {
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2349,49 +2279,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
- Label slow_case, done;
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(r0, Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case);
- __ mov(r1, Operand(Factory::single_character_string_cache()));
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
- __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, r2);
- __ b(eq, &slow_case);
- __ mov(r0, r1);
- __ b(&done);
+ Label done;
+ StringCharFromCodeGenerator generator(r0, r1);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
- __ bind(&slow_case);
- __ push(r0);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, r0);
+ Apply(context_, r1);
}
-void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
- // TODO(fsc): Port the complete implementation from the classic back-end.
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- Apply(context_, r0);
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = r1;
+ Register index = r0;
+ Register scratch = r2;
+ Register result = r3;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = r1;
+ Register index = r0;
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+ Register result = r0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ mov(result, Operand(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
}
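// The JavaScript semantics the sentinel paths above implement (per
// ECMA-262), shown as a sketch:
//   "abc".charCodeAt(5)  -->  NaN  (out of range: load the NaN root)
//   "abc".charAt(5)      -->  ""   (out of range: load the empty string root)
// A non-smi index triggers conversion instead, signalled by moving undefined
// (for charCodeAt) or smi zero (for charAt) into the result register.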
+
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index ba318fd2..d0a32e81 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -48,60 +48,70 @@ namespace internal {
#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal.
+// receiver: Receiver. It is not clobbered if a jump to the miss label is
+//           taken.
+// name:     Property name. It is not clobbered if a jump to the miss label
+//           is taken.
+// result:   Register for the result. It is only updated if the miss label is
+//           not taken. It may be the same register as receiver or name, in
+//           which case that register is clobbered on the non-miss path.
+// The three scratch registers need to be different from the receiver, name and
+// result.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss,
- Register t0,
- Register t1) {
- // Register use:
- //
- // t0 - used to hold the property dictionary.
- //
- // t1 - initially the receiver
- // - used for the index into the property dictionary
- // - holds the result on exit.
- //
- // r3 - used as temporary and to hold the capacity of the property
- // dictionary.
- //
- // r2 - holds the name of the property and is unchanged.
- // r4 - used as temporary.
+ Register receiver,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DictionaryCheck check_dictionary) {
+ // Main use of the scratch registers.
+ // scratch1: Used to hold the property dictionary.
+ // scratch2: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch3: Used as temporary.
Label done;
// Check for the absence of an interceptor.
- // Load the map into t0.
- __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
+ // Load the map into scratch1.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
// Bail out if the receiver has a named interceptor.
- __ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
__ b(nz, miss);
// Bail out if we have a JS global proxy object.
- __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, miss);
// Possible work-around for http://crbug.com/16276.
// See also: http://codereview.chromium.org/155418.
- __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, miss);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(eq, miss);
+ // Load the properties array.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
// Check that the properties array is a dictionary.
- __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, miss);
+ if (check_dictionary == CHECK_DICTIONARY) {
+ __ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(scratch2, ip);
+ __ b(ne, miss);
+ }
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
- __ sub(r3, r3, Operand(1));
+ __ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
+ __ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(scratch2, scratch2, Operand(1));
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
@@ -112,26 +122,27 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following 'and' instruction.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
- __ add(r4, r4, Operand(
+ __ add(scratch3, scratch3, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(r4, r3, Operand(r4, LSR, String::kHashShift));
+ __ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ add(r4, r4, Operand(r4, LSL, 1)); // r4 = r4 * 3
+ // scratch3 = scratch3 * 3.
+ __ add(scratch3, scratch3, Operand(scratch3, LSL, 1));
// Check if the key is identical to the name.
- __ add(r4, t0, Operand(r4, LSL, 2));
- __ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
- __ cmp(r2, Operand(ip));
+ __ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
+ __ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
+ __ cmp(name, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
} else {
@@ -140,13 +151,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Check that the value is a normal property.
- __ bind(&done); // r4 == t0 + 4*index
- __ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
- __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ bind(&done); // scratch3 == scratch1 + 4 * index
+ __ ldr(scratch2,
+ FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
+ __ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
- __ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
+ __ ldr(result,
+ FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
}
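For reference, the unrolled probe loop above computes the sequence described in its own comment, "(hash + i + i * i) & mask", scaled by the 3-word entry size asserted in the code (key, value, details). A standalone sketch:

#include <cstdint>

// Offset (in words) of the i-th quadratic probe into the string dictionary.
uint32_t ProbeEntryOffset(uint32_t hash, uint32_t capacity_mask, int i) {
  uint32_t index = (hash + i + i * i) & capacity_mask;  // quadratic probing
  return index * 3;  // StringDictionary::kEntrySize == 3
}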
@@ -163,11 +176,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
+ // Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
- // Holds the result on exit if the load succeeded.
//
// t1 - used to hold the capacity mask of the dictionary
//
@@ -235,7 +248,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(t0, FieldMemOperand(t2, kValueOffset));
+ __ ldr(key, FieldMemOperand(t2, kValueOffset));
}
@@ -354,7 +367,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
Label* miss,
Register scratch) {
// Search dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, miss, r0, r1);
+ GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
// Check that the value isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
@@ -483,6 +496,21 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ UNREACHABLE();
+}
+
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ UNREACHABLE();
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ UNREACHABLE();
+}
+
+
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@@ -534,7 +562,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ b(ne, &miss);
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, r1, r0);
+ GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
__ Ret();
// Global object access: Check access rights.
@@ -542,7 +570,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(r0, r1, &miss);
__ b(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}
@@ -579,7 +607,13 @@ static inline bool IsInlinedICSite(Address address,
}
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
- ASSERT(Assembler::IsBranch(instr_after_nop));
+ // There may be some reg-reg move and frame merging code to skip over before
+ // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
+ // code.
+ while (!Assembler::IsBranch(instr_after_nop)) {
+ address_after_nop += Assembler::kInstrSize;
+ instr_after_nop = Assembler::instr_at(address_after_nop);
+ }
// Find the end of the inlined code for handling the load.
int b_offset =
@@ -709,7 +743,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ Push(r1, r0);
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -719,7 +753,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label slow, fast, check_pixel_array, check_number_dictionary;
+ Label slow, check_string, index_smi, index_string;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
Register key = r0;
Register receiver = r1;
@@ -742,11 +777,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(lt, &slow);
// Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
- // Untag key into r2..
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
-
- // Get the elements array of the object.
+ __ BranchOnNotSmi(key, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This point is also reached from below,
+ // where a numeric string key is converted to a smi.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
@@ -754,23 +788,25 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
- __ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
- __ cmp(r2, r3);
+ __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(r3));
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ mov(r0, r2);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
// Check whether the elements is a pixel array.
// r0: key
- // r2: untagged index
// r3: elements map
// r4: elements
__ bind(&check_pixel_array);
@@ -778,6 +814,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
+ __ mov(r2, Operand(key, ASR, kSmiTagSize));
__ cmp(r2, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
@@ -788,90 +825,159 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: key
- // r2: untagged index
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
- __ mov(r0, r2);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_string);
+ // The key is not a smi.
+ // Is it a string?
+ // r0: key
+ // r1: receiver
+ __ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, &slow);
+
+ // Is the string an array index, with cached numeric value?
+ __ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ tst(r3, Operand(String::kContainsCachedArrayIndexMask));
+ __ b(eq, &index_string);
+
+ // Is the string a symbol?
+ // r2: key map
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ tst(r3, Operand(kIsSymbolMask));
+ __ b(eq, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(eq, &probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
+ __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
+ __ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
+ __ mov(r4, Operand(cache_keys));
+ __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
+ __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
+ __ cmp(r2, r5);
+ __ b(ne, &slow);
+ __ ldr(r5, MemOperand(r4));
+ __ cmp(r0, r5);
+ __ b(ne, &slow);
+
+ // Get field offset and check that it is an in-object property.
+ // r0 : key
+ // r1 : receiver
+ // r2 : receiver's map
+ // r3 : lookup cache index
+ ExternalReference cache_field_offsets
+ = ExternalReference::keyed_lookup_cache_field_offsets();
+ __ mov(r4, Operand(cache_field_offsets));
+ __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+ __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
+ __ cmp(r5, r6);
+ __ b(ge, &slow);
+
+ // Load in-object property.
+ __ sub(r5, r5, r6); // Index from end of object.
+ __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ add(r6, r6, r5); // Index from start of object.
+ __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // Load the property to r0.
+ GenerateDictionaryLoad(
+ masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
+ __ Ret();
+
+ __ b(&slow);
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ __ bind(&index_string);
+ // r0: key (string)
+ // r1: receiver
+ // r3: hash field
+ // We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ and_(r3, r3, Operand(String::kArrayIndexValueMask));
+ // Here we actually clobber the key (r0) which will be used if calling into
+ // runtime later. However, as the new key is the numeric value of the string
+ // key, there is no difference in using either key.
+ __ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
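The lookup-cache indexing above condenses to one expression; a sketch with illustrative constants (the real shift amounts and capacity live in KeyedLookupCache and String and are assumptions here):

#include <cstdint>

const int kMapHashShift = 2;       // assumed; KeyedLookupCache::kMapHashShift
const int kHashShift = 2;          // assumed; String::kHashShift
const int kCapacityMask = 64 - 1;  // assumed cache capacity

// Mixes the receiver's map pointer with the symbol's hash field, mirroring
// the mov/eor/and sequence above.
int LookupCacheIndex(uintptr_t map, uint32_t hash_field) {
  int hash = static_cast<int>(map >> kMapHashShift) ^
             static_cast<int>(hash_field >> kHashShift);
  return hash & kCapacityMask;  // index of a (map, symbol) key pair
}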
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
- // -- r0 : key
+ // -- r0 : key (index)
// -- r1 : receiver
// -----------------------------------
Label miss;
- Label index_not_smi;
Label index_out_of_range;
- Label slow_char_code;
- Label got_char_code;
- Register object = r1;
+ Register receiver = r1;
Register index = r0;
- Register code = r2;
- Register scratch = r3;
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+ Register result = r0;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
- StringHelper::GenerateFastCharCodeAt(masm,
- object,
- index,
- scratch,
- code,
- &miss, // When not a string.
- &index_not_smi,
- &index_out_of_range,
- &slow_char_code);
-
- // If we didn't bail out, code register contains smi tagged char
- // code.
- __ bind(&got_char_code);
- StringHelper::GenerateCharFromCode(masm, code, scratch, r0, JUMP_FUNCTION);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from char from code tail call");
-#endif
-
- // Check if key is a heap number.
- __ bind(&index_not_smi);
- __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true);
-
- // Push receiver and key on the stack (now that we know they are a
- // string and a number), and call runtime.
- __ bind(&slow_char_code);
- __ EnterInternalFrame();
- __ Push(object, index);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- ASSERT(!code.is(r0));
- __ mov(code, r0);
- __ LeaveInternalFrame();
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
- // Check if the runtime call returned NaN char code. If yes, return
- // undefined. Otherwise, we can continue.
- if (FLAG_debug_code) {
- __ BranchOnSmi(code, &got_char_code);
- __ ldr(scratch, FieldMemOperand(code, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ Assert(eq, "StringCharCodeAt must return smi or heap number");
- }
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ cmp(code, scratch);
- __ b(ne, &got_char_code);
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
__ bind(&miss);
- GenerateGeneric(masm);
+ GenerateMiss(masm);
}
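The helper passed to GenerateSlow decides how runtime calls are framed: ICRuntimeCallHelper brackets them with an internal frame, while NopRuntimeCallHelper (used in the full codegen above, where a frame already exists) emits nothing. A sketch of the assumed interface — the concrete classes are defined elsewhere in the codegen sources:

class MacroAssembler;

// Code is emitted before and after any runtime call made on the slow path.
class RuntimeCallHelper {
 public:
  virtual ~RuntimeCallHelper() {}
  virtual void BeforeCall(MacroAssembler* masm) const = 0;
  virtual void AfterCall(MacroAssembler* masm) const = 0;
};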
@@ -1283,11 +1389,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &check_pixel_array);
- // Untag the key (for checking against untagged length in the fixed array).
- __ mov(r4, Operand(key, ASR, kSmiTagSize));
- // Compute address to store into and check array bounds.
+ // Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(r4, Operand(ip));
+ __ cmp(key, Operand(ip));
__ b(lo, &fast);
// Slow case, handle jump to runtime.
@@ -1333,9 +1437,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Condition code from comparing key and array length is still available.
__ b(ne, &slow); // Only support writing to array[array.length].
// Check for room in the elements backing store.
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
+ // Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(r4, Operand(ip));
+ __ cmp(key, Operand(ip));
__ b(hs, &slow);
// Calculate key + 1 as smi.
ASSERT_EQ(0, kSmiTag);
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 3c43d168..86198fb7 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -50,6 +50,11 @@ void JumpTarget::DoJump() {
ASSERT(cgen()->HasValidEntryRegisters());
if (entry_frame_set_) {
+ if (entry_label_.is_bound()) {
+ // If we have already bound and generated code at the destination then it
+ // is too late to ask for less optimistic type assumptions.
+ ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
+ }
// There is already a frame expectation at the target.
cgen()->frame()->MergeTo(&entry_frame_);
cgen()->DeleteFrame();
@@ -67,20 +72,21 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
- if (cc == al) {
- cgen()->frame()->MergeTo(&entry_frame_);
- } else {
- // We can't do conditional merges yet so you have to ensure that all
- // conditional branches to the JumpTarget have the same virtual frame.
- ASSERT(cgen()->frame()->Equals(&entry_frame_));
+ if (entry_label_.is_bound()) {
+ // If we have already bound and generated code at the destination then it
+ // is too late to ask for less optimistic type assumptions.
+ ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
+ // We have an expected frame to merge to on the backward edge.
+ cgen()->frame()->MergeTo(&entry_frame_, cc);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
__ b(cc, &entry_label_);
+ if (cc == al) {
+ cgen()->DeleteFrame();
+ }
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 29c48a40..6292b581 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -183,15 +183,18 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
-void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+void MacroAssembler::Swap(Register reg1,
+ Register reg2,
+ Register scratch,
+ Condition cond) {
if (scratch.is(no_reg)) {
- eor(reg1, reg1, Operand(reg2));
- eor(reg2, reg2, Operand(reg1));
- eor(reg1, reg1, Operand(reg2));
+ eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
+ eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
+ eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
- mov(scratch, reg1);
- mov(reg1, reg2);
- mov(reg2, scratch);
+ mov(scratch, reg1, LeaveCC, cond);
+ mov(reg1, reg2, LeaveCC, cond);
+ mov(reg2, scratch, LeaveCC, cond);
}
}
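The scratch-free branch relies on the classic three-XOR swap; a sketch of the identity (note it must not be applied when both operands are the same register, which would zero it):

#include <cstdint>

void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;  // a = a0 ^ b0
  b ^= a;  // b = b0 ^ (a0 ^ b0) = a0
  a ^= b;  // a = (a0 ^ b0) ^ a0 = b0
}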
@@ -252,63 +255,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
- // This is how much we shift the remembered set bit offset to get the
- // offset of the word in the remembered set. We divide by kBitsPerInt (32,
- // shift right 5) and then multiply by kIntSize (4, shift left 2).
- const int kRSetWordShift = 3;
-
- Label fast;
+ mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
- // Compute the bit offset in the remembered set.
- // object: heap object pointer (with tag)
- // offset: offset to store location from the object
- mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
- and_(scratch, object, Operand(ip)); // offset into page of the object
- add(offset, scratch, Operand(offset)); // add offset into the object
- mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
+ // Calculate region number.
+ add(offset, object, Operand(offset)); // Add offset into the object.
+ and_(offset, offset, Operand(ip)); // Offset into page of the object.
+ mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
- // Compute the page address from the heap object pointer.
- // object: heap object pointer (with tag)
- // offset: bit offset of store position in the remembered set
+ // Calculate page address.
bic(object, object, Operand(ip));
- // If the bit offset lies beyond the normal remembered set range, it is in
- // the extra remembered set area of a large object.
- // object: page start
- // offset: bit offset of store position in the remembered set
- cmp(offset, Operand(Page::kPageSize / kPointerSize));
- b(lt, &fast);
-
- // Adjust the bit offset to be relative to the start of the extra
- // remembered set and the start address to be the address of the extra
- // remembered set.
- sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
- // Load the array length into 'scratch' and multiply by four to get the
- // size in bytes of the elements.
- ldr(scratch, MemOperand(object, Page::kObjectStartOffset
- + FixedArray::kLengthOffset));
- mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
- // Add the page header (including remembered set), array header, and array
- // body size to the page address.
- add(object, object, Operand(Page::kObjectStartOffset
- + FixedArray::kHeaderSize));
- add(object, object, Operand(scratch));
-
- bind(&fast);
- // Get address of the rset word.
- // object: start of the remembered set (page start for the fast case)
- // offset: bit offset of store position in the remembered set
- bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
- add(object, object, Operand(scratch, LSR, kRSetWordShift));
- // Get bit offset in the rset word.
- // object: address of remembered set word
- // offset: bit offset of store position
- and_(offset, offset, Operand(kBitsPerInt - 1));
-
- ldr(scratch, MemOperand(object));
+ // Mark region dirty.
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
- str(scratch, MemOperand(object));
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
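In C terms the rewritten helper reduces to a few lines; a sketch with assumed constants (the real values live on Page; 8K pages with one 32-bit word of dirty bits implies 256-byte regions):

#include <cstdint>

const uintptr_t kPageAlignmentMask = (1u << 13) - 1;  // assumed 8K pages
const int kRegionSizeLog2 = 8;                        // assumed 256-byte regions
const int kDirtyFlagOffset = 0;                       // assumed header slot

void MarkRegionDirty(uintptr_t object, uintptr_t offset) {
  // Region number of the store target within its page.
  uintptr_t region =
      ((object + offset) & kPageAlignmentMask) >> kRegionSizeLog2;
  // Page start; the bic above clears the same low bits (including the tag).
  uintptr_t page = object & ~kPageAlignmentMask;
  uint32_t* dirty = reinterpret_cast<uint32_t*>(page + kDirtyFlagOffset);
  *dirty |= 1u << region;  // one dirty bit per region
}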
@@ -336,7 +297,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label done;
// First, test that the object is not in the new space. We cannot set
- // remembered set bits in the new space.
+ // region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
// Record the actual write.
@@ -664,6 +625,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
+ mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
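The added ASR is smi untagging: the formal parameter count is now stored as a smi in the SharedFunctionInfo. A sketch of the 32-bit encoding, assuming kSmiTag == 0 and kSmiTagSize == 1 as asserted elsewhere in this change:

#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }  // low bit 0 marks a smi
int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // arithmetic shift back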
@@ -1237,6 +1199,21 @@ void MacroAssembler::CheckMap(Register obj,
}
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ BranchOnSmi(obj, fail);
+ }
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(ip, index);
+ cmp(scratch, ip);
+ b(ne, fail);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -1328,7 +1305,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7)) {
- ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
+ ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
and_(dst, dst, Operand((1 << num_least_bits) - 1));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 494f2b69..87f7b5fe 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -88,7 +88,10 @@ class MacroAssembler: public Assembler {
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+ void Swap(Register reg1,
+ Register reg2,
+ Register scratch = no_reg,
+ Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
@@ -114,16 +117,14 @@ class MacroAssembler: public Assembler {
Label* branch);
- // Set the remebered set bit for an offset into an
- // object. RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object, Register offset, Register scracth);
+ // For the page containing |object| mark the region covering [object+offset]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object, Register offset, Register scratch);
- // Sets the remembered set bit for [address+offset], where address is the
- // address of the heap object 'object'. The address must be in the first 8K
- // of an allocated page. The 'scratch' register is used in the
- // implementation and all 3 registers are clobbered by the operation, as
- // well as the ip register.
+ // For the page containing |object| mark the region covering [object+offset]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ // The 'scratch' register is used in the implementation and all 3 registers
+ // are clobbered by the operation, as well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);
// Push two registers. Pushes leftmost register first (to highest address).
@@ -400,15 +401,23 @@ class MacroAssembler: public Assembler {
InstanceType type);
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
+ // Check if the map of an object is equal to a specified map (either
+ // given directly or as an index into the root list) and branch to
+ // label if not. Skip the smi check if not required (object is known
+ // to be a heap object)
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
bool is_heap_object);
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ bool is_heap_object);
+
+
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index e72a8796..3bdca38e 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2031,7 +2031,6 @@ void Simulator::DecodeType2(Instr* instr) {
void Simulator::DecodeType3(Instr* instr) {
- ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
int rd = instr->RdField();
int rn = instr->RnField();
int32_t rn_val = get_register(rn);
@@ -2058,17 +2057,47 @@ void Simulator::DecodeType3(Instr* instr) {
break;
}
case 3: {
- // UBFX.
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmField()));
- uint32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdField(), extr_val);
+ if (instr->Bit(22)) {
+ // ubfx - unsigned bitfield extract.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->RmField()));
+ uint32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->RdField(), extr_val);
+ } else {
+ // sbfx - signed bitfield extract.
+ int32_t rm_val = get_register(instr->RmField());
+ int32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->RdField(), extr_val);
+ }
+ } else {
+ UNREACHABLE();
+ }
+ return;
+ } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
+ if (msbit >= lsbit) {
+ // bfc or bfi - bitfield clear/insert.
+ uint32_t rd_val =
+ static_cast<uint32_t>(get_register(instr->RdField()));
+ uint32_t bitcount = msbit - lsbit + 1;
+ uint32_t mask = (1 << bitcount) - 1;
+ rd_val &= ~(mask << lsbit);
+ if (instr->RmField() != 15) {
+ // bfi - bitfield insert.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->RmField()));
+ rm_val &= mask;
+ rd_val |= rm_val << lsbit;
+ }
+ set_register(instr->RdField(), rd_val);
} else {
UNREACHABLE();
}
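Reference semantics for the newly simulated bitfield instructions, as a standalone sketch mirroring the shift-based implementation above (widthminus1 is width - 1 as encoded in bits 20-16; fields narrower than 32 bits assumed, as in the simulator):

#include <cstdint>

uint32_t Ubfx(uint32_t rm, int lsb, int widthminus1) {
  int msb = lsb + widthminus1;
  uint32_t v = rm << (31 - msb);   // move the field to the top
  return v >> (31 - widthminus1);  // logical shift zero-extends
}

int32_t Sbfx(int32_t rm, int lsb, int widthminus1) {
  int msb = lsb + widthminus1;
  int32_t v = rm << (31 - msb);
  return v >> (31 - widthminus1);  // arithmetic shift sign-extends
}

uint32_t BfcBfi(uint32_t rd, uint32_t rm, int lsb, int msb, bool insert) {
  uint32_t mask = (1u << (msb - lsb + 1)) - 1;
  rd &= ~(mask << lsb);                  // bfc: clear the field
  if (insert) rd |= (rm & mask) << lsb;  // bfi: Rm encoding 15 means plain bfc
  return rd;
}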
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index d82ef21c..3992d6c5 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -152,6 +152,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype) {
+ // Get the global function with the given index.
+ JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
@@ -426,191 +437,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}
-class LoadInterceptorCompiler BASE_EMBEDDED {
- public:
- explicit LoadInterceptorCompiler(Register name) : name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- AccessorInfo* callback = NULL;
- bool optimize = false;
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- if (lookup->type() == FIELD) {
- optimize = true;
- } else if (lookup->type() == CALLBACKS) {
- Object* callback_object = lookup->GetCallbackObject();
- if (callback_object->IsAccessorInfo()) {
- callback = AccessorInfo::cast(callback_object);
- optimize = callback->getter() != NULL;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
- miss_label);
- return;
- }
-
- // Note: starting a frame here makes GC aware of pointers pushed below.
- __ EnterInternalFrame();
-
- __ push(receiver);
- __ Push(holder, name_);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- __ LeaveInternalFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
-
- __ LeaveInternalFrame();
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Check that the maps from interceptor's holder to field's holder
- // haven't changed...
- holder = stub_compiler->CheckPrototypes(interceptor_holder,
- holder,
- lookup->holder(),
- scratch1,
- scratch2,
- name,
- miss_label);
- // ... and retrieve a field from field's holder.
- stub_compiler->GenerateFastPropertyLoad(masm,
- r0,
- holder,
- lookup->holder(),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Prepare for tail call: push receiver to stack.
- Label cleanup;
- __ push(receiver);
-
- // Check that the maps from interceptor's holder to callback's holder
- // haven't changed.
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- &cleanup);
-
- // Continue tail call preparation: push remaining parameters.
- __ push(holder);
- __ Move(holder, Handle<AccessorInfo>(callback));
- __ push(holder);
- __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
- __ Push(scratch1, name_);
-
- // Tail call to runtime.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(ref, 5, 1);
-
- // Clean up code: we pushed receiver and need to remove it.
- __ bind(&cleanup);
- __ pop(scratch2);
- }
- }
-
-
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* interceptor_holder,
- Label* miss_label) {
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallExternalReference(ref, 5, 1);
- }
-
- private:
- Register name_;
-};
-
-
-static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -770,9 +596,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
- depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -785,9 +611,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+ } else {
+ // CheckPrototypes has a side effect of fetching a 'holder' for the
+ // API (the object which is 'instanceof' the signature). It's safe to
+ // omit it here because, if present, it has already been fetched by
+ // the previous CheckPrototypes call.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
// Invoke function.
if (can_do_fast_api_call) {
@@ -1015,7 +849,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@@ -1023,18 +857,133 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- LoadInterceptorCompiler compiler(name_reg);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- lookup,
- receiver,
- scratch1,
- scratch2,
- miss);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+ // So far the most popular follow ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them, other cases may be added
+ // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ // Check that the maps from interceptor's holder to lookup's holder
+ // haven't changed. And load lookup's holder into |holder| register.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+ // We found FIELD property in prototype chain of interceptor's holder.
+ // Retrieve a field from field's holder.
+ GenerateFastPropertyLoad(masm(), r0, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ // We found CALLBACKS property in prototype chain of interceptor's
+ // holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ Move(scratch2, Handle<AccessorInfo>(callback));
+ // holder_reg is either receiver or scratch1.
+ if (!receiver.is(holder_reg)) {
+ ASSERT(scratch1.is(holder_reg));
+ __ Push(receiver, holder_reg, scratch2);
+ __ ldr(scratch1,
+ FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ Push(scratch1, name_reg);
+ } else {
+ __ push(receiver);
+ __ ldr(scratch1,
+ FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ Push(holder_reg, scratch2, scratch1, name_reg);
+ }
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
}
@@ -1070,6 +1019,12 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
@@ -1096,8 +1051,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@@ -1146,8 +1100,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1196,14 +1149,33 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
+Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
+
+Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1280,9 +1252,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
@@ -1301,9 +1272,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- r0);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
@@ -1325,9 +1295,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- r0);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
@@ -1351,8 +1320,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
__ bind(&miss_in_smi_check);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1398,8 +1366,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -1481,8 +1448,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
diff --git a/src/arm/virtual-frame-arm-inl.h b/src/arm/virtual-frame-arm-inl.h
index a97cde4f..6a7902af 100644
--- a/src/arm/virtual-frame-arm-inl.h
+++ b/src/arm/virtual-frame-arm-inl.h
@@ -48,6 +48,12 @@ MemOperand VirtualFrame::Receiver() {
return ParameterAt(-1);
}
+
+void VirtualFrame::Forget(int count) {
+ SpillAll();
+ LowerHeight(count);
+}
+
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_ARM_INL_H_
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 3acd2df4..334ca35d 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -40,122 +40,125 @@ namespace internal {
#define __ ACCESS_MASM(masm())
void VirtualFrame::PopToR1R0() {
- VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is in r0 and r1.
- where_to_go.top_of_stack_state_ = R0_R1_TOS;
- MergeTo(&where_to_go);
+ MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
- element_count_ -= 2;
+ LowerHeight(2);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::PopToR1() {
- VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is only in r1.
- where_to_go.top_of_stack_state_ = R1_TOS;
- MergeTo(&where_to_go);
+ MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
- element_count_ -= 1;
+ LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::PopToR0() {
- VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is only in r0.
- where_to_go.top_of_stack_state_ = R0_TOS;
- MergeTo(&where_to_go);
+ MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
- element_count_ -= 1;
+ LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
+void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
- MergeTOSTo(expected->top_of_stack_state_);
+ ASSERT(expected->IsCompatibleWith(this));
+ MergeTOSTo(expected->top_of_stack_state_, cond);
+ ASSERT(register_allocation_map_ == expected->register_allocation_map_);
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
+ if (Equals(expected)) return;
+ expected->tos_known_smi_map_ &= tos_known_smi_map_;
+ MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state) {
+ VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
- __ pop(r0);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
- __ pop(r1);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
- __ pop(r0);
- __ pop(r1);
+ __ pop(r0, cond);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
- __ pop(r1);
- __ pop(r0);
+ __ pop(r1, cond);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
- __ push(r0);
+ __ push(r0, cond);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
- __ mov(r1, r0);
+ __ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
- __ pop(r1);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
- __ mov(r1, r0);
- __ pop(r0);
+ __ mov(r1, r0, LeaveCC, cond);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
- __ push(r1);
+ __ push(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
- __ mov(r0, r1);
+ __ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
- __ mov(r0, r1);
- __ pop(r1);
+ __ mov(r0, r1, LeaveCC, cond);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
- __ pop(r0);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
- __ Push(r1, r0);
+ __ Push(r1, r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
- __ push(r1);
+ __ push(r1, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
- __ push(r1);
- __ mov(r1, r0);
+ __ push(r1, cond);
+ __ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
- __ Swap(r0, r1, ip);
+ __ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
- __ Push(r0, r1);
+ __ Push(r0, r1, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
- __ push(r0);
- __ mov(r0, r1);
+ __ push(r0, cond);
+ __ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
- __ push(r0);
+ __ push(r0, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
- __ Swap(r0, r1, ip);
+ __ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
@@ -163,7 +166,16 @@ void VirtualFrame::MergeTOSTo(
UNREACHABLE();
#undef CASE_NUMBER
}
- top_of_stack_state_ = expected_top_of_stack_state;
+ // A conditional merge will be followed by a conditional branch and the
+ // fall-through code will have an unchanged virtual frame state. If the
+ // merge is unconditional ('al'ways) then it might be followed by a fall
+ // through. We need to update the virtual frame state to match the code we
+ // are falling into. The final case is an unconditional merge followed by an
+ // unconditional branch, in which case it doesn't matter what we do to the
+ // virtual frame state, because the virtual frame will be invalidated.
+ if (cond == al) {
+ top_of_stack_state_ = expected_top_of_stack_state;
+ }
}
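The CASE_NUMBER macro just packs the (current, expected) state pair into a single switch key; a sketch (the enumerator list is assumed to match the real TopOfStack enum):

enum TopOfStack {
  NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS,
  TOS_STATES  // count of states; assumed to terminate the real enum too
};

// Distinct (a, b) pairs map to distinct case labels since b < TOS_STATES.
constexpr int CaseNumber(TopOfStack a, TopOfStack b) {
  return a * TOS_STATES + b;
}

static_assert(CaseNumber(R0_TOS, R1_TOS) != CaseNumber(R1_TOS, R0_TOS),
              "the pair is ordered");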
@@ -264,7 +276,8 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
void VirtualFrame::CallJSFunction(int arg_count) {
// InvokeFunction requires function in r1.
- EmitPop(r1);
+ PopToR1();
+ SpillAll();
// +1 for receiver.
Forget(arg_count + 1);
@@ -277,7 +290,7 @@ void VirtualFrame::CallJSFunction(int arg_count) {
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- ASSERT(SpilledScope::is_spilled());
+ SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
@@ -285,6 +298,7 @@ void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+ SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
@@ -415,7 +429,7 @@ void VirtualFrame::Drop(int count) {
}
if (count == 0) return;
__ add(sp, sp, Operand(count * kPointerSize));
- element_count_ -= count;
+ LowerHeight(count);
}
@@ -425,7 +439,7 @@ void VirtualFrame::Pop() {
} else {
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
- element_count_--;
+ LowerHeight(1);
}
@@ -437,7 +451,7 @@ void VirtualFrame::EmitPop(Register reg) {
__ mov(reg, kTopRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
- element_count_--;
+ LowerHeight(1);
}
@@ -545,7 +559,7 @@ void VirtualFrame::Dup() {
UNREACHABLE();
}
}
- element_count_++;
+ RaiseHeight(1, tos_known_smi_map_ & 1);
}
@@ -584,7 +598,7 @@ void VirtualFrame::Dup2() {
UNREACHABLE();
}
}
- element_count_ += 2;
+ RaiseHeight(2, tos_known_smi_map_ & 3);
}
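Dup and Dup2 feed the low one or two bits of the existing map back into RaiseHeight because the duplicated slots inherit the smi-ness of the originals. A sketch of the presumed bookkeeping — RaiseHeight's definition is not part of this hunk, so this is an assumption:

const int kTOSKnownSmiMapSize = 4;

// Presumed effect of RaiseHeight on the smi map: shift the existing bits up
// and OR in the knowledge about the newly pushed slots (bit 0 = new TOS).
unsigned RaiseSmiMap(unsigned map, int count, unsigned known_smi_bits) {
  unsigned mask = (1u << kTOSKnownSmiMapSize) - 1;
  return ((map << count) | known_smi_bits) & mask;
}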
@@ -592,7 +606,7 @@ Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
- element_count_--;
+ LowerHeight(1);
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
__ pop(r1);
@@ -620,9 +634,19 @@ void VirtualFrame::EnsureOneFreeTOSRegister() {
}
-void VirtualFrame::EmitPush(Register reg) {
- element_count_++;
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
+ RaiseHeight(1, info.IsSmi() ? 1 : 0);
+ if (reg.is(cp)) {
+ // If we are pushing cp then we are about to make a call and things have to
+ // be pushed to the physical stack. There's nothing to be gained by moving
+ // to a TOS register and then pushing that; we might as well push to the
+ // physical stack immediately.
+ MergeTOSTo(NO_TOS_REGISTERS);
+ __ push(reg);
+ return;
+ }
if (SpilledScope::is_spilled()) {
+ ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
__ push(reg);
return;
}
@@ -644,6 +668,9 @@ void VirtualFrame::EmitPush(Register reg) {
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
+ if (this_far_down < kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ &= ~(1 << this_far_down);
+ }
if (this_far_down == 0) {
Pop();
Register dest = GetTOSRegister();
@@ -684,8 +711,8 @@ Register VirtualFrame::GetTOSRegister() {
}
-void VirtualFrame::EmitPush(Operand operand) {
- element_count_++;
+void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
+ RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ mov(r0, operand);
__ push(r0);
@@ -697,8 +724,8 @@ void VirtualFrame::EmitPush(Operand operand) {
}
-void VirtualFrame::EmitPush(MemOperand operand) {
- element_count_++;
+void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
+ RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ ldr(r0, operand);
__ push(r0);
@@ -711,7 +738,7 @@ void VirtualFrame::EmitPush(MemOperand operand) {
void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- element_count_++;
+ RaiseHeight(1, 0);
if (SpilledScope::is_spilled()) {
__ LoadRoot(r0, index);
__ push(r0);
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 9471d61e..e97ad496 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -107,14 +107,14 @@ class VirtualFrame : public ZoneObject {
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
- inline CodeGenerator* cgen();
+ inline CodeGenerator* cgen() const;
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
- int element_count() { return element_count_; }
+ int element_count() const { return element_count_; }
// The height of the virtual expression stack.
- inline int height();
+ inline int height() const;
bool is_used(int num) {
switch (num) {
@@ -154,15 +154,12 @@ class VirtualFrame : public ZoneObject {
// Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted except to bring the
// frame to a spilled state.
- void Forget(int count) {
- SpillAll();
- element_count_ -= count;
- }
+ void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
- void AssertIsSpilled() {
+ void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
@@ -184,7 +181,13 @@ class VirtualFrame : public ZoneObject {
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
- void MergeTo(VirtualFrame* expected);
+ void MergeTo(VirtualFrame* expected, Condition cond = al);
+ void MergeTo(const VirtualFrame* expected, Condition cond = al);
+
+ // Checks whether this frame can be branched to by the other frame.
+ bool IsCompatibleWith(const VirtualFrame* other) const {
+ return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
+ }
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
@@ -234,6 +237,11 @@ class VirtualFrame : public ZoneObject {
return MemOperand(sp, adjusted_index * kPointerSize);
}
+ bool KnownSmiAt(int index) {
+ if (index >= kTOSKnownSmiMapSize) return false;
+ return (tos_known_smi_map_ & (1 << index)) != 0;
+ }
+
// A frame-allocated local as an assembly operand.
inline MemOperand LocalAt(int index);
@@ -352,9 +360,9 @@ class VirtualFrame : public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
- void EmitPush(Register reg);
- void EmitPush(Operand operand);
- void EmitPush(MemOperand operand);
+ void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
@@ -419,6 +427,8 @@ class VirtualFrame : public ZoneObject {
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
+ static const int kTOSKnownSmiMapSize = 4;
+ unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
// The index of the element that is at the processor's stack pointer
// (the sp register). For now since everything is in memory it is given
@@ -426,13 +436,13 @@ class VirtualFrame : public ZoneObject {
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
- inline int local_count();
+ inline int parameter_count() const;
+ inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
- inline int frame_pointer();
+ inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
@@ -448,10 +458,10 @@ class VirtualFrame : public ZoneObject {
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
- inline int local0_index();
+ inline int local0_index() const;
// The index of the base of the expression stack.
- inline int expression_base_index();
+ inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
@@ -469,9 +479,28 @@ class VirtualFrame : public ZoneObject {
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
- void MergeTOSTo(TopOfStack expected_state);
+ void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
- inline bool Equals(VirtualFrame* other);
+ inline bool Equals(const VirtualFrame* other);
+
+ inline void LowerHeight(int count) {
+ element_count_ -= count;
+ if (count >= kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ = 0;
+ } else {
+ tos_known_smi_map_ >>= count;
+ }
+ }
+
+ inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
+ ASSERT(known_smi_map < (1u << count));
+ element_count_ += count;
+ if (count >= kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ = known_smi_map;
+ } else {
+ tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
+ }
+ }
friend class JumpTarget;
};
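
To see what the LowerHeight/RaiseHeight bookkeeping above is doing, here is a small standalone sketch of the same four-bit known-smi window outside V8 (the KnownSmiWindow type and the main() driver are illustrative, not part of the tree). Bit i records whether the element i slots below the top is known to be a smi; pops shift the window right, pushes shift it left and OR in bits for the new elements:

    #include <cassert>
    #include <cstdio>

    // Minimal sketch of the known-smi window kept by the ARM virtual frame.
    // Bit i is set iff the element i slots down from the top is a known smi.
    struct KnownSmiWindow {
      static const int kSize = 4;  // mirrors kTOSKnownSmiMapSize
      unsigned map = 0;

      // Popping 'count' elements slides the window down: bit 0 falls off.
      void Lower(int count) {
        map = (count >= kSize) ? 0 : (map >> count);
      }

      // Pushing 'count' elements slides the window up and records which of
      // the new elements are known smis (low 'count' bits of known_smi_map).
      void Raise(int count, unsigned known_smi_map) {
        assert(known_smi_map < (1u << count));
        map = (count >= kSize) ? known_smi_map
                               : ((map << count) | known_smi_map);
      }

      bool KnownSmiAt(int index) const {
        return index < kSize && ((map >> index) & 1) != 0;
      }
    };

    int main() {
      KnownSmiWindow w;
      w.Raise(1, 1);             // push a known smi
      w.Raise(1, 0);             // push a value of unknown type
      assert(!w.KnownSmiAt(0));  // top: unknown
      assert(w.KnownSmiAt(1));   // below it: still a known smi
      w.Lower(1);                // pop the unknown value
      assert(w.KnownSmiAt(0));   // the smi is back on top
      printf("map = %u\n", w.map);
      return 0;
    }

The IsCompatibleWith check falls out of the same representation: one frame may branch to another as long as it claims no smi knowledge the target lacks, i.e. (a & ~b) == 0.
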
diff --git a/src/assembler.cc b/src/assembler.cc
index 871ca86e..6a46f615 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -449,6 +449,11 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
+ case RelocInfo::DEBUG_BREAK_SLOT:
+#ifndef ENABLE_DEBUGGER_SUPPORT
+ UNREACHABLE();
+#endif
+ return "debug break slot";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@@ -513,6 +518,7 @@ void RelocInfo::Verify() {
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
+ case DEBUG_BREAK_SLOT:
case NONE:
break;
case NUMBER_OF_MODES:
diff --git a/src/assembler.h b/src/assembler.h
index 74613b34..fb75d6dc 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -118,9 +118,9 @@ class RelocInfo BASE_EMBEDDED {
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- CODE_TARGET_CONTEXT, // code target used for contextual loads.
- DEBUG_BREAK,
- CODE_TARGET, // code target which is not any of the above.
+ CODE_TARGET_CONTEXT, // Code target used for contextual loads.
+ DEBUG_BREAK, // Code target for the debugger statement.
+ CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,
// Everything after runtime_entry (inclusive) is not GC'ed.
@@ -129,6 +129,7 @@ class RelocInfo BASE_EMBEDDED {
COMMENT,
POSITION, // See comment for kNoPosition above.
STATEMENT_POSITION, // See comment for kNoPosition above.
+ DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -174,6 +175,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsInternalReference(Mode mode) {
return mode == INTERNAL_REFERENCE;
}
+ static inline bool IsDebugBreakSlot(Mode mode) {
+ return mode == DEBUG_BREAK_SLOT;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -243,6 +247,10 @@ class RelocInfo BASE_EMBEDDED {
// with a call to the debugger.
INLINE(bool IsPatchedReturnSequence());
+ // Check whether this debug break slot has been patched with a call to the
+ // debugger.
+ INLINE(bool IsPatchedDebugBreakSlotSequence());
+
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
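
ModeMask exists so that relocation scanners can select several modes at once with a single bit test. A minimal sketch of the idiom (the Mode enum here is a trimmed stand-in for the full list above, and RelocIterator itself is omitted):

    #include <cassert>

    // Sketch of the bit-mask filter idiom used with relocation scanning:
    // each Mode occupies one bit, and a scan mask is the OR of the modes
    // of interest.
    enum Mode { JS_RETURN, DEBUG_BREAK_SLOT, CODE_TARGET, NUMBER_OF_MODES };

    inline int ModeMask(Mode mode) { return 1 << mode; }

    inline bool MaskMatches(int mask, Mode mode) {
      return (mask & ModeMask(mode)) != 0;
    }

    int main() {
      // Scan only for places where a breakpoint can be patched in.
      int mask = ModeMask(JS_RETURN) | ModeMask(DEBUG_BREAK_SLOT);
      assert(MaskMatches(mask, DEBUG_BREAK_SLOT));
      assert(!MaskMatches(mask, CODE_TARGET));
      return 0;
    }
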
diff --git a/src/ast.h b/src/ast.h
index a3a97341..b9a7a3dd 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1469,10 +1469,14 @@ class Conditional: public Expression {
public:
Conditional(Expression* condition,
Expression* then_expression,
- Expression* else_expression)
+ Expression* else_expression,
+ int then_expression_position,
+ int else_expression_position)
: condition_(condition),
then_expression_(then_expression),
- else_expression_(else_expression) { }
+ else_expression_(else_expression),
+ then_expression_position_(then_expression_position),
+ else_expression_position_(else_expression_position) { }
virtual void Accept(AstVisitor* v);
@@ -1482,10 +1486,15 @@ class Conditional: public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
+ int then_expression_position() { return then_expression_position_; }
+ int else_expression_position() { return else_expression_position_; }
+
private:
Expression* condition_;
Expression* then_expression_;
Expression* else_expression_;
+ int then_expression_position_;
+ int else_expression_position_;
};
diff --git a/src/builtins.cc b/src/builtins.cc
index 9a0fbd27..7116dc90 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -305,7 +305,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
- // remembered sets.
+ // region dirty marks.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
@@ -322,7 +322,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
former_start[to_trim] = Heap::fixed_array_map();
- former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
+ former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
ASSERT_EQ(elms->address() + to_trim * kPointerSize,
(elms + to_trim * kPointerSize)->address());
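
The Smi::FromInt fix above matters because the length field holds a tagged value. A simplified sketch of 32-bit smi tagging and why the raw integer was wrong (constants simplified; the real code uses the kSmiTag/kSmiTagSize constants):

    #include <cassert>
    #include <cstdint>

    // Simplified 32-bit smi encoding: the payload shifted left one bit, so
    // a smi always has a low bit of 0 while heap pointers carry a tag of 1.
    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = 1;

    inline intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    inline bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }

    inline int SmiToInt(intptr_t word) {
      assert(IsSmi(word));
      return static_cast<int>(word >> kSmiTagSize);
    }

    int main() {
      intptr_t stored_length = SmiFromInt(42);
      assert(IsSmi(stored_length) && SmiToInt(stored_length) == 42);
      // Storing the raw integer n instead decodes as the smi n/2 when n is
      // even, or masquerades as a tagged heap pointer when n is odd.
      assert(SmiToInt(42) == 21);
      return 0;
    }
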
@@ -500,7 +500,7 @@ BUILTIN(ArrayShift) {
if (Heap::new_space()->Contains(elms)) {
// As elms still in the same space they used to be (new space),
- // there is no need to update remembered set.
+ // there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
@@ -1360,10 +1360,17 @@ static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
Debug::GenerateStubNoRegistersDebugBreak(masm);
}
+
+static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
+ Debug::GenerateSlotDebugBreak(masm);
+}
+
+
static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
Debug::GeneratePlainReturnLiveEdit(masm);
}
+
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
Debug::GenerateFrameDropperLiveEdit(masm);
}
diff --git a/src/builtins.h b/src/builtins.h
index dd2e3cbf..1fab3754 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -127,6 +127,7 @@ enum BuiltinExtraArguments {
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK)
#else
diff --git a/src/checks.h b/src/checks.h
index cdcd18ad..c2e40ba9 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -285,14 +285,16 @@ template <int> class StaticAssertionHelper { };
#define ASSERT_RESULT(expr) CHECK(expr)
#define ASSERT(condition) CHECK(condition)
#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
+#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
+#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define STATIC_ASSERT(test) STATIC_CHECK(test)
#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
#define ASSERT_EQ(v1, v2) ((void) 0)
-#define ASSERT_NE(v1, v2) ((void) 0)
+#define ASSERT_NE(v1, v2) ((void) 0)
+#define ASSERT_GE(v1, v2) ((void) 0)
#define STATIC_ASSERT(test) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif
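
ASSERT_GE follows the same pattern as its siblings: a real comparison check in debug builds, compiled away to nothing in release builds. A minimal sketch of that pattern with a hypothetical MY_ASSERT_GE macro:

    #include <cstdio>
    #include <cstdlib>

    // Sketch of the debug-only comparison macro pattern: active under
    // DEBUG, compiled out otherwise so release builds pay nothing.
    #ifdef DEBUG
    #define MY_ASSERT_GE(v1, v2)                                  \
      do {                                                        \
        if (!((v1) >= (v2))) {                                    \
          fprintf(stderr, "check failed: %s >= %s\n", #v1, #v2);  \
          abort();                                                \
        }                                                         \
      } while (false)
    #else
    #define MY_ASSERT_GE(v1, v2) ((void) 0)
    #endif

    int main(int argc, char**) {
      MY_ASSERT_GE(argc, 1);  // always holds: argv[0] is counted
      return 0;
    }
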
diff --git a/src/codegen.cc b/src/codegen.cc
index f89399a9..686e173e 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -254,10 +254,28 @@ Handle<Code> CodeGenerator::ComputeCallInitialize(
// that it needs so we need to ensure it is generated already.
ComputeCallInitialize(argc, NOT_IN_LOOP);
}
- CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc, in_loop), Code);
+ CALL_HEAP_FUNCTION(
+ StubCache::ComputeCallInitialize(argc, in_loop, Code::CALL_IC),
+ Code);
}
+Handle<Code> CodeGenerator::ComputeKeyedCallInitialize(
+ int argc,
+ InLoopFlag in_loop) {
+ if (in_loop == IN_LOOP) {
+ // Force the creation of the corresponding stub outside loops,
+ // because it may be used when clearing the ICs later - it is
+ // possible for a series of IC transitions to lose the in-loop
+ // information, and the IC clearing code can't generate a stub
+ // that it needs so we need to ensure it is generated already.
+ ComputeKeyedCallInitialize(argc, NOT_IN_LOOP);
+ }
+ CALL_HEAP_FUNCTION(
+ StubCache::ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC),
+ Code);
+}
+
void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
int length = declarations->length();
int globals = 0;
@@ -397,32 +415,41 @@ CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
}
-void CodeGenerator::RecordPositions(MacroAssembler* masm, int pos) {
+bool CodeGenerator::RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here) {
if (pos != RelocInfo::kNoPosition) {
masm->RecordStatementPosition(pos);
masm->RecordPosition(pos);
+ if (right_here) {
+ return masm->WriteRecordedPositions();
+ }
}
+ return false;
}
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) RecordPositions(masm(), fun->start_position());
+ if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
}
void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) RecordPositions(masm(), fun->end_position());
+ if (FLAG_debug_info) RecordPositions(masm(), fun->end_position(), false);
}
void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos());
+ if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
}
+
void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
- if (FLAG_debug_info) RecordPositions(masm(), stmt->condition_position());
+ if (FLAG_debug_info)
+ RecordPositions(masm(), stmt->condition_position(), false);
}
+
void CodeGenerator::CodeForSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
masm()->RecordPosition(pos);
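
The new right_here flag makes RecordPositions report whether a position entry was actually flushed at the current pc, which is what lets full-codegen decide to plant a debug break slot (see the full-codegen.cc hunks below). A toy model of that contract (PositionTracker is a stand-in for the assembler's recorded-position state, not a real V8 type):

    #include <cstdio>

    // Toy model of the WriteRecordedPositions contract: a position is only
    // "written" (and the call only returns true) when it differs from the
    // last one flushed at the current pc.
    struct PositionTracker {
      int pending = -1;
      int last_written = -1;

      void Record(int pos) { pending = pos; }

      // Returns true iff a new position entry was actually emitted.
      bool WriteRecorded() {
        if (pending >= 0 && pending != last_written) {
          last_written = pending;
          return true;
        }
        return false;
      }
    };

    // Mirrors the shape of the new RecordPositions(masm, pos, right_here).
    bool RecordPositions(PositionTracker* t, int pos, bool right_here) {
      if (pos == -1) return false;  // kNoPosition
      t->Record(pos);
      return right_here ? t->WriteRecorded() : false;
    }

    int main() {
      PositionTracker t;
      if (RecordPositions(&t, 10, true)) printf("slot needed at pos 10\n");
      if (!RecordPositions(&t, 10, true)) printf("pos 10 already written\n");
      return 0;
    }
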
diff --git a/src/codegen.h b/src/codegen.h
index 358c6fcc..0576fbb9 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -110,8 +110,9 @@ namespace internal {
F(ClassOf, 1, 1) \
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
- F(FastCharCodeAt, 2, 1) \
- F(CharFromCode, 1, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringCharAt, 2, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
F(RandomHeapNumber, 0, 1) \
@@ -179,6 +180,111 @@ class CodeGeneratorScope BASE_EMBEDDED {
};
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+
+// State of used registers in a virtual frame.
+class FrameRegisterState {
+ public:
+ // Captures the current state of the given frame.
+ explicit FrameRegisterState(VirtualFrame* frame);
+
+ // Saves the state in the stack.
+ void Save(MacroAssembler* masm) const;
+
+ // Restores the state from the stack.
+ void Restore(MacroAssembler* masm) const;
+
+ private:
+ // Constants indicating special actions. They should not be multiples
+ // of kPointerSize so they will not collide with valid offsets from
+ // the frame pointer.
+ static const int kIgnore = -1;
+ static const int kPush = 1;
+
+ // This flag is ored with a valid offset from the frame pointer, so
+ // it should fit in the low zero bits of a valid offset.
+ static const int kSyncedFlag = 2;
+
+ int registers_[RegisterAllocator::kNumRegisters];
+};
+
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+
+
+class FrameRegisterState {
+ public:
+ inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
+
+ inline const VirtualFrame* frame() const { return &frame_; }
+
+ private:
+ VirtualFrame frame_;
+};
+
+#else
+
+#error Unsupported target architecture.
+
+#endif
+
+
+// Helper interface to prepare to/restore after making runtime calls.
+class RuntimeCallHelper {
+ public:
+ virtual ~RuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const = 0;
+
+ virtual void AfterCall(MacroAssembler* masm) const = 0;
+
+ protected:
+ RuntimeCallHelper() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
+};
+
+
+// RuntimeCallHelper implementation that saves/restores state of a
+// virtual frame.
+class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ // Does not take ownership of |frame_state|.
+ explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
+ : frame_state_(frame_state) {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const;
+
+ virtual void AfterCall(MacroAssembler* masm) const;
+
+ private:
+ const FrameRegisterState* frame_state_;
+};
+
+
+// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
+// newly created internal frame before/after the runtime call.
+class ICRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ ICRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const;
+
+ virtual void AfterCall(MacroAssembler* masm) const;
+};
+
+
+// Trivial RuntimeCallHelper implementation.
+class NopRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ NopRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const {}
+
+ virtual void AfterCall(MacroAssembler* masm) const {}
+};
+
+
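
RuntimeCallHelper decouples a generated slow path from its embedding: the same GenerateSlow code can run inside a codegen frame (VirtualFrameRuntimeCallHelper), inside an IC's internal frame (ICRuntimeCallHelper), or with no bookkeeping at all (NopRuntimeCallHelper). A hedged sketch of the shape of such a call site, with a made-up LoggingCallHelper standing in for a real embedding and printf standing in for emitted code:

    #include <cstdio>

    struct MacroAssembler {};  // stand-in for the real assembler

    // The interface as introduced above: hooks bracketing a runtime call.
    class RuntimeCallHelper {
     public:
      virtual ~RuntimeCallHelper() {}
      virtual void BeforeCall(MacroAssembler* masm) const = 0;
      virtual void AfterCall(MacroAssembler* masm) const = 0;
    };

    // Example embedding, modeled on ICRuntimeCallHelper: enter an internal
    // frame before the call and leave it afterwards.
    class LoggingCallHelper : public RuntimeCallHelper {
     public:
      virtual void BeforeCall(MacroAssembler*) const {
        printf("enter internal frame\n");
      }
      virtual void AfterCall(MacroAssembler*) const {
        printf("leave internal frame\n");
      }
    };

    // Shape of a generator slow path: bracket the call with the hooks.
    void GenerateSlow(MacroAssembler* masm, const RuntimeCallHelper& helper) {
      helper.BeforeCall(masm);
      printf("  call runtime\n");  // __ CallRuntime(...) in the real code
      helper.AfterCall(masm);
    }

    int main() {
      MacroAssembler masm;
      LoggingCallHelper helper;
      GenerateSlow(&masm, helper);
      return 0;
    }
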
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@@ -209,6 +315,8 @@ class DeferredCode: public ZoneObject {
inline void Branch(Condition cc);
void BindExit() { masm_->bind(&exit_label_); }
+ const FrameRegisterState* frame_state() const { return &frame_state_; }
+
void SaveRegisters();
void RestoreRegisters();
@@ -216,28 +324,13 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm_;
private:
- // Constants indicating special actions. They should not be multiples
- // of kPointerSize so they will not collide with valid offsets from
- // the frame pointer.
- static const int kIgnore = -1;
- static const int kPush = 1;
-
- // This flag is ored with a valid offset from the frame pointer, so
- // it should fit in the low zero bits of a valid offset.
- static const int kSyncedFlag = 2;
-
int statement_position_;
int position_;
Label entry_label_;
Label exit_label_;
- // C++ doesn't allow zero length arrays, so we make the array length 1 even
- // if we don't need it.
- static const int kRegistersArrayLength =
- (RegisterAllocator::kNumRegisters == 0) ?
- 1 : RegisterAllocator::kNumRegisters;
- int registers_[kRegistersArrayLength];
+ FrameRegisterState frame_state_;
#ifdef DEBUG
const char* comment_;
@@ -611,6 +704,163 @@ class ToBooleanStub: public CodeStub {
};
+enum StringIndexFlags {
+ // Accepts smis or heap numbers.
+ STRING_INDEX_IS_NUMBER,
+
+ // Accepts smis or heap numbers that are valid array indices
+ // (ECMA-262 15.4). Invalid indices are reported as being out of
+ // range.
+ STRING_INDEX_IS_ARRAY_INDEX
+};
+
+
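
STRING_INDEX_IS_ARRAY_INDEX refers to the ECMA-262 15.4 notion of an array index: an integer in [0, 2^32 - 2], with 2^32 - 1 excluded. A small sketch of that validity test (IsValidArrayIndex is an illustrative helper, not the V8 routine):

    #include <cassert>
    #include <cstdint>

    // ECMA-262 15.4: a valid array index is an integer i in [0, 2^32 - 2].
    // Anything else is reported out of range under
    // STRING_INDEX_IS_ARRAY_INDEX instead of taking the number path.
    bool IsValidArrayIndex(double value) {
      if (value < 0 || value >= 4294967295.0) return false;  // 2^32 - 1
      return static_cast<double>(static_cast<uint32_t>(value)) == value;
    }

    int main() {
      assert(IsValidArrayIndex(0));
      assert(IsValidArrayIndex(42));
      assert(!IsValidArrayIndex(-1));
      assert(!IsValidArrayIndex(1.5));
      assert(!IsValidArrayIndex(4294967295.0));
      return 0;
    }
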
+// Generates code implementing String.prototype.charCodeAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch| and |result| are clobbered.
+class StringCharCodeAtGenerator {
+ public:
+ StringCharCodeAtGenerator(Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : object_(object),
+ index_(index),
+ scratch_(scratch),
+ result_(result),
+ receiver_not_string_(receiver_not_string),
+ index_not_number_(index_not_number),
+ index_out_of_range_(index_out_of_range),
+ index_flags_(index_flags) {
+ ASSERT(!scratch_.is(object_));
+ ASSERT(!scratch_.is(index_));
+ ASSERT(!scratch_.is(result_));
+ ASSERT(!result_.is(object_));
+ ASSERT(!result_.is(index_));
+ }
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register object_;
+ Register index_;
+ Register scratch_;
+ Register result_;
+
+ Label* receiver_not_string_;
+ Label* index_not_number_;
+ Label* index_out_of_range_;
+
+ StringIndexFlags index_flags_;
+
+ Label call_runtime_;
+ Label index_not_smi_;
+ Label got_smi_index_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
+};
+
+
+// Generates code for creating a one-char string from a char code.
+class StringCharFromCodeGenerator {
+ public:
+ StringCharFromCodeGenerator(Register code,
+ Register result)
+ : code_(code),
+ result_(result) {
+ ASSERT(!code_.is(result_));
+ }
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register code_;
+ Register result_;
+
+ Label slow_case_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
+};
+
+
+// Generates code implementing String.prototype.charAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
+class StringCharAtGenerator {
+ public:
+ StringCharAtGenerator(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : char_code_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ receiver_not_string,
+ index_not_number,
+ index_out_of_range,
+ index_flags),
+ char_from_code_generator_(scratch2, result) {}
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ StringCharCodeAtGenerator char_code_at_generator_;
+ StringCharFromCodeGenerator char_from_code_generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
+};
+
+
} // namespace internal
} // namespace v8
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index cec10fd2..14252a58 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -79,6 +79,8 @@ class CompilationSubCache {
// young generation.
void Age();
+ bool HasFunction(SharedFunctionInfo* function_info);
+
// GC support.
void Iterate(ObjectVisitor* v);
@@ -204,6 +206,27 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
}
+bool CompilationSubCache::HasFunction(SharedFunctionInfo* function_info) {
+ if (function_info->script()->IsUndefined() ||
+ Script::cast(function_info->script())->source()->IsUndefined()) {
+ return false;
+ }
+
+ String* source =
+ String::cast(Script::cast(function_info->script())->source());
+ // Check all generations.
+ for (int generation = 0; generation < generations(); generation++) {
+ if (tables_[generation]->IsUndefined()) continue;
+
+ CompilationCacheTable* table =
+ CompilationCacheTable::cast(tables_[generation]);
+ Object* object = table->Lookup(source);
+ if (object->IsSharedFunctionInfo()) return true;
+ }
+ return false;
+}
+
+
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
@@ -506,6 +529,11 @@ void CompilationCache::Clear() {
}
+bool CompilationCache::HasFunction(SharedFunctionInfo* function_info) {
+ return script.HasFunction(function_info);
+}
+
+
void CompilationCache::Iterate(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Iterate(v);
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 6358a260..583f04c4 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -79,6 +79,9 @@ class CompilationCache {
// Clear the cache - also used to initialize the cache at startup.
static void Clear();
+
+ static bool HasFunction(SharedFunctionInfo* function_info);
+
// GC support.
static void Iterate(ObjectVisitor* v);
diff --git a/src/compiler.cc b/src/compiler.cc
index ca92ed92..ebb97435 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -601,6 +601,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_try_full_codegen(lit->try_full_codegen());
+ function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
}
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 31c4658e..c8d29f8c 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
+#include "frames-inl.h"
#include "log-inl.h"
#include "../include/v8-profiler.h"
@@ -49,7 +50,8 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
- enqueue_order_(0) { }
+ enqueue_order_(0) {
+}
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
@@ -181,6 +183,24 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
}
+void ProfilerEventsProcessor::AddCurrentStack() {
+ TickSampleEventRecord record;
+ TickSample* sample = &record.sample;
+ sample->state = VMState::current_state();
+ sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
+ sample->frames_count = 0;
+ for (StackTraceFrameIterator it;
+ !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
+ it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ sample->stack[sample->frames_count++] =
+ reinterpret_cast<Address>(frame->function());
+ }
+ record.order = enqueue_order_;
+ ticks_from_vm_buffer_.Enqueue(record);
+}
+
+
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
@@ -205,9 +225,16 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
+ if (!ticks_from_vm_buffer_.IsEmpty()
+ && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
+ TickSampleEventRecord record;
+ ticks_from_vm_buffer_.Dequeue(&record);
+ generator_->RecordTickSample(record.sample);
+ }
+
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return false;
+ if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
ticks_buffer_.FinishDequeue();
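
ProcessTicks now merges two sources that share one sequence numbering: samples the signal handler wrote into the circular buffer and samples AddCurrentStack pushed from the VM thread. A hedged sketch of that merge over two plain std::queue instances (the Record type and the printf bodies are illustrative; the real return-value handling differs in detail):

    #include <cstdio>
    #include <queue>

    struct Record { unsigned order; const char* origin; };

    // Records synthesized on the VM thread (vm) are spliced in ahead of
    // sampler records (ticks) whenever their sequence number matches the
    // current dequeue order.
    bool ProcessTicks(std::queue<Record>* ticks, std::queue<Record>* vm,
                      unsigned dequeue_order) {
      while (true) {
        if (!vm->empty() && vm->front().order == dequeue_order) {
          printf("sample from %s (order %u)\n", vm->front().origin,
                 vm->front().order);
          vm->pop();
        }
        if (ticks->empty()) return !vm->empty();  // VM records pending?
        Record rec = ticks->front();
        if (rec.order != dequeue_order) return true;  // caught up to events
        printf("sample from %s (order %u)\n", rec.origin, rec.order);
        ticks->pop();
      }
    }

    int main() {
      std::queue<Record> ticks, vm;
      ticks.push({1, "sampler"});
      vm.push({1, "vm thread"});
      ticks.push({2, "sampler"});
      ProcessTicks(&ticks, &vm, 1);  // drains both order-1 records
      return 0;
    }
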
@@ -267,7 +294,8 @@ CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
int CpuProfiler::GetProfilesCount() {
ASSERT(singleton_ != NULL);
// The count of profiles doesn't depend on a security token.
- return singleton_->profiles_->Profiles(CodeEntry::kNoSecurityToken)->length();
+ return singleton_->profiles_->Profiles(
+ TokenEnumerator::kNoSecurityToken)->length();
}
@@ -353,7 +381,7 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
- int security_token_id = CodeEntry::kNoSecurityToken;
+ int security_token_id = TokenEnumerator::kNoSecurityToken;
if (function->unchecked_context()->IsContext()) {
security_token_id = singleton_->token_enumerator_->GetTokenId(
function->context()->global_context()->security_token());
@@ -416,13 +444,12 @@ void CpuProfiler::StartCollectingProfile(const char* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
+ processor_->AddCurrentStack();
}
void CpuProfiler::StartCollectingProfile(String* title) {
- if (profiles_->StartProfiling(title, next_profile_uid_++)) {
- StartProcessorIfNotStarted();
- }
+ StartCollectingProfile(profiles_->GetName(title));
}
@@ -434,10 +461,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_->Start();
- // Enable stack sampling.
- // It is important to have it started prior to logging, see issue 683:
- // http://code.google.com/p/v8/issues/detail?id=683
- reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
Logger::LogCodeObjects();
@@ -445,6 +468,8 @@ void CpuProfiler::StartProcessorIfNotStarted() {
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
+ // Enable stack sampling.
+ reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
}
}
@@ -452,9 +477,10 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
- CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken,
- title,
- actual_sampling_rate);
+ CpuProfile* result =
+ profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
+ title,
+ actual_sampling_rate);
if (result != NULL) {
result->Print();
}
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 81f9ae39..03b81764 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -105,6 +105,11 @@ class CodeAliasEventRecord : public CodeEventRecord {
class TickSampleEventRecord BASE_EMBEDDED {
public:
+ TickSampleEventRecord()
+ : filler(1) {
+ ASSERT(filler != SamplingCircularQueue::kClear);
+ }
+
// The first machine word of a TickSampleEventRecord must not ever
// become equal to SamplingCircularQueue::kClear. As both order and
// TickSample's first field are not reliable in this sense (order
@@ -119,9 +124,6 @@ class TickSampleEventRecord BASE_EMBEDDED {
}
INLINE(static TickSampleEventRecord* init(void* value));
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
};
@@ -159,6 +161,8 @@ class ProfilerEventsProcessor : public Thread {
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
+ // Puts current stack into tick sample events buffer.
+ void AddCurrentStack();
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -184,6 +188,7 @@ class ProfilerEventsProcessor : public Thread {
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
+ UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
diff --git a/src/d8.cc b/src/d8.cc
index a69320a2..7fd7925b 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -576,6 +576,9 @@ Handle<String> Shell::ReadFile(const char* name) {
void Shell::RunShell() {
LineEditor* editor = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), editor->name());
+ if (i::FLAG_debugger) {
+ printf("JavaScript debugger enabled\n");
+ }
editor->Open();
while (true) {
Locker locker;
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 4e7620ac..55d85825 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -318,6 +318,9 @@ Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
+ // Don't try to get clever with const or dynamic variables.
+ if (loop_var->mode() != Variable::VAR) return NULL;
+
// The initial value has to be a smi.
Literal* init_lit = init->value()->AsLiteral();
if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
diff --git a/src/debug.cc b/src/debug.cc
index 8cb95efd..98e366c7 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -62,13 +62,14 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
-static Handle<Code> ComputeCallDebugBreak(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc), Code);
+static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
+ CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc, kind), Code);
}
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugPrepareStepIn(argc), Code);
+static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
+ CALL_HEAP_FUNCTION(
+ StubCache::ComputeCallDebugPrepareStepIn(argc, kind), Code);
}
@@ -128,10 +129,14 @@ void BreakLocationIterator::Next() {
ASSERT(statement_position_ >= 0);
}
- // Check for breakable code target. Look in the original code as setting
- // break points can cause the code targets in the running (debugged) code to
- // be of a different kind than in the original code.
- if (RelocInfo::IsCodeTarget(rmode())) {
+ if (IsDebugBreakSlot()) {
+ // There is always a possible break point at a debug break slot.
+ break_point_++;
+ return;
+ } else if (RelocInfo::IsCodeTarget(rmode())) {
+ // Check for breakable code target. Look in the original code as setting
+ // break points can cause the code targets in the running (debugged) code
+ // to be of a different kind than in the original code.
Address target = original_rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
if ((code->is_inline_cache_stub() &&
@@ -328,6 +333,9 @@ void BreakLocationIterator::SetDebugBreak() {
if (RelocInfo::IsJSReturn(rmode())) {
// Patch the frame exit code with a break point.
SetDebugBreakAtReturn();
+ } else if (IsDebugBreakSlot()) {
+ // Patch the code in the break slot.
+ SetDebugBreakAtSlot();
} else {
// Patch the IC call.
SetDebugBreakAtIC();
@@ -345,6 +353,9 @@ void BreakLocationIterator::ClearDebugBreak() {
if (RelocInfo::IsJSReturn(rmode())) {
// Restore the frame exit code.
ClearDebugBreakAtReturn();
+ } else if (IsDebugBreakSlot()) {
+ // Restore the code in the break slot.
+ ClearDebugBreakAtSlot();
} else {
// Patch the IC call.
ClearDebugBreakAtIC();
@@ -360,13 +371,14 @@ void BreakLocationIterator::PrepareStepIn() {
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
- if (code->is_call_stub()) {
+ if (code->is_call_stub() || code->is_keyed_call_stub()) {
// Step in through IC call is handled by the runtime system. Therefore make
// sure that any current IC is cleared and the runtime system is
// called. If the executing code has a debug break at the location change
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count());
+ Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
+ code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@@ -415,6 +427,8 @@ bool BreakLocationIterator::HasBreakPoint() {
bool BreakLocationIterator::IsDebugBreak() {
if (RelocInfo::IsJSReturn(rmode())) {
return IsDebugBreakAtReturn();
+ } else if (IsDebugBreakSlot()) {
+ return IsDebugBreakAtSlot();
} else {
return Debug::IsDebugBreak(rinfo()->target_address());
}
@@ -476,6 +490,11 @@ bool BreakLocationIterator::IsDebuggerStatement() {
}
+bool BreakLocationIterator::IsDebugBreakSlot() {
+ return RelocInfo::DEBUG_BREAK_SLOT == rmode();
+}
+
+
Object* BreakLocationIterator::BreakPointObjects() {
return debug_info_->GetBreakPointObjects(code_position());
}
@@ -571,6 +590,7 @@ bool Debug::break_on_uncaught_exception_ = true;
Handle<Context> Debug::debug_context_ = Handle<Context>();
Code* Debug::debug_break_return_ = NULL;
+Code* Debug::debug_break_slot_ = NULL;
void ScriptCache::Add(Handle<Script> script) {
@@ -654,6 +674,10 @@ void Debug::Setup(bool create_heap_objects) {
debug_break_return_ =
Builtins::builtin(Builtins::Return_DebugBreak);
ASSERT(debug_break_return_->IsCode());
+ // Get code to handle debug break in debug break slots.
+ debug_break_slot_ =
+ Builtins::builtin(Builtins::Slot_DebugBreak);
+ ASSERT(debug_break_slot_->IsCode());
}
}
@@ -822,6 +846,7 @@ void Debug::PreemptionWhileInDebugger() {
void Debug::Iterate(ObjectVisitor* v) {
v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_)));
+ v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_slot_)));
}
@@ -1187,7 +1212,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
Address target = it.rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_call_stub()) {
+ if (code->is_call_stub() || code->is_keyed_call_stub()) {
is_call_target = true;
}
if (code->is_inline_cache_stub()) {
@@ -1373,7 +1398,8 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
if (code->is_inline_cache_stub()) {
switch (code->kind()) {
case Code::CALL_IC:
- return ComputeCallDebugBreak(code->arguments_count());
+ case Code::KEYED_CALL_IC:
+ return ComputeCallDebugBreak(code->arguments_count(), code->kind());
case Code::LOAD_IC:
return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
@@ -1628,16 +1654,21 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// break point is still active after processing the break point.
Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
- // Check if the location is at JS exit.
+ // Check if the location is at JS exit or debug break slot.
bool at_js_return = false;
bool break_at_js_return_active = false;
+ bool at_debug_break_slot = false;
RelocIterator it(debug_info->code());
- while (!it.done()) {
+ while (!it.done() && !at_js_return && !at_debug_break_slot) {
if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
at_js_return = (it.rinfo()->pc() ==
addr - Assembler::kPatchReturnSequenceAddressOffset);
break_at_js_return_active = it.rinfo()->IsPatchedReturnSequence();
}
+ if (RelocInfo::IsDebugBreakSlot(it.rinfo()->rmode())) {
+ at_debug_break_slot = (it.rinfo()->pc() ==
+ addr - Assembler::kPatchDebugBreakSlotAddressOffset);
+ }
it.next();
}
@@ -1654,29 +1685,68 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Move back to where the call instruction sequence started.
thread_local_.after_break_target_ =
addr - Assembler::kPatchReturnSequenceAddressOffset;
- } else {
- // Check if there still is a debug break call at the target address. If the
- // break point has been removed it will have disappeared. If it have
- // disappeared don't try to look in the original code as the running code
- // will have the right address. This takes care of the case where the last
- // break point is removed from the function and therefore no "original code"
- // is available. If the debug break call is still there find the address in
- // the original code.
- if (IsDebugBreak(Assembler::target_address_at(addr))) {
- // If the break point is still there find the call address which was
- // overwritten in the original code by the call to DebugBreakXXX.
-
- // Find the corresponding address in the original code.
- addr += original_code->instruction_start() - code->instruction_start();
- }
+ } else if (at_debug_break_slot) {
+ // Address of where the debug break slot starts.
+ addr = addr - Assembler::kPatchDebugBreakSlotAddressOffset;
+
+ // Continue just after the slot.
+ thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
+ } else if (IsDebugBreak(Assembler::target_address_at(addr))) {
+ // We now know that there is still a debug break call at the target address,
+ // so the break point is still there and the original code will hold the
+ // address to jump to in order to complete the call which is replaced by a
+ // call to DebugBreakXXX.
+
+ // Find the corresponding address in the original code.
+ addr += original_code->instruction_start() - code->instruction_start();
// Install jump to the call address in the original code. This will be the
// call which was overwritten by the call to DebugBreakXXX.
thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ } else {
+ // There is no longer a break point present. Don't try to look in the
+ // original code as the running code will have the right address. This takes
+ // care of the case where the last break point is removed from the function
+ // and therefore no "original code" is available.
+ thread_local_.after_break_target_ = Assembler::target_address_at(addr);
}
}
+bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
+ HandleScope scope;
+
+ // Get the executing function in which the debug break occurred.
+ Handle<SharedFunctionInfo> shared =
+ Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+ if (!EnsureDebugInfo(shared)) {
+ // Return if we failed to retrieve the debug info.
+ return false;
+ }
+ Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ Handle<Code> code(debug_info->code());
+#ifdef DEBUG
+ // Get the code which is actually executing.
+ Handle<Code> frame_code(frame->code());
+ ASSERT(frame_code.is_identical_to(code));
+#endif
+
+ // Find the call address in the running code.
+ Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+
+ // Check if the location is at JS return.
+ RelocIterator it(debug_info->code());
+ while (!it.done()) {
+ if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
+ return (it.rinfo()->pc() ==
+ addr - Assembler::kPatchReturnSequenceAddressOffset);
+ }
+ it.next();
+ }
+ return false;
+}
+
+
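
The rewritten tail of SetAfterBreakTarget above picks one of three resume strategies, and the two patched cases reduce to simple address arithmetic. A toy sketch (the offset constants are illustrative; the real ones are Assembler::kPatchDebugBreakSlotAddressOffset and Assembler::kDebugBreakSlotLength):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t Address;

    const int kPatchDebugBreakSlotAddressOffset = 2;  // illustrative
    const int kDebugBreakSlotLength = 5;              // illustrative

    // Toy model of picking the resume pc after a debug break: continue
    // just past the slot for slot breaks, otherwise redo the call that the
    // break replaced (its target recovered from the original code).
    Address AfterBreakTarget(Address addr, bool at_debug_break_slot,
                             Address original_call_target) {
      if (at_debug_break_slot) {
        Address slot_start = addr - kPatchDebugBreakSlotAddressOffset;
        return slot_start + kDebugBreakSlotLength;
      }
      return original_call_target;
    }

    int main() {
      printf("resume at %lx\n", static_cast<unsigned long>(
          AfterBreakTarget(0x1000, true, 0x2000)));
      return 0;
    }
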
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
thread_local_.frames_are_dropped_ = true;
thread_local_.break_frame_id_ = new_break_frame_id;
diff --git a/src/debug.h b/src/debug.h
index e2eecb8b..1c674711 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -146,6 +146,11 @@ class BreakLocationIterator {
void SetDebugBreakAtReturn();
void ClearDebugBreakAtReturn();
+ bool IsDebugBreakSlot();
+ bool IsDebugBreakAtSlot();
+ void SetDebugBreakAtSlot();
+ void ClearDebugBreakAtSlot();
+
DISALLOW_COPY_AND_ASSIGN(BreakLocationIterator);
};
@@ -265,6 +270,9 @@ class Debug {
// Check whether a global object is the debug global object.
static bool IsDebugGlobal(GlobalObject* global);
+ // Check whether this frame is just about to return.
+ static bool IsBreakAtReturn(JavaScriptFrame* frame);
+
// Fast check to see if any break points are active.
inline static bool has_break_points() { return has_break_points_; }
@@ -323,6 +331,7 @@ class Debug {
enum AddressId {
k_after_break_target_address,
k_debug_break_return_address,
+ k_debug_break_slot_address,
k_register_address
};
@@ -342,6 +351,12 @@ class Debug {
return &debug_break_return_;
}
+ // Access to the debug break in debug break slot code.
+ static Code* debug_break_slot() { return debug_break_slot_; }
+ static Code** debug_break_slot_address() {
+ return &debug_break_slot_;
+ }
+
static const int kEstimatedNofDebugInfoEntries = 16;
static const int kEstimatedNofBreakPointsInFunction = 16;
@@ -370,6 +385,7 @@ class Debug {
static void AfterGarbageCollection();
// Code generator routines.
+ static void GenerateSlot(MacroAssembler* masm);
static void GenerateLoadICDebugBreak(MacroAssembler* masm);
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
@@ -377,6 +393,7 @@ class Debug {
static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+ static void GenerateSlotDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
@@ -472,6 +489,9 @@ class Debug {
// Code to call for handling debug break on return.
static Code* debug_break_return_;
+ // Code to call for handling debug break in debug break slots.
+ static Code* debug_break_slot_;
+
DISALLOW_COPY_AND_ASSIGN(Debug);
};
@@ -895,6 +915,8 @@ class Debug_Address {
return reinterpret_cast<Address>(Debug::after_break_target_address());
case Debug::k_debug_break_return_address:
return reinterpret_cast<Address>(Debug::debug_break_return_address());
+ case Debug::k_debug_break_slot_address:
+ return reinterpret_cast<Address>(Debug::debug_break_slot_address());
case Debug::k_register_address:
return reinterpret_cast<Address>(Debug::register_address(reg_));
default:
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 8473cd9f..19cb6af7 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -246,7 +246,7 @@ static int DecodeIt(FILE* f,
if (code->ic_in_loop() == IN_LOOP) {
out.AddFormatted(", in_loop");
}
- if (kind == Code::CALL_IC) {
+ if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
} else if (kind == Code::STUB) {
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index c086df4c..91477f9a 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -191,6 +191,8 @@ DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
+DEFINE_bool(flush_code, false,
+ "flush code that we expect not to use again before full gc")
// v8.cc
DEFINE_bool(use_idle_notification, true,
@@ -277,7 +279,7 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_bool(help, false, "Print usage message, including flags, on console")
DEFINE_bool(dump_counters, false, "Dump counters on exit")
-DEFINE_bool(debugger, true, "Enable JavaScript debugger")
+DEFINE_bool(debugger, false, "Enable JavaScript debugger")
DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent")
@@ -333,7 +335,6 @@ DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
-DEFINE_bool(print_rset, false, "print remembered sets before GC")
// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 2ccbca87..b64a1790 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -439,6 +439,231 @@ void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
#undef CHECK_BAILOUT
+void BreakableStatementChecker::Check(Statement* stmt) {
+ Visit(stmt);
+}
+
+
+void BreakableStatementChecker::Check(Expression* expr) {
+ Visit(expr);
+}
+
+
+void BreakableStatementChecker::VisitDeclaration(Declaration* decl) {
+}
+
+
+void BreakableStatementChecker::VisitBlock(Block* stmt) {
+}
+
+
+void BreakableStatementChecker::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ // Check if expression is breakable.
+ Visit(stmt->expression());
+}
+
+
+void BreakableStatementChecker::VisitEmptyStatement(EmptyStatement* stmt) {
+}
+
+
+void BreakableStatementChecker::VisitIfStatement(IfStatement* stmt) {
+ // If the condition is breakable the if statement is breakable.
+ Visit(stmt->condition());
+}
+
+
+void BreakableStatementChecker::VisitContinueStatement(
+ ContinueStatement* stmt) {
+}
+
+
+void BreakableStatementChecker::VisitBreakStatement(BreakStatement* stmt) {
+}
+
+
+void BreakableStatementChecker::VisitReturnStatement(ReturnStatement* stmt) {
+ // Return is breakable if the expression is.
+ Visit(stmt->expression());
+}
+
+
+void BreakableStatementChecker::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void BreakableStatementChecker::VisitWithExitStatement(
+ WithExitStatement* stmt) {
+}
+
+
+void BreakableStatementChecker::VisitSwitchStatement(SwitchStatement* stmt) {
+// Switch statements are breakable if the tag expression is.
+ Visit(stmt->tag());
+}
+
+
+void BreakableStatementChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ // Mark do while as breakable to avoid adding a break slot in front of it.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitWhileStatement(WhileStatement* stmt) {
+ // Mark while statements breakable if the condition expression is.
+ Visit(stmt->cond());
+}
+
+
+void BreakableStatementChecker::VisitForStatement(ForStatement* stmt) {
+ // Mark for statements breakable if the condition expression is.
+ if (stmt->cond() != NULL) {
+ Visit(stmt->cond());
+ }
+}
+
+
+void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
+ // Mark for in statements breakable if the enumerable expression is.
+ Visit(stmt->enumerable());
+}
+
+
+void BreakableStatementChecker::VisitTryCatchStatement(
+ TryCatchStatement* stmt) {
+ // Mark try catch as breakable to avoid adding a break slot in front of it.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ // Mark try finally as breakable to avoid adding a break slot in front of it.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ // The debugger statement is breakable.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
+}
+
+
+void BreakableStatementChecker::VisitSharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral* expr) {
+}
+
+
+void BreakableStatementChecker::VisitConditional(Conditional* expr) {
+}
+
+
+void BreakableStatementChecker::VisitSlot(Slot* expr) {
+}
+
+
+void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
+}
+
+
+void BreakableStatementChecker::VisitLiteral(Literal* expr) {
+}
+
+
+void BreakableStatementChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
+}
+
+
+void BreakableStatementChecker::VisitObjectLiteral(ObjectLiteral* expr) {
+}
+
+
+void BreakableStatementChecker::VisitArrayLiteral(ArrayLiteral* expr) {
+}
+
+
+void BreakableStatementChecker::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+}
+
+
+void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
+ // If assigning to a property (including a global property) the assignment is
+ // breakable.
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL || (var != NULL && var->is_global())) {
+ is_breakable_ = true;
+ return;
+ }
+
+ // Otherwise the assignment is breakable if the assigned value is.
+ Visit(expr->value());
+}
+
+
+void BreakableStatementChecker::VisitThrow(Throw* expr) {
+ // Throw is breakable if the expression is.
+ Visit(expr->exception());
+}
+
+
+void BreakableStatementChecker::VisitProperty(Property* expr) {
+ // Property load is breakable.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitCall(Call* expr) {
+ // Function calls both through IC and call stub are breakable.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitCallNew(CallNew* expr) {
+ // Function calls through new are breakable.
+ is_breakable_ = true;
+}
+
+
+void BreakableStatementChecker::VisitCallRuntime(CallRuntime* expr) {
+}
+
+
+void BreakableStatementChecker::VisitUnaryOperation(UnaryOperation* expr) {
+ Visit(expr->expression());
+}
+
+
+void BreakableStatementChecker::VisitCountOperation(CountOperation* expr) {
+ Visit(expr->expression());
+}
+
+
+void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ Visit(expr->right());
+}
+
+
+void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
+ Visit(expr->left());
+ Visit(expr->right());
+}
+
+
+void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
+}
+
+
#define __ ACCESS_MASM(masm())
Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
@@ -552,7 +777,60 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
if (FLAG_debug_info) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (!Debugger::IsDebuggerActive()) {
+ CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ } else {
+ // Check if the statement will be breakable without adding a debug break
+ // slot.
+ BreakableStatementChecker checker;
+ checker.Check(stmt);
+ // Record the statement position right here if the statement is not
+ // breakable. For breakable statements the actual recording of the
+ // position will be postponed to the breakable code (typically an IC).
+ bool position_recorded = CodeGenerator::RecordPositions(
+ masm_, stmt->statement_pos(), !checker.is_breakable());
+ // If the position recording did record a new position generate a debug
+ // break slot to make the statement breakable.
+ if (position_recorded) {
+ Debug::GenerateSlot(masm_);
+ }
+ }
+#else
CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+#endif
+ }
+}
+
+
+void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
+ if (FLAG_debug_info) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (!Debugger::IsDebuggerActive()) {
+ CodeGenerator::RecordPositions(masm_, pos);
+ } else {
+ // Check if the expression will be breakable without adding a debug break
+ // slot.
+ BreakableStatementChecker checker;
+ checker.Check(expr);
+ // Record a statement position right here if the expression is not
+ // breakable. For breakable expressions the actual recording of the
+ // position will be postponed to the breakable code (typically an IC).
+ // NOTE this will record a statement position for something which might
+ // not be a statement. As stepping in the debugger will only stop at
+ // statement positions this is used for e.g. the condition expression of
+ // a do while loop.
+ bool position_recorded = CodeGenerator::RecordPositions(
+ masm_, pos, !checker.is_breakable());
+ // If the position recording did record a new position generate a debug
+ // break slot to make the statement breakable.
+ if (position_recorded) {
+ Debug::GenerateSlot(masm_);
+ }
+ }
+#else
+ CodeGenerator::RecordPositions(masm_, pos);
+#endif
}
}
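
The Debug::GenerateSlot calls above emit a debug break slot: a run of nops long enough to be patched, in place, into a call to the Slot_DebugBreak builtin, and restored to nops when the breakpoint is cleared. A toy byte-level model on a CISC-like target (the opcodes and the 5-byte length are illustrative; the real constant is Assembler::kDebugBreakSlotLength):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy model of a debug break slot: 5 bytes of nop that can later be
    // patched, in place, into "call rel32".
    const int kDebugBreakSlotLength = 5;
    const uint8_t kNop = 0x90;
    const uint8_t kCallOpcode = 0xE8;

    void GenerateSlot(uint8_t* code) {
      memset(code, kNop, kDebugBreakSlotLength);  // emit the slot
    }

    void SetDebugBreakAtSlot(uint8_t* slot, int32_t rel_target) {
      slot[0] = kCallOpcode;                      // patch to a call
      memcpy(slot + 1, &rel_target, sizeof(rel_target));
    }

    bool IsDebugBreakAtSlot(const uint8_t* slot) {
      return slot[0] == kCallOpcode;              // patched or pristine?
    }

    void ClearDebugBreakAtSlot(uint8_t* slot) {
      memset(slot, kNop, kDebugBreakSlotLength);  // restore the nops
    }

    int main() {
      uint8_t code[kDebugBreakSlotLength];
      GenerateSlot(code);
      assert(!IsDebugBreakAtSlot(code));
      SetDebugBreakAtSlot(code, 0x1234);
      assert(IsDebugBreakAtSlot(code));
      ClearDebugBreakAtSlot(code);
      printf("slot restored\n");
      return 0;
    }
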
@@ -571,6 +849,78 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
}
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+ if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+ EmitIsSmi(expr->arguments());
+ } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+ EmitIsNonNegativeSmi(expr->arguments());
+ } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+ EmitIsObject(expr->arguments());
+ } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+ EmitIsUndetectableObject(expr->arguments());
+ } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+ EmitIsFunction(expr->arguments());
+ } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+ EmitIsArray(expr->arguments());
+ } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+ EmitIsRegExp(expr->arguments());
+ } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+ EmitIsConstructCall(expr->arguments());
+ } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+ EmitObjectEquals(expr->arguments());
+ } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+ EmitArguments(expr->arguments());
+ } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+ EmitArgumentsLength(expr->arguments());
+ } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+ EmitClassOf(expr->arguments());
+ } else if (strcmp("_Log", *name->ToCString()) == 0) {
+ EmitLog(expr->arguments());
+ } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+ EmitRandomHeapNumber(expr->arguments());
+ } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+ EmitSubString(expr->arguments());
+ } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+ EmitRegExpExec(expr->arguments());
+ } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+ EmitValueOf(expr->arguments());
+ } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+ EmitSetValueOf(expr->arguments());
+ } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+ EmitNumberToString(expr->arguments());
+ } else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) {
+ EmitStringCharFromCode(expr->arguments());
+ } else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) {
+ EmitStringCharCodeAt(expr->arguments());
+ } else if (strcmp("_StringCharAt", *name->ToCString()) == 0) {
+ EmitStringCharAt(expr->arguments());
+ } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+ EmitStringAdd(expr->arguments());
+ } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+ EmitStringCompare(expr->arguments());
+ } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+ EmitMathPow(expr->arguments());
+ } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+ EmitMathSin(expr->arguments());
+ } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+ EmitMathCos(expr->arguments());
+ } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+ EmitMathSqrt(expr->arguments());
+ } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+ EmitCallFunction(expr->arguments());
+ } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+ EmitRegExpConstructResult(expr->arguments());
+ } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+ EmitSwapElements(expr->arguments());
+ } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+ EmitGetFromCache(expr->arguments());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
Label eval_right, done;
@@ -727,7 +1077,7 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
}
__ Drop(stack_depth);
- EmitReturnSequence(stmt->statement_pos());
+ EmitReturnSequence();
}
@@ -776,7 +1126,11 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ bind(&stack_check_success);
__ bind(loop_statement.continue_target());
- SetStatementPosition(stmt->condition_position());
+
+ // Record the position of the do while condition and make sure it is possible
+ // to break on the condition.
+ SetExpressionPosition(stmt->cond(), stmt->condition_position());
+
VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
@@ -792,7 +1146,6 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
- SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
@@ -805,6 +1158,9 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Visit(stmt->body());
__ bind(loop_statement.continue_target());
+ // Emit the statement position here as this is where the while statement code
+ // starts.
+ SetStatementPosition(stmt);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
@@ -824,7 +1180,6 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
Comment cmnt(masm_, "[ ForStatement");
- SetStatementPosition(stmt);
Label test, body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
@@ -847,6 +1202,9 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
}
__ bind(&test);
+ // Emit the statement position here as this is where the for statement code
+ // starts.
+ SetStatementPosition(stmt);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
@@ -992,6 +1350,8 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
VisitForControl(expr->condition(), &true_case, &false_case);
__ bind(&true_case);
+ SetExpressionPosition(expr->then_expression(),
+ expr->then_expression_position());
Visit(expr->then_expression());
// If control flow falls through Visit, jump to done.
if (context_ == Expression::kEffect || context_ == Expression::kValue) {
@@ -999,6 +1359,8 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
}
__ bind(&false_case);
+ SetExpressionPosition(expr->else_expression(),
+ expr->else_expression_position());
Visit(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (context_ == Expression::kEffect || context_ == Expression::kValue) {
diff --git a/src/full-codegen.h b/src/full-codegen.h
index c7d00937..3d562324 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -59,6 +59,31 @@ class FullCodeGenSyntaxChecker: public AstVisitor {
};
+// AST node visitor which can tell whether a given statement will be breakable
+// when the code is compiled by the full compiler in the debugger. This means
+// that there will be an IC (load/store/call) in the code generated for the
+// debugger to piggyback on.
+class BreakableStatementChecker: public AstVisitor {
+ public:
+ BreakableStatementChecker() : is_breakable_(false) {}
+
+ void Check(Statement* stmt);
+  void Check(Expression* expr);
+
+ bool is_breakable() { return is_breakable_; }
+
+ private:
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ bool is_breakable_;
+
+ DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
+};
+
+
// -----------------------------------------------------------------------------
// Full code generator.
@@ -364,11 +389,12 @@ class FullCodeGenerator: public AstVisitor {
FunctionLiteral* function);
// Platform-specific return sequence
- void EmitReturnSequence(int position);
+ void EmitReturnSequence();
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+ void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
// Platform-specific code for inline runtime calls.
@@ -388,8 +414,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitValueOf(ZoneList<Expression*>* arguments);
void EmitSetValueOf(ZoneList<Expression*>* arguments);
void EmitNumberToString(ZoneList<Expression*>* arguments);
- void EmitCharFromCode(ZoneList<Expression*>* arguments);
- void EmitFastCharCodeAt(ZoneList<Expression*>* arguments);
+ void EmitStringCharFromCode(ZoneList<Expression*>* arguments);
+ void EmitStringCharCodeAt(ZoneList<Expression*>* arguments);
+ void EmitStringCharAt(ZoneList<Expression*>* arguments);
void EmitStringCompare(ZoneList<Expression*>* arguments);
void EmitStringAdd(ZoneList<Expression*>* arguments);
void EmitLog(ZoneList<Expression*>* arguments);
@@ -456,6 +483,7 @@ class FullCodeGenerator: public AstVisitor {
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
+ void SetExpressionPosition(Expression* expr, int pos);
void SetStatementPosition(int pos);
void SetSourcePosition(int pos);
diff --git a/src/globals.h b/src/globals.h
index 292d8d80..6cf26261 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -303,7 +303,6 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
-class Array;
class JSArray;
class JSFunction;
class JSObject;
@@ -544,16 +543,16 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
-#define OBJECT_SIZE_ALIGN(value) \
+// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
+#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
-#define MAP_SIZE_ALIGN(value) \
+// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
+#define MAP_POINTER_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
// The expression OFFSET_OF(type, field) computes the byte-offset
@@ -648,7 +647,9 @@ F FUNCTION_CAST(Address addr) {
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
-enum CpuFeature { SSE3 = 32, // x86
+// On X86/X64, values below 32 are bits in EDX, values 32 and above are bits in ECX.
+enum CpuFeature { SSE4_1 = 32 + 19, // x86
+ SSE3 = 32 + 0, // x86
SSE2 = 26, // x86
CMOV = 15, // x86
RDTSC = 4, // x86
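Reviewer note: a hedged sketch of how these encoded feature values are typically consumed (not part of this diff; it assumes the CpuFeatures::IsSupported/Scope helpers used elsewhere in the tree):

    // SSE4_1 == 32 + 19 selects bit 19 of ECX in the CPUID feature flags.
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope use_sse4_1(SSE4_1);  // Satisfies the IsEnabled() ASSERT in the assembler.
      __ movntdqa(xmm0, Operand(esi, 0));     // Streaming load, guarded by the feature check.
    } else {
      __ movdqu(xmm0, Operand(esi, 0));       // Plain unaligned load as a fallback.
    }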
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 82e1a912..5cb24eec 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -117,7 +117,12 @@ void Heap::FinalizeExternalString(String* string) {
reinterpret_cast<byte*>(string) +
ExternalString::kResourceOffset -
kHeapObjectTag);
- delete *resource_addr;
+
+ // Dispose of the C++ object if it has not already been disposed.
+ if (*resource_addr != NULL) {
+ (*resource_addr)->Dispose();
+ }
+
// Clear the resource pointer in the string.
*resource_addr = NULL;
}
@@ -184,19 +189,16 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
- Page::SetRSet(address, offset);
+ Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
void Heap::RecordWrites(Address address, int start, int len) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
- for (int offset = start;
- offset < start + len * kPointerSize;
- offset += kPointerSize) {
- SLOW_ASSERT(Contains(address + offset));
- Page::SetRSet(address, offset);
- }
+ Page* page = Page::FromAddress(address);
+ page->SetRegionMarks(page->GetRegionMarks() |
+ page->GetRegionMaskForSpan(address + start, len * kPointerSize));
}
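Reviewer note: RecordWrites now ORs a span mask into the page's region marks instead of setting per-pointer remembered-set bits. A rough sketch of what such a mask computation looks like (hypothetical helper and constant names; the real logic is Page::GetRegionMaskForSpan — with 8 KB pages and a 32-bit mark word each bit would cover a 256-byte region):

    uint32_t RegionMaskForSpan(Page* page, Address start, int size_in_bytes) {
      // Index of the region containing the first and last byte of the span.
      int first = static_cast<int>(start - page->address()) >> kRegionSizeLog2;
      int last = static_cast<int>(start + size_in_bytes - 1 - page->address())
                 >> kRegionSizeLog2;
      uint32_t mask = 0;
      for (int region = first; region <= last; region++) mask |= 1u << region;
      return mask;
    }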
@@ -234,13 +236,40 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
-void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
+void Heap::CopyBlock(Address dst, Address src, int byte_size) {
+ ASSERT(IsAligned(byte_size, kPointerSize));
+ CopyWords(reinterpret_cast<Object**>(dst),
+ reinterpret_cast<Object**>(src),
+ byte_size / kPointerSize);
+}
+
+
+void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
- CopyWords(dst, src, byte_size / kPointerSize);
+
+ Page* page = Page::FromAddress(dst);
+ uint32_t marks = page->GetRegionMarks();
+
+ for (int remaining = byte_size / kPointerSize;
+ remaining > 0;
+ remaining--) {
+ Memory::Object_at(dst) = Memory::Object_at(src);
+
+ if (Heap::InNewSpace(Memory::Object_at(dst))) {
+ marks |= page->GetRegionMaskForAddress(dst);
+ }
+
+ dst += kPointerSize;
+ src += kPointerSize;
+ }
+
+ page->SetRegionMarks(marks);
}
-void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
+void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
int size_in_words = byte_size / kPointerSize;
@@ -250,10 +279,12 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
((OffsetFrom(reinterpret_cast<Address>(src)) -
OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
- Object** end = src + size_in_words;
+ Object** src_slot = reinterpret_cast<Object**>(src);
+ Object** dst_slot = reinterpret_cast<Object**>(dst);
+ Object** end_slot = src_slot + size_in_words;
- while (src != end) {
- *dst++ = *src++;
+ while (src_slot != end_slot) {
+ *dst_slot++ = *src_slot++;
}
} else {
memmove(dst, src, byte_size);
@@ -261,6 +292,17 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
}
+void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size) {
+ ASSERT(IsAligned(byte_size, kPointerSize));
+ ASSERT((dst >= (src + byte_size)) ||
+ ((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
+
+ CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(InFromSpace(object));
diff --git a/src/heap.cc b/src/heap.cc
index d554a3ba..3fc7d02b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -326,13 +326,6 @@ void Heap::GarbageCollectionPrologue() {
}
if (FLAG_gc_verbose) Print();
-
- if (FLAG_print_rset) {
- // Not all spaces have remembered set bits that we care about.
- old_pointer_space_->PrintRSet();
- map_space_->PrintRSet();
- lo_space_->PrintRSet();
- }
#endif
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -519,9 +512,8 @@ void Heap::ReserveSpace(
Heap::CollectGarbage(cell_space_size, CELL_SPACE);
gc_performed = true;
}
- // We add a slack-factor of 2 in order to have space for the remembered
- // set and a series of large-object allocations that are only just larger
- // than the page size.
+ // We add a slack-factor of 2 in order to have space for a series of
+ // large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
@@ -568,8 +560,27 @@ class ClearThreadJSFunctionResultCachesVisitor: public ThreadVisitor {
void Heap::ClearJSFunctionResultCaches() {
if (Bootstrapper::IsActive()) return;
ClearThreadJSFunctionResultCachesVisitor visitor;
- ThreadManager::IterateThreads(&visitor);
+ ThreadManager::IterateArchivedThreads(&visitor);
+}
+
+
+#ifdef DEBUG
+
+enum PageWatermarkValidity {
+ ALL_VALID,
+ ALL_INVALID
+};
+
+static void VerifyPageWatermarkValidity(PagedSpace* space,
+ PageWatermarkValidity validity) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ bool expected_value = (validity == ALL_VALID);
+ while (it.has_next()) {
+ Page* page = it.next();
+ ASSERT(page->IsWatermarkValid() == expected_value);
+ }
}
+#endif
void Heap::PerformGarbageCollection(AllocationSpace space,
@@ -594,6 +605,11 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
EnsureFromSpaceIsCommitted();
if (collector == MARK_COMPACTOR) {
+ if (FLAG_flush_code) {
+ // Flush all potentially unused code.
+ FlushCode();
+ }
+
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
@@ -646,18 +662,19 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
- if (MarkCompactCollector::IsCompacting()) {
- mc_count_++;
- } else {
- ms_count_++;
- }
- tracer->set_full_gc_count(mc_count_);
LOG(ResourceEvent("markcompact", "begin"));
MarkCompactCollector::Prepare(tracer);
bool is_compacting = MarkCompactCollector::IsCompacting();
+ if (is_compacting) {
+ mc_count_++;
+ } else {
+ ms_count_++;
+ }
+ tracer->set_full_gc_count(mc_count_ + ms_count_);
+
MarkCompactPrologue(is_compacting);
MarkCompactCollector::CollectGarbage();
@@ -816,6 +833,20 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
+ Page::FlipMeaningOfInvalidatedWatermarkFlag();
+#ifdef DEBUG
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+#endif
+
+ // We do not update an allocation watermark of the top page during linear
+ // allocation to avoid overhead. So to maintain the watermark invariant
+ // we have to manually cache the watermark and mark the top page as having an
+ // invalid watermark. This guarantees that dirty regions iteration will use a
+ // correct watermark even if a linear allocation happens.
+ old_pointer_space_->FlushTopPageWatermark();
+ map_space_->FlushTopPageWatermark();
+
// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));
@@ -858,9 +889,17 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
- IterateRSet(old_pointer_space_, &ScavengePointer);
- IterateRSet(map_space_, &ScavengePointer);
- lo_space_->IterateRSet(&ScavengePointer);
+ IterateDirtyRegions(old_pointer_space_,
+ &IteratePointersInDirtyRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ IterateDirtyRegions(map_space_,
+ &IteratePointersInDirtyMapsRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ lo_space_->IterateDirtyRegions(&ScavengePointer);
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@@ -963,9 +1002,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Copy the from-space object to its new location (given by the
// forwarding address) and fix its map.
HeapObject* target = source->map_word().ToForwardingAddress();
- CopyBlock(reinterpret_cast<Object**>(target->address()),
- reinterpret_cast<Object**>(source->address()),
- source->SizeFromMap(map));
+ int size = source->SizeFromMap(map);
+ CopyBlock(target->address(), source->address(), size);
target->set_map(map);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -973,8 +1011,10 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
- target->Iterate(scavenge_visitor);
- UpdateRSet(target);
+ ASSERT(!target->IsMap());
+ IterateAndMarkPointersToNewSpace(target->address(),
+ target->address() + size,
+ &ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
@@ -985,117 +1025,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
-void Heap::ClearRSetRange(Address start, int size_in_bytes) {
- uint32_t start_bit;
- Address start_word_address =
- Page::ComputeRSetBitPosition(start, 0, &start_bit);
- uint32_t end_bit;
- Address end_word_address =
- Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
- 0,
- &end_bit);
-
- // We want to clear the bits in the starting word starting with the
- // first bit, and in the ending word up to and including the last
- // bit. Build a pair of bitmasks to do that.
- uint32_t start_bitmask = start_bit - 1;
- uint32_t end_bitmask = ~((end_bit << 1) - 1);
-
- // If the start address and end address are the same, we mask that
- // word once, otherwise mask the starting and ending word
- // separately and all the ones in between.
- if (start_word_address == end_word_address) {
- Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
- } else {
- Memory::uint32_at(start_word_address) &= start_bitmask;
- Memory::uint32_at(end_word_address) &= end_bitmask;
- start_word_address += kIntSize;
- memset(start_word_address, 0, end_word_address - start_word_address);
- }
-}
-
-
-class UpdateRSetVisitor: public ObjectVisitor {
- public:
-
- void VisitPointer(Object** p) {
- UpdateRSet(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- // Update a store into slots [start, end), used (a) to update remembered
- // set when promoting a young object to old space or (b) to rebuild
- // remembered sets after a mark-compact collection.
- for (Object** p = start; p < end; p++) UpdateRSet(p);
- }
- private:
-
- void UpdateRSet(Object** p) {
- // The remembered set should not be set. It should be clear for objects
- // newly copied to old space, and it is cleared before rebuilding in the
- // mark-compact collector.
- ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
- if (Heap::InNewSpace(*p)) {
- Page::SetRSet(reinterpret_cast<Address>(p), 0);
- }
- }
-};
-
-
-int Heap::UpdateRSet(HeapObject* obj) {
- ASSERT(!InNewSpace(obj));
- // Special handling of fixed arrays to iterate the body based on the start
- // address and offset. Just iterating the pointers as in UpdateRSetVisitor
- // will not work because Page::SetRSet needs to have the start of the
- // object for large object pages.
- if (obj->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(obj);
- int length = array->length();
- for (int i = 0; i < length; i++) {
- int offset = FixedArray::kHeaderSize + i * kPointerSize;
- ASSERT(!Page::IsRSetSet(obj->address(), offset));
- if (Heap::InNewSpace(array->get(i))) {
- Page::SetRSet(obj->address(), offset);
- }
- }
- } else if (!obj->IsCode()) {
- // Skip code object, we know it does not contain inter-generational
- // pointers.
- UpdateRSetVisitor v;
- obj->Iterate(&v);
- }
- return obj->Size();
-}
-
-
-void Heap::RebuildRSets() {
- // By definition, we do not care about remembered set bits in code,
- // data, or cell spaces.
- map_space_->ClearRSet();
- RebuildRSets(map_space_);
-
- old_pointer_space_->ClearRSet();
- RebuildRSets(old_pointer_space_);
-
- Heap::lo_space_->ClearRSet();
- RebuildRSets(lo_space_);
-}
-
-
-void Heap::RebuildRSets(PagedSpace* space) {
- HeapObjectIterator it(space);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- Heap::UpdateRSet(obj);
-}
-
-
-void Heap::RebuildRSets(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- Heap::UpdateRSet(obj);
-}
-
-
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
@@ -1121,9 +1050,7 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
HeapObject* target,
int size) {
// Copy the content of source to target.
- CopyBlock(reinterpret_cast<Object**>(target->address()),
- reinterpret_cast<Object**>(source->address()),
- size);
+ CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
@@ -1178,21 +1105,30 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
if (object_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
- // Save the from-space object pointer and its map pointer at the
- // top of the to space to be swept and copied later. Write the
- // forwarding address over the map word of the from-space
- // object.
HeapObject* target = HeapObject::cast(result);
- promotion_queue.insert(object, first_word.ToMap());
- object->set_map_word(MapWord::FromForwardingAddress(target));
- // Give the space allocated for the result a proper map by
- // treating it as a free list node (not linked into the free
- // list).
- FreeListNode* node = FreeListNode::FromAddress(target->address());
- node->set_size(object_size);
+ if (object->IsFixedArray()) {
+ // Save the from-space object pointer and its map pointer at the
+ // top of the to space to be swept and copied later. Write the
+ // forwarding address over the map word of the from-space
+ // object.
+ promotion_queue.insert(object, first_word.ToMap());
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+
+ // Give the space allocated for the result a proper map by
+ // treating it as a free list node (not linked into the free
+ // list).
+ FreeListNode* node = FreeListNode::FromAddress(target->address());
+ node->set_size(object_size);
+
+ *p = target;
+ } else {
+        // In large object space only fixed arrays can contain
+        // intergenerational references.
+        // All other objects can be copied immediately and need not be
+        // revisited.
+ *p = MigrateObject(object, target, object_size);
+ }
- *p = target;
tracer()->increment_promoted_objects_size(object_size);
return;
}
@@ -1682,7 +1618,7 @@ bool Heap::CreateInitialObjects() {
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
- obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
+ obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
if (obj->IsFailure()) return false;
hidden_symbol_ = String::cast(obj);
@@ -1918,6 +1854,9 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
+ share->set_num_literals(0);
+ share->set_end_position(0);
+ share->set_function_token_position(0);
return result;
}
@@ -2179,8 +2118,8 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
: lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
- reinterpret_cast<Array*>(result)->set_map(byte_array_map());
- reinterpret_cast<Array*>(result)->set_length(length);
+ reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -2195,8 +2134,8 @@ Object* Heap::AllocateByteArray(int length) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
- reinterpret_cast<Array*>(result)->set_map(byte_array_map());
- reinterpret_cast<Array*>(result)->set_length(length);
+ reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -2250,6 +2189,87 @@ Object* Heap::AllocateExternalArray(int length,
}
+// This stack visitor is used to traverse all the archived threads to see if
+// there are activations of the given code on any of their stacks.
+class FlushingStackVisitor : public ThreadVisitor {
+ public:
+ explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {}
+
+ void VisitThread(ThreadLocalTop* top) {
+    // If we have already found the code in a previously traversed thread, return.
+ if (found_) return;
+
+ for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+ if (code_->contains(it.frame()->pc())) {
+ found_ = true;
+ return;
+ }
+ }
+ }
+  bool FoundCode() { return found_; }
+
+ private:
+ bool found_;
+ Code* code_;
+};
+
+
+static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
+  // The function must be compiled and have the source code available, so
+  // that it can be recompiled in case the function is needed again.
+ if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;
+
+ // We never flush code for Api functions.
+ if (function_info->IsApiFunction()) return;
+
+ // Only flush code for functions.
+  if (function_info->code()->kind() != Code::FUNCTION) return;
+
+ // Function must be lazy compilable.
+ if (!function_info->allows_lazy_compilation()) return;
+
+  // If this is a full script wrapped in a function we do not flush the code.
+ if (function_info->is_toplevel()) return;
+
+ // If this function is in the compilation cache we do not flush the code.
+ if (CompilationCache::HasFunction(function_info)) return;
+
+ // Make sure we are not referencing the code from the stack.
+ for (StackFrameIterator it; !it.done(); it.Advance()) {
+ if (function_info->code()->contains(it.frame()->pc())) return;
+ }
+ // Iterate the archived stacks in all threads to check if
+ // the code is referenced.
+ FlushingStackVisitor threadvisitor(function_info->code());
+ ThreadManager::IterateArchivedThreads(&threadvisitor);
+ if (threadvisitor.FoundCode()) return;
+
+ HandleScope scope;
+ // Compute the lazy compilable version of the code.
+ function_info->set_code(*ComputeLazyCompile(function_info->length()));
+}
+
+
+void Heap::FlushCode() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Do not flush code if the debugger is loaded or there are breakpoints.
+ if (Debug::IsLoaded() || Debug::has_break_points()) return;
+#endif
+ HeapObjectIterator it(old_pointer_space());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ if (obj->IsJSFunction()) {
+ JSFunction* jsfunction = JSFunction::cast(obj);
+
+ // The function must have a valid context and not be a builtin.
+ if (jsfunction->unchecked_context()->IsContext() &&
+ !jsfunction->IsBuiltin()) {
+ FlushCodeForFunction(jsfunction->shared());
+ }
+ }
+ }
+}
+
+
Object* Heap::CreateCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
Code::Flags flags,
@@ -2312,9 +2332,7 @@ Object* Heap::CopyCode(Code* code) {
// Copy code object.
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
- CopyBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ CopyBlock(new_addr, old_addr, obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@@ -2460,8 +2478,8 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Copy the content. The arguments boilerplate doesn't have any
// fields that point to new space so it's safe to skip the write
// barrier here.
- CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
- reinterpret_cast<Object**>(boilerplate->address()),
+ CopyBlock(HeapObject::cast(result)->address(),
+ boilerplate->address(),
kArgumentsObjectSize);
// Set the two properties.
@@ -2683,8 +2701,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(reinterpret_cast<Object**>(clone_address),
- reinterpret_cast<Object**>(source->address()),
+ CopyBlock(clone_address,
+ source->address(),
object_size);
// Update write barrier for all fields that lie beyond the header.
RecordWrites(clone_address,
@@ -2696,8 +2714,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
ASSERT(Heap::InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
- CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
- reinterpret_cast<Object**>(source->address()),
+ CopyBlock(HeapObject::cast(clone)->address(),
+ source->address(),
object_size);
}
@@ -2968,8 +2986,8 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
- reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
- reinterpret_cast<Array*>(result)->set_length(0);
+ reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
+ reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@@ -2994,9 +3012,7 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
if (obj->IsFailure()) return obj;
if (Heap::InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
- CopyBlock(reinterpret_cast<Object**>(dst->address()),
- reinterpret_cast<Object**>(src->address()),
- FixedArray::SizeFor(len));
+ CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
return obj;
}
HeapObject::cast(obj)->set_map(src->map());
@@ -3017,8 +3033,8 @@ Object* Heap::AllocateFixedArray(int length) {
Object* result = AllocateRawFixedArray(length);
if (!result->IsFailure()) {
// Initialize header.
- reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
- FixedArray* array = FixedArray::cast(result);
+ FixedArray* array = reinterpret_cast<FixedArray*>(result);
+ array->set_map(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(undefined_value()));
@@ -3045,27 +3061,10 @@ Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
space = LO_SPACE;
}
- // Specialize allocation for the space.
- Object* result = Failure::OutOfMemoryException();
- if (space == NEW_SPACE) {
- // We cannot use Heap::AllocateRaw() because it will not properly
- // allocate extra remembered set bits if always_allocate() is true and
- // new space allocation fails.
- result = new_space_.AllocateRaw(size);
- if (result->IsFailure() && always_allocate()) {
- if (size <= MaxObjectSizeInPagedSpace()) {
- result = old_pointer_space_->AllocateRaw(size);
- } else {
- result = lo_space_->AllocateRawFixedArray(size);
- }
- }
- } else if (space == OLD_POINTER_SPACE) {
- result = old_pointer_space_->AllocateRaw(size);
- } else {
- ASSERT(space == LO_SPACE);
- result = lo_space_->AllocateRawFixedArray(size);
- }
- return result;
+ AllocationSpace retry_space =
+ (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+
+ return AllocateRaw(size, space, retry_space);
}
@@ -3113,7 +3112,7 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
- reinterpret_cast<Array*>(result)->set_map(hash_table_map());
+ reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@@ -3365,6 +3364,49 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p) {
+}
+
+
+static void VerifyPointersUnderWatermark(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ Address start = page->ObjectAreaStart();
+ Address end = page->AllocationWatermark();
+
+ Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ start,
+ end,
+ visit_dirty_region,
+ &DummyScavengePointer);
+ }
+}
+
+
+static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ if (object->IsFixedArray()) {
+ Address slot_address = object->address();
+ Address end = object->address() + object->Size();
+
+ while (slot_address < end) {
+ HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+ // When we are not in GC the Heap::InNewSpace() predicate
+ // checks that pointers which satisfy predicate point into
+ // the active semispace.
+ Heap::InNewSpace(*slot);
+ slot_address += kPointerSize;
+ }
+ }
+ }
+}
+
+
void Heap::Verify() {
ASSERT(HasBeenSetup());
@@ -3373,14 +3415,23 @@ void Heap::Verify() {
new_space_.Verify();
- VerifyPointersAndRSetVisitor rset_visitor;
- old_pointer_space_->Verify(&rset_visitor);
- map_space_->Verify(&rset_visitor);
+ VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
+ old_pointer_space_->Verify(&dirty_regions_visitor);
+ map_space_->Verify(&dirty_regions_visitor);
+
+ VerifyPointersUnderWatermark(old_pointer_space_,
+ &IteratePointersInDirtyRegion);
+ VerifyPointersUnderWatermark(map_space_,
+ &IteratePointersInDirtyMapsRegion);
+ VerifyPointersUnderWatermark(lo_space_);
- VerifyPointersVisitor no_rset_visitor;
- old_data_space_->Verify(&no_rset_visitor);
- code_space_->Verify(&no_rset_visitor);
- cell_space_->Verify(&no_rset_visitor);
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+
+ VerifyPointersVisitor no_dirty_regions_visitor;
+ old_data_space_->Verify(&no_dirty_regions_visitor);
+ code_space_->Verify(&no_dirty_regions_visitor);
+ cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
@@ -3433,65 +3484,253 @@ void Heap::ZapFromSpace() {
#endif // DEBUG
-int Heap::IterateRSetRange(Address object_start,
- Address object_end,
- Address rset_start,
- ObjectSlotCallback copy_object_func) {
- Address object_address = object_start;
- Address rset_address = rset_start;
- int set_bits_count = 0;
-
- // Loop over all the pointers in [object_start, object_end).
- while (object_address < object_end) {
- uint32_t rset_word = Memory::uint32_at(rset_address);
- if (rset_word != 0) {
- uint32_t result_rset = rset_word;
- for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
- // Do not dereference pointers at or past object_end.
- if ((rset_word & bitmask) != 0 && object_address < object_end) {
- Object** object_p = reinterpret_cast<Object**>(object_address);
- if (Heap::InNewSpace(*object_p)) {
- copy_object_func(reinterpret_cast<HeapObject**>(object_p));
- }
- // If this pointer does not need to be remembered anymore, clear
- // the remembered set bit.
- if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
- set_bits_count++;
- }
- object_address += kPointerSize;
+bool Heap::IteratePointersInDirtyRegion(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ Address slot_address = start;
+ bool pointers_to_new_space_found = false;
+
+ while (slot_address < end) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ copy_object_func(reinterpret_cast<HeapObject**>(slot));
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ pointers_to_new_space_found = true;
}
- // Update the remembered set if it has changed.
- if (result_rset != rset_word) {
- Memory::uint32_at(rset_address) = result_rset;
+ }
+ slot_address += kPointerSize;
+ }
+ return pointers_to_new_space_found;
+}
+
+
+// Compute start address of the first map following given addr.
+static inline Address MapStartAlign(Address addr) {
+ Address page = Page::FromAddress(addr)->ObjectAreaStart();
+ return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute end address of the first map preceding given addr.
+static inline Address MapEndAlign(Address addr) {
+ Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+ return page + ((addr - page) / Map::kSize * Map::kSize);
+}
+
+
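Reviewer note: a worked example of the two alignment helpers above (the numbers are illustrative only, not the real Map::kSize):

    // Suppose the object area starts at P and Map::kSize == 40, so maps sit
    // at P, P + 40, P + 80, P + 120, ...  For addr == P + 100:
    //   MapStartAlign(addr) == P + ((100 + 39) / 40) * 40 == P + 120
    //       (start of the first map at or after addr)
    //   MapEndAlign(addr)   == P + (100 / 40) * 40 == P + 80
    //       (end of the last complete map at or before addr)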
+static bool IteratePointersInDirtyMaps(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ ASSERT(MapStartAlign(start) == start);
+ ASSERT(MapEndAlign(end) == end);
+
+ Address map_address = start;
+ bool pointers_to_new_space_found = false;
+
+ while (map_address < end) {
+ ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(Memory::Object_at(map_address)->IsMap());
+
+ Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+ Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+ if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)) {
+ pointers_to_new_space_found = true;
+ }
+
+ map_address += Map::kSize;
+ }
+
+ return pointers_to_new_space_found;
+}
+
+
+bool Heap::IteratePointersInDirtyMapsRegion(
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ Address map_aligned_start = MapStartAlign(start);
+ Address map_aligned_end = MapEndAlign(end);
+
+ bool contains_pointers_to_new_space = false;
+
+ if (map_aligned_start != start) {
+ Address prev_map = map_aligned_start - Map::kSize;
+ ASSERT(Memory::Object_at(prev_map)->IsMap());
+
+ Address pointer_fields_start =
+ Max(start, prev_map + Map::kPointerFieldsBeginOffset);
+
+ Address pointer_fields_end =
+ Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+ }
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyMaps(map_aligned_start,
+ map_aligned_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+
+ if (map_aligned_end != end) {
+ ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
+
+ Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
+
+ Address pointer_fields_end =
+ Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+ }
+
+ return contains_pointers_to_new_space;
+}
+
+
+void Heap::IterateAndMarkPointersToNewSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
+ Address slot_address = start;
+ Page* page = Page::FromAddress(start);
+
+ uint32_t marks = page->GetRegionMarks();
+
+ while (slot_address < end) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ callback(reinterpret_cast<HeapObject**>(slot));
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ marks |= page->GetRegionMaskForAddress(slot_address);
}
- } else {
- // No bits in the word were set. This is the common case.
- object_address += kPointerSize * kBitsPerInt;
}
- rset_address += kIntSize;
+ slot_address += kPointerSize;
}
- return set_bits_count;
+
+ page->SetRegionMarks(marks);
}
-void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
- ASSERT(Page::is_rset_in_use());
- ASSERT(space == old_pointer_space_ || space == map_space_);
+uint32_t Heap::IterateDirtyRegions(
+ uint32_t marks,
+ Address area_start,
+ Address area_end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func) {
+ uint32_t newmarks = 0;
+ uint32_t mask = 1;
+
+ if (area_start >= area_end) {
+ return newmarks;
+ }
- static void* paged_rset_histogram = StatsTable::CreateHistogram(
- "V8.RSetPaged",
- 0,
- Page::kObjectAreaSize / kPointerSize,
- 30);
+ Address region_start = area_start;
+
+ // area_start does not necessarily coincide with start of the first region.
+ // Thus to calculate the beginning of the next region we have to align
+ // area_start by Page::kRegionSize.
+ Address second_region =
+ reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
+ ~Page::kRegionAlignmentMask);
+
+ // Next region might be beyond area_end.
+ Address region_end = Min(second_region, area_end);
+
+ if (marks & mask) {
+ if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ mask <<= 1;
+
+  // Iterate over the subsequent regions which lie fully inside
+  // [area_start, area_end).
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ while (region_end <= area_end) {
+ if (marks & mask) {
+ if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ mask <<= 1;
+ }
+
+ if (region_start != area_end) {
+    // A small piece of the area was left unvisited because area_end does not
+    // coincide with a region boundary. Check whether the region covering the
+    // last part of the area is dirty.
+ if (marks & mask) {
+ if (visit_dirty_region(region_start, area_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ }
+
+ return newmarks;
+}
+
+
+
+void Heap::IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func,
+ ExpectedPageWatermarkState expected_page_watermark_state) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
+
while (it.has_next()) {
Page* page = it.next();
- int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
- page->RSetStart(), copy_object_func);
- if (paged_rset_histogram != NULL) {
- StatsTable::AddHistogramSample(paged_rset_histogram, count);
+ uint32_t marks = page->GetRegionMarks();
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ Address start = page->ObjectAreaStart();
+
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
+
+ if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ ASSERT(space == old_pointer_space_ ||
+ (space == map_space_ &&
+ ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+ page->SetRegionMarks(IterateDirtyRegions(marks,
+ start,
+ end,
+ visit_dirty_region,
+ copy_object_func));
}
+
+ // Mark page watermark as invalid to maintain watermark validity invariant.
+ // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
+ page->InvalidateWatermark(true);
}
}
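Reviewer note: to make the bit-walking in IterateDirtyRegions concrete, a small worked example (the region size is illustrative; the real value is Page::kRegionSize):

    // Assume Page::kRegionSize == 256 and area == [A, A + 600) with A
    // region-aligned, marks == 0b101 on entry:
    //   bit 0: [A,       A + 256)  dirty -> visit_dirty_region called.
    //   bit 1: [A + 256, A + 512)  clean -> skipped.
    //   bit 2: [A + 512, A + 600)  dirty -> visited, clipped at area_end.
    // A bit stays set in the returned marks only if its callback reported
    // that the region still contains pointers to new space.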
diff --git a/src/heap.h b/src/heap.h
index 74e5a31b..0db40083 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -206,6 +206,10 @@ class HeapStats;
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
+typedef bool (*DirtyRegionCallback)(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func);
+
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@@ -740,17 +744,54 @@ class Heap : public AllStatic {
// Iterates over all the other roots in the heap.
static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates remembered set of an old space.
- static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
+ enum ExpectedPageWatermarkState {
+ WATERMARK_SHOULD_BE_VALID,
+ WATERMARK_CAN_BE_INVALID
+ };
+
+  // For each dirty region on a page in use from an old space call the
+  // visit_dirty_region callback.
+  // If either visit_dirty_region or callback can cause an allocation in
+  // old space and thus move a page's allocation watermark, pass
+  // WATERMARK_CAN_BE_INVALID as expected_page_watermark_state so that the
+  // cached watermark is used for pages whose watermark is invalid.
+  // All pages will be marked as having an invalid watermark upon
+  // iteration completion.
+ static void IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback,
+ ExpectedPageWatermarkState expected_page_watermark_state);
+
+ // Interpret marks as a bitvector of dirty marks for regions of size
+ // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
+  // the memory interval from start to end. For each dirty region call the
+  // visit_dirty_region callback. Return the updated bitvector of dirty marks.
+ static uint32_t IterateDirtyRegions(uint32_t marks,
+ Address start,
+ Address end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback);
+
+ // Iterate pointers to new space found in memory interval from start to end.
+ // Update dirty marks for page containing start address.
+ static void IterateAndMarkPointersToNewSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback);
+
+ // Iterate pointers to new space found in memory interval from start to end.
+  // Return true if pointers to new space were found.
+ static bool IteratePointersInDirtyRegion(Address start,
+ Address end,
+ ObjectSlotCallback callback);
+
+
+ // Iterate pointers to new space found in memory interval from start to end.
+ // This interval is considered to belong to the map space.
+  // Return true if pointers to new space were found.
+ static bool IteratePointersInDirtyMapsRegion(Address start,
+ Address end,
+ ObjectSlotCallback callback);
- // Iterates a range of remembered set addresses starting with rset_start
- // corresponding to the range of allocated pointers
- // [object_start, object_end).
- // Returns the number of bits that were set.
- static int IterateRSetRange(Address object_start,
- Address object_end,
- Address rset_start,
- ObjectSlotCallback copy_object_func);
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
@@ -852,17 +893,6 @@ class Heap : public AllStatic {
static void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- // Clear a range of remembered set addresses corresponding to the object
- // area address 'start' with size 'size_in_bytes', eg, when adding blocks
- // to the free list.
- static void ClearRSetRange(Address start, int size_in_bytes);
-
- // Rebuild remembered set in old and map spaces.
- static void RebuildRSets();
-
- // Update an old object's remembered set
- static int UpdateRSet(HeapObject* obj);
-
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
@@ -955,11 +985,19 @@ class Heap : public AllStatic {
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
- static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+ static inline void CopyBlock(Address dst, Address src, int byte_size);
+
+ static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
- static inline void MoveBlock(Object** dst, Object** src, int byte_size);
+ static inline void MoveBlock(Address dst, Address src, int byte_size);
+
+ static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit.
static void CheckNewSpaceExpansionCriteria();
@@ -1207,12 +1245,6 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif
- // Rebuild remembered set in an old space.
- static void RebuildRSets(PagedSpace* space);
-
- // Rebuild remembered set in the large object space.
- static void RebuildRSets(LargeObjectSpace* space);
-
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -1234,6 +1266,10 @@ class Heap : public AllStatic {
// Flush the number to string cache.
static void FlushNumberStringCache();
+ // Flush code from functions we do not expect to use again. The code will
+ // be replaced with a lazy compilable version.
+ static void FlushCode();
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1301,11 +1337,11 @@ class LinearAllocationScope {
#ifdef DEBUG
-// Visitor class to verify interior pointers that do not have remembered set
-// bits. All heap object pointers have to point into the heap to a location
-// that has a map pointer at its first word. Caveat: Heap::Contains is an
-// approximation because it can return true for objects in a heap space but
-// above the allocation pointer.
+// Visitor class to verify interior pointers in spaces that do not contain
+// or care about intergenerational references. All heap object pointers have to
+// point into the heap to a location that has a map pointer at its first word.
+// Caveat: Heap::Contains is an approximation because it can return true for
+// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
@@ -1320,10 +1356,11 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
-// Visitor class to verify interior pointers that have remembered set bits.
-// As VerifyPointersVisitor but also checks that remembered set bits are
-// always set for pointers into new space.
-class VerifyPointersAndRSetVisitor: public ObjectVisitor {
+// Visitor class to verify interior pointers in spaces that use region marks
+// to keep track of intergenerational references.
+// As VerifyPointersVisitor but also checks that dirty marks are set
+// for regions covering intergenerational references.
+class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
@@ -1332,7 +1369,9 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
if (Heap::InNewSpace(object)) {
- ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
+ ASSERT(Heap::InToSpace(object));
+ Address addr = reinterpret_cast<Address>(current);
+ ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 9c96e197..a851b427 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -52,16 +52,21 @@ Condition NegateCondition(Condition cc) {
void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // relocate entry
+ *p -= delta; // Relocate entry.
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // relocate entry
+ *p -= delta; // Relocate entry.
+ } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+ // Special handling of a debug break slot when a break point is set (call
+ // instruction has been inserted).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // relocate entry
+ *p += delta; // Relocate entry.
}
}
@@ -154,6 +159,11 @@ bool RelocInfo::IsPatchedReturnSequence() {
}
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ return !Assembler::IsNop(pc());
+}
+
+
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -164,8 +174,10 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
- RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) {
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 4690c672..d4dff330 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -206,6 +206,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
// Add the requested number of int3 instructions after the call.
+ ASSERT_GE(guard_bytes, 0);
for (int i = 0; i < guard_bytes; i++) {
patcher.masm()->int3();
}
@@ -1328,6 +1329,15 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(const Operand& op, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF6);
+ emit_operand(eax, op);
+ EMIT(imm8);
+}
+
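Reviewer note: a hedged usage sketch for the new byte-sized test (the register and flag choices are typical call-site patterns, not taken from this diff):

    // Test a single flag byte in a map without loading a full word.
    Label undetectable;
    __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);  // Encodes as F6 /0 ib: TEST r/m8, imm8.
    __ j(not_zero, &undetectable);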
+
void Assembler::xor_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2221,6 +2231,40 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
}
+void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x38);
+ EMIT(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movntdq(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xE7);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::prefetch(const Operand& src, int level) {
+ ASSERT(is_uint2(level));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x18);
+  XMMRegister code = { level };  // Emit hint number in the reg field of ModR/M.
+ emit_sse_operand(code, src);
+}
+
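Reviewer note: illustrative prefetch call sites (operands chosen for the example). The level argument lands in the reg field, so it selects the hint directly:

    __ prefetch(Operand(esi, 0), 1);   // 0F 18 /1: prefetcht0, into all cache levels.
    __ prefetch(Operand(esi, 64), 0);  // 0F 18 /0: prefetchnta, non-temporal hint.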
+
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2300,7 +2344,6 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
@@ -2329,6 +2372,13 @@ void Assembler::RecordJSReturn() {
}
+void Assembler::RecordDebugBreakSlot() {
+ WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
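Reviewer note: RecordDebugBreakSlot pairs with the Debug::GenerateSlot calls in the full code generator earlier in this change. The generator itself is not in this hunk; a plausible sketch of it (the real code lives in src/ia32/debug-ia32.cc):

    void Debug::GenerateSlot(MacroAssembler* masm) {
      // Mark the current pc with DEBUG_BREAK_SLOT reloc info, then reserve
      // enough bytes for the debugger to later patch in a call instruction.
      masm->RecordDebugBreakSlot();
      for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
        masm->nop();
      }
    }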
+
void Assembler::RecordComment(const char* msg) {
if (FLAG_debug_code) {
EnsureSpace ensure_space(this);
@@ -2351,13 +2401,16 @@ void Assembler::RecordStatementPosition(int pos) {
}
-void Assembler::WriteRecordedPositions() {
+bool Assembler::WriteRecordedPositions() {
+ bool written = false;
+
// Write the statement position if it is different from what was written last
// time.
if (current_statement_position_ != written_statement_position_) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
written_statement_position_ = current_statement_position_;
+ written = true;
}
// Write the position if it is different from what was written last time and
@@ -2367,7 +2420,11 @@ void Assembler::WriteRecordedPositions() {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::POSITION, current_position_);
written_position_ = current_position_;
+ written = true;
}
+
+ // Return whether something was written.
+ return written;
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 9ece7443..7dcbab5c 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -468,9 +468,16 @@ class Assembler : public Malloced {
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
+
static const int kCallInstructionLength = 5;
static const int kJSReturnSequenceLength = 6;
+ // The debug break slot must be able to contain a call instruction.
+ static const int kDebugBreakSlotLength = kCallInstructionLength;
+
// ---------------------------------------------------------------------------
// Code generation
//
@@ -637,6 +644,7 @@ class Assembler : public Malloced {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
+ void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, const Operand& src);
@@ -790,6 +798,15 @@ class Assembler : public Malloced {
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
+ // Non-temporal XMM moves.
+ void movntdqa(XMMRegister dst, const Operand& src);
+ void movntdq(const Operand& dst, XMMRegister src);
+ // Prefetch src position into cache level.
+ // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
+ // non-temporal prefetch (prefetchnta).
+ void prefetch(const Operand& src, int level);
+ // TODO(lrn): Need SFENCE for movnt?
+
// Debugging
void Print();
@@ -799,13 +816,16 @@ class Assembler : public Malloced {
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
void RecordComment(const char* msg);
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
- void WriteRecordedPositions();
+ bool WriteRecordedPositions();
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
@@ -823,6 +843,8 @@ class Assembler : public Malloced {
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+ static bool IsNop(Address addr) { return *addr == 0x90; }
+
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
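
[Editorial note] kDebugBreakSlotLength equals kCallInstructionLength because the debugger patches the slot's five one-byte nops (0x90) into a five-byte call (E8 rel32) in place. A self-contained sketch of the IsNop contract:

    #include <cassert>
    typedef unsigned char byte;
    static bool IsNop(const byte* addr) { return *addr == 0x90; }

    int main() {
      byte slot[5] = {0x90, 0x90, 0x90, 0x90, 0x90};  // freshly emitted slot
      assert(IsNop(slot));   // unpatched: reads as a nop
      slot[0] = 0xE8;        // debugger patches in "call rel32"
      assert(!IsNop(slot));  // patched: no longer a nop
      return 0;
    }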
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 60862581..3adb014b 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -226,8 +226,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: number of elements
// ecx: start of next object
__ mov(eax, Factory::fixed_array_map());
- __ mov(Operand(edi, JSObject::kMapOffset), eax); // setup the map
- __ mov(Operand(edi, Array::kLengthOffset), edx); // and length
+ __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
+ __ SmiTag(edx);
+ __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
// Initialize the fields to undefined.
// ebx: JSObject
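
[Editorial note] The SmiTag before the store reflects the theme of this patch: FixedArray lengths are now kept smi-tagged in the heap. On ia32, kSmiTag == 0 and kSmiTagSize == 1, so tagging is a single left shift; a scalar sketch of the encoding:

    #include <cassert>
    #include <cstdint>

    int32_t SmiTag(int32_t value) { return value << 1; }  // low bit 0 marks a smi
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }

    int main() {
      int32_t len = 7;
      int32_t tagged = SmiTag(len);
      assert((tagged & 1) == 0);        // kSmiTagMask == 1: tag bit is clear
      assert(SmiUntag(tagged) == len);  // round-trips
      return 0;
    }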
@@ -330,10 +331,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(greater_equal, &exit, not_taken);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit, not_taken);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -468,11 +467,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmp(ebx, Factory::undefined_value());
__ j(equal, &use_global_receiver);
+ // We don't use IsObjectJSObjectType here because we jump on success.
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &convert_to_object);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
__ j(below_equal, &shift_arguments);
__ bind(&convert_to_object);
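
[Editorial note] The sub/cmp pair folds the old two-branch range check into one unsigned comparison: after subtracting FIRST_JS_OBJECT_TYPE, any instance type below the range wraps around to a large unsigned value and fails below_equal. A scalar check of the trick:

    #include <cassert>
    #include <cstdint>

    bool InRange(uint8_t type, uint8_t first, uint8_t last) {
      // One unsigned compare replaces the old below/above branch pair.
      return static_cast<uint8_t>(type - first) <=
             static_cast<uint8_t>(last - first);
    }

    int main() {
      assert(InRange(5, 3, 9));
      assert(!InRange(2, 3, 9));    // 2 - 3 wraps to 255
      assert(!InRange(10, 3, 9));
      return 0;
    }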
@@ -548,6 +547,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(ebx);
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
@@ -615,12 +615,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
+ // We don't use IsObjectJSObjectType here because we jump on success.
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &call_to_object);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(less_equal, &push_receiver);
+ __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ __ j(below_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);
@@ -752,15 +752,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
- // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
- __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+ __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
Factory::fixed_array_map());
- __ mov(FieldOperand(scratch1, Array::kLengthOffset),
- Immediate(initial_capacity));
+ __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(initial_capacity)));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@@ -847,23 +847,22 @@ static void AllocateJSArray(MacroAssembler* masm,
__ lea(elements_array, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
- // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
- ASSERT(kSmiTag == 0);
- __ SmiUntag(array_size); // Convert from smi to value.
- __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+ __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
Factory::fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
- __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+ __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
if (fill_with_hole) {
+ __ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(eax, Factory::the_hole_value());
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index c55ec7b2..29b6c691 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -46,12 +46,12 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
+// Platform-specific FrameRegisterState functions.
-void DeferredCode::SaveRegisters() {
+void FrameRegisterState::Save(MacroAssembler* masm) const {
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int action = registers_[i];
if (action == kPush) {
@@ -63,7 +63,7 @@ void DeferredCode::SaveRegisters() {
}
-void DeferredCode::RestoreRegisters() {
+void FrameRegisterState::Restore(MacroAssembler* masm) const {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
int action = registers_[i];
@@ -77,6 +77,45 @@ void DeferredCode::RestoreRegisters() {
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ frame_state_.Save(masm_);
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ frame_state_.Restore(masm_);
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->Save(masm);
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ frame_state_->Restore(masm);
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
+}
+
+
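
[Editorial note] The two helper flavors bracket a runtime call in different ways: the virtual-frame flavor saves and restores the registers recorded in a FrameRegisterState, while the IC flavor enters and leaves an internal frame. The slow-case generators added later in this patch use them in the same shape (a sketch mirroring StringCharCodeAtGenerator::GenerateSlow):

    void GenerateSlowPath(MacroAssembler* masm,
                          const RuntimeCallHelper& call_helper) {
      call_helper.BeforeCall(masm);  // save frame state or enter frame
      // ... push arguments, then e.g.:
      // __ CallRuntime(Runtime::kStringCharCodeAt, 2);
      call_helper.AfterCall(masm);   // restore frame state or leave frame
    }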
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -1407,10 +1446,40 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
}
-static void CheckTwoForSminess(MacroAssembler* masm,
- Register left, Register right, Register scratch,
- TypeInfo left_info, TypeInfo right_info,
- DeferredInlineBinaryOperation* deferred);
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ Register scratch,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (left.is(right)) {
+ if (!left_info.IsSmi()) {
+ __ test(left, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left);
+ }
+ } else if (!left_info.IsSmi()) {
+ if (!right_info.IsSmi()) {
+ __ mov(scratch, left);
+ __ or_(scratch, Operand(right));
+ __ test(scratch, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ __ test(left, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ if (FLAG_debug_code) __ AbortIfNotSmi(right);
+ }
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left);
+ if (!right_info.IsSmi()) {
+ __ test(right, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(right);
+ }
+ }
+}
// Implements a binary operation using a deferred code object and some
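
[Editorial note] In the fully unknown case the helper above ORs the two operands and tests kSmiTagMask once: both values are smis exactly when the OR still has a clear tag bit, since heap pointers carry tag bit 1. A scalar sketch:

    #include <cassert>
    #include <cstdint>

    const int32_t kSmiTagMask = 1;

    bool BothSmi(int32_t a, int32_t b) { return ((a | b) & kSmiTagMask) == 0; }

    int main() {
      assert(BothSmi(4 << 1, 7 << 1));   // two tagged smis
      assert(!BothSmi(4 << 1, 0x1001));  // a heap pointer has the tag bit set
      return 0;
    }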
@@ -1500,19 +1569,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
- if (left->reg().is(right->reg())) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- } else {
- // Use the quotient register as a scratch for the tag check.
- if (!left_is_in_eax) __ mov(eax, left->reg());
- left_is_in_eax = false; // About to destroy the value in eax.
- __ or_(eax, Operand(right->reg()));
- ASSERT(kSmiTag == 0); // Adjust test if not the case.
- __ test(eax, Immediate(kSmiTagMask));
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
+ left_type_info, right_type_info, deferred);
+ if (!left_is_in_eax) {
+ __ mov(eax, left->reg());
}
- deferred->Branch(not_zero);
-
- if (!left_is_in_eax) __ mov(eax, left->reg());
// Sign extend eax into edx:eax.
__ cdq();
// Check for 0 divisor.
@@ -1635,8 +1696,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
__ cmp(answer.reg(), 0xc0000000);
deferred->Branch(negative);
} else {
- CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info, deferred);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+ left_type_info, right_type_info, deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
@@ -1712,8 +1773,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
- CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info, deferred);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+ left_type_info, right_type_info, deferred);
__ mov(answer.reg(), left->reg());
switch (op) {
@@ -2585,9 +2646,8 @@ void CodeGenerator::Comparison(AstNode* node,
ASSERT(temp.is_valid());
__ mov(temp.reg(),
FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
temp.Unuse();
operand.Unuse();
dest->Split(not_zero);
@@ -2681,11 +2741,9 @@ void CodeGenerator::Comparison(AstNode* node,
// left_side is a sequential ASCII string.
left_side = Result(left_reg);
right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
// Test string equality and comparison.
+ Label comparison_done;
if (cc == equal) {
- Label comparison_done;
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(not_equal, &comparison_done);
@@ -2693,34 +2751,25 @@ void CodeGenerator::Comparison(AstNode* node,
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
__ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
char_value);
- __ bind(&comparison_done);
} else {
- __ mov(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiUntag(temp2.reg());
- __ sub(Operand(temp2.reg()), Immediate(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzx_b(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
+ __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ // If the length is 0 then the jump is taken and the flags
+ // correctly represent being less than the one-character string.
+ __ j(below, &comparison_done);
// Compare the first character of the string with the
// constant 1-character string.
uint8_t char_value =
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmp(Operand(temp2.reg()), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ char_value);
+ __ j(not_equal, &comparison_done);
// If the first character is the same then the long string sorts after
// the short one.
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
- __ bind(&characters_were_different);
}
- temp2.Unuse();
+ __ bind(&comparison_done);
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
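
[Editorial note] The rewrite leans on smi ordering: tagging is a left shift, so unsigned comparison of tagged lengths orders exactly like the raw integers, and cmp length, Smi::FromInt(1) with a below jump catches precisely the empty string. A scalar check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t zero = 0u << 1, one = 1u << 1;  // smi-tagged lengths 0 and 1
      assert(zero < one);    // empty string: "below" is taken
      assert(!(one < one));  // length 1: falls through to the byte compare
      return 0;
    }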
@@ -4109,9 +4158,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// eax: value to be iterated over
__ test(eax, Immediate(kSmiTagMask));
primitive.Branch(zero);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
jsobject.Branch(above_equal);
primitive.Bind();
@@ -4198,7 +4245,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@@ -4210,7 +4256,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -5725,26 +5770,66 @@ void CodeGenerator::VisitCall(Call* node) {
// Allocate a frame slot for the receiver.
frame_->Push(Factory::undefined_value());
+
+ // Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ // Result to hold the result of the function resolution and the
+ // final result of the eval call.
+ Result result;
+
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ JumpTarget done;
+ if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->slot()->type() == Slot::LOOKUP);
+ JumpTarget slow;
+ // Prepare the stack for the call to
+ // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
+ // function, the first argument to the eval call and the
+ // receiver.
+ Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ frame_->Push(&fun);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+ frame_->PushParameterAt(-1);
+
+ // Resolve the call.
+ result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+
+ done.Jump(&result);
+ slow.Bind();
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval by
+ // pushing the loaded function, the first argument to the eval
+ // call and the receiver.
frame_->PushElementAt(arg_count + 1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
-
- // Push the receiver.
frame_->PushParameterAt(-1);
// Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+ // If we generated fast-case code bind the jump-target where fast
+ // and slow case merge.
+ if (done.is_linked()) done.Bind(&result);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@@ -5912,18 +5997,31 @@ void CodeGenerator::VisitCall(Call* node) {
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
+ // Call the function.
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
} else {
+ // Push the receiver onto the frame.
Load(property->obj());
- frame()->Dup();
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+
+ // Load the name of the function.
Load(property->key());
- Result function = EmitKeyedLoad();
- Result receiver = frame_->Pop();
- frame_->Push(&function);
- frame_->Push(&receiver);
- }
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result =
+ frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ frame_->Push(&result);
+ }
}
} else {
@@ -6020,29 +6118,67 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-// This generates code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat, 8 and 16 bit characters and cons strings where the
-// answer is found in the left hand branch of the cons. The slow case will
-// flatten the string, which will ensure that the answer is in the left hand
-// side the next time around.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateFastCharCodeAt");
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ Set(result_, Immediate(Factory::undefined_value()));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ Set(result_, Immediate(Factory::nan_value()));
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns a non-smi (undefined) in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
-
- // We will mutate the index register and possibly the object register.
- // The case where they are somehow the same register is handled
- // because we only mutate them in the case where the receiver is a
- // heap object and the index is not.
object.ToRegister();
index.ToRegister();
+ // We might mutate the object register.
frame_->Spill(object.reg());
- frame_->Spill(index.reg());
// We need two extra registers.
Result result = allocator()->Allocate();
@@ -6050,33 +6186,40 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
- // There is no virtual frame effect from here up to the final result
- // push.
- Label slow_case;
- Label exit;
- StringHelper::GenerateFastCharCodeAt(masm_,
- object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg(),
- &slow_case,
- &slow_case,
- &slow_case,
- &slow_case);
- __ jmp(&exit);
-
- __ bind(&slow_case);
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ Set(result.reg(), Immediate(Factory::undefined_value()));
-
- __ bind(&exit);
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateCharFromCode");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -6085,16 +6228,97 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
code.ToRegister();
ASSERT(code.is_valid());
- // StringHelper::GenerateCharFromCode may do a runtime call.
- frame_->SpillAll();
-
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
- StringHelper::GenerateCharFromCode(masm_,
- code.reg(),
- result.reg(),
- CALL_FUNCTION);
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code.reg(), result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Set(result_, Immediate(Smi::FromInt(0)));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Set(result_, Immediate(Factory::empty_string()));
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need three extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch1 = allocator()->Allocate();
+ ASSERT(scratch1.is_valid());
+ Result scratch2 = allocator()->Allocate();
+ ASSERT(scratch2.is_valid());
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object.reg(),
+ index.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
@@ -6154,14 +6378,15 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
ASSERT(map.is_valid());
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
- __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
- __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Do a range test for JSObject type. We can't use
+ // MacroAssembler::IsInstanceJSObjectType, because we are using a
+ // ControlDestination, so we copy its implementation here.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
obj.Unuse();
map.Unuse();
destination()->Split(below_equal);
@@ -6197,9 +6422,8 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(temp.is_valid());
__ mov(temp.reg(),
FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
obj.Unuse();
temp.Unuse();
destination()->Split(not_zero);
@@ -6273,20 +6497,16 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- { Result tmp = allocator()->Allocate();
- __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
- __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
- null.Branch(below);
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(below);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
- }
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
// Check if the constructor in the map is a function.
{ Result tmp = allocator()->Allocate();
@@ -6600,9 +6820,9 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
// Set length.
- __ SmiUntag(ecx);
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
+ __ SmiUntag(ecx);
__ mov(edx, Immediate(Factory::the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
@@ -6706,7 +6926,6 @@ void DeferredSearchCache::Generate() {
// Check if we could add new entry to cache.
__ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiTag(ebx);
__ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ j(greater, &add_new_entry);
@@ -6868,8 +7087,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// has no indexed interceptor.
__ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
deferred->Branch(below);
- __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
- __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+ KeyedLoadIC::kSlowCaseBitFieldMask);
deferred->Branch(not_zero);
// Check the object's elements are in fast case.
@@ -6904,12 +7123,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// (or them and test against Smi mask.)
__ mov(tmp2.reg(), tmp1.reg());
- RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
- __ CallStub(&recordWrite1);
-
- RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
- __ CallStub(&recordWrite2);
-
+ __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
+ __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
__ bind(&done);
deferred->BindExit();
@@ -8127,10 +8342,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
- __ CmpObjectType(answer.reg(), FIRST_NONSTRING_TYPE, temp.reg());
+ __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
temp.Unuse();
answer.Unuse();
destination()->Split(below);
@@ -8152,9 +8367,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
// It can be an undetectable object.
frame_->Spill(answer.reg());
__ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movzx_b(answer.reg(),
- FieldOperand(answer.reg(), Map::kBitFieldOffset));
- __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
answer.Unuse();
destination()->Split(not_zero);
@@ -8181,14 +8395,15 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->false_target()->Branch(equal);
// It can be an undetectable object.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
- __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
- __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ // Do a range test for JSObject type. We can't use
+ // MacroAssembler::IsInstanceJSObjectType, because we are using a
+ // ControlDestination, so we copy its implementation here.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
answer.Unuse();
map.Unuse();
destination()->Split(below_equal);
@@ -8608,13 +8823,11 @@ Result CodeGenerator::EmitKeyedLoad() {
key.ToRegister();
receiver.ToRegister();
- // Use a fresh temporary for the index and later the loaded
- // value.
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is in elements, which is guaranteed non-shared.
DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(result.reg(),
+ new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
key.reg());
@@ -8647,19 +8860,20 @@ Result CodeGenerator::EmitKeyedLoad() {
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
- // Shift the key to get the actual index value and check that
- // it is within bounds. Use unsigned comparison to handle negative keys.
- __ mov(result.reg(), key.reg());
- __ SmiUntag(result.reg());
- __ cmp(result.reg(),
+ // Check that the key is within bounds.
+ __ cmp(key.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
- __ mov(result.reg(), Operand(elements.reg(),
- result.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
+ // Load and check that the result is not the hole.
+ // Key holds a smi.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+ __ mov(elements.reg(),
+ FieldOperand(elements.reg(),
+ key.reg(),
+ times_2,
+ FixedArray::kHeaderSize));
+ result = elements;
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@@ -8744,7 +8958,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Check whether it is possible to omit the write barrier. If the elements
// array is in new space or the value written is a smi we can safely update
- // the elements array without updating the remembered set.
+ // the elements array without write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@@ -8784,40 +8998,6 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
#define __ ACCESS_MASM(masm)
-static void CheckTwoForSminess(MacroAssembler* masm,
- Register left, Register right, Register scratch,
- TypeInfo left_info, TypeInfo right_info,
- DeferredInlineBinaryOperation* deferred) {
- if (left.is(right)) {
- if (!left_info.IsSmi()) {
- __ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- __ mov(scratch, left);
- __ or_(scratch, Operand(right));
- __ test(scratch, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- __ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- if (!right_info.IsSmi()) {
- __ test(right, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- }
-}
-
-
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
Property* property = expression_->AsProperty();
@@ -9014,7 +9194,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
@@ -9115,20 +9296,19 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
// Undetectable => false.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ebx, 1 << Map::kIsUndetectable);
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
__ j(not_zero, &false_result);
// JavaScript object => true.
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
__ j(above_equal, &true_result);
// String value => false iff empty.
- __ cmp(ecx, FIRST_NONSTRING_TYPE);
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string);
- __ mov(edx, FieldOperand(eax, String::kLengthOffset));
ASSERT(kSmiTag == 0);
- __ test(edx, Operand(edx));
+ __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -10110,13 +10290,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ mov(ecx, ebx);
__ xor_(ecx, Operand(edx));
__ mov(eax, ecx);
- __ sar(eax, 16);
+ __ shr(eax, 16);
__ xor_(ecx, Operand(eax));
__ mov(eax, ecx);
- __ sar(eax, 8);
+ __ shr(eax, 8);
__ xor_(ecx, Operand(eax));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
// ST[0] == double value.
// ebx = low 32 bits of double value.
// edx = high 32 bits of double value.
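
[Editorial note] Replacing sar with shr is a quiet correctness fix in the hash mix: when the high bit of the xor'ed halves is set, an arithmetic shift drags in copies of the sign bit and biases the cache index. A scalar illustration (signed right shift shown as the arithmetic shift compilers conventionally produce):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0x80000001u;
      assert((x >> 16) == 0x00008000u);  // shr: zero-extended
      int32_t sx = static_cast<int32_t>(x);
      assert((sx >> 16) == static_cast<int32_t>(0xFFFF8000u));  // sar: sign-extended
      return 0;
    }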
@@ -10977,9 +11158,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ test(ecx, Operand(ecx));
__ j(zero, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
- __ SmiUntag(ecx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -10988,6 +11168,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
// Copy the fixed array slots.
Label loop;
@@ -11116,6 +11298,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
__ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
__ cmp(edx, Operand(eax));
__ j(greater, &runtime);
@@ -11359,7 +11542,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, 1); // Divide length by two (length is not a smi).
+ __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
__ sub(Operand(mask), Immediate(1)); // Make mask.
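
[Editorial note] With the cache length now smi-tagged (len << 1), the single shr by kSmiTagSize + 1 == 2 untags and halves in one instruction; the halving is because each cache entry spans two array elements. Scalar check:

    #include <cassert>

    int main() {
      int len = 128;           // cache array length, two slots per entry
      int smi_len = len << 1;  // smi-tagged, as stored in the length field
      assert((smi_len >> 2) == len / 2);  // untag and divide by two, fused
      return 0;
    }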
// Calculate the entry in the number string cache. The hash value in the
@@ -11450,12 +11633,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- masm->RecordWriteHelper(object_, addr_, scratch_);
- masm->ret(0);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -11588,13 +11765,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// Get the type of the first operand.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-
// If the first object is a JS object, we have done pointer comparison.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
@@ -11605,17 +11779,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
- __ cmp(ecx, ODDBALL_TYPE);
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
- __ cmp(ecx, ODDBALL_TYPE);
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
// Fall through to the general case.
@@ -12257,12 +12428,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &slow, not_taken);
// Check that the left hand is a JS object.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
- __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow, not_taken);
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
@@ -12287,12 +12453,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Check that the function prototype is a JS object.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
// Register mapping:
// eax is object map.
@@ -12390,152 +12551,205 @@ const char* CompareStub::GetName() {
}
-void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string);
+ __ test(object_, Immediate(kSmiTagMask));
+ __ j(zero, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
- __ test(result, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string);
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
ASSERT(kSmiTag == 0);
- __ test(index, Immediate(kSmiTagMask));
- __ j(not_zero, index_not_smi);
+ __ test(index_, Immediate(kSmiTagMask));
+ __ j(not_zero, &index_not_smi_);
- // Check for index out of range.
- __ cmp(index, FieldOperand(object, String::kLengthOffset));
- __ j(above_equal, index_out_of_range);
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ // Check for index out of range.
+ __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, &not_a_flat_string);
-
- // Check for 1-byte or 2-byte string.
- ASSERT(kAsciiStringTag != 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // index is smi (powered by 2).
- __ movzx_w(result, FieldOperand(object,
- index, times_1,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
// Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, kStringRepresentationMask);
- __ cmp(result, kConsStringTag);
- __ j(not_equal, slow_case);
+ __ test(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
- __ mov(result, FieldOperand(object, ConsString::kSecondOffset));
- __ cmp(Operand(result), Factory::empty_string());
- __ j(not_equal, slow_case);
+ __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ mov(object, FieldOperand(object, ConsString::kFirstOffset));
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
- // ASCII string.
- __ bind(&ascii_string);
- // Put untagged index into scratch register.
- __ mov(scratch, index);
- __ SmiUntag(scratch);
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ ASSERT(kAsciiStringTag != 0);
+ __ test(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzx_w(result_, FieldOperand(object_,
+ scratch_, times_1, // Scratch is smi-tagged.
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+ // ASCII string.
// Load the byte into the result register.
- __ movzx_b(result, FieldOperand(object,
- scratch, times_1,
- SeqAsciiString::kHeaderSize));
+ __ bind(&ascii_string);
+ __ SmiUntag(scratch_);
+ __ movzx_b(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- __ SmiTag(result);
+ __ SmiTag(result_);
+ __ bind(&exit_);
}
-void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
- Label slow_case;
- Label exit;
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, eax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ ASSERT(kSmiTag == 0);
+ __ test(scratch_, Immediate(kSmiTagMask));
+ __ j(not_zero, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code,
+ __ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case, not_taken);
+ __ j(not_zero, &slow_case_, not_taken);
- __ Set(result, Immediate(Factory::single_character_string_cache()));
+ __ Set(result_, Immediate(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
- __ mov(result, FieldOperand(result,
- code, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, Factory::undefined_value());
- __ j(equal, &slow_case, not_taken);
- __ jmp(&exit);
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, Factory::undefined_value());
+ __ j(equal, &slow_case_, not_taken);
+ __ bind(&exit_);
+}
- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(eax)) {
- __ mov(result, eax);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(eax));
- __ pop(eax); // Save return address.
- __ push(code);
- __ push(eax); // Restore return address.
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }
- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(eax));
- __ ret(0);
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
}
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -12674,14 +12888,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ebx: length of resulting flat string as a smi
// edx: second string
Label non_ascii_string_add_flat_result;
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ test(ecx, Immediate(kAsciiStringTag));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &string_add_runtime);
__ bind(&make_flat_ascii_string);
@@ -12722,8 +12934,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// edx: second string
__ bind(&non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kAsciiStringTag);
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
@@ -13289,6 +13500,211 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
#undef __
+#define __ masm.
+
+MemCopyFunction CreateMemCopyFunction() {
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler masm(buffer, static_cast<int>(actual_size));
+
+ // Generated code is put into a fixed, unmovable buffer, and not into
+ // the V8 heap. We can't, and don't, refer to any relocatable addresses
+ // (e.g., the JavaScript NaN object).
+
+ // 32-bit C declaration function calls pass arguments on stack.
+
+ // Stack layout:
+ // esp[12]: Third argument, size.
+ // esp[8]: Second argument, source pointer.
+ // esp[4]: First argument, destination pointer.
+ // esp[0]: return address
+
+ const int kDestinationOffset = 1 * kPointerSize;
+ const int kSourceOffset = 2 * kPointerSize;
+ const int kSizeOffset = 3 * kPointerSize;
+
+ int stack_offset = 0; // Update if we change the stack height.
+
+ if (FLAG_debug_code) {
+ __ cmp(Operand(esp, kSizeOffset + stack_offset),
+ Immediate(kMinComplexMemCopy));
+ Label ok;
+ __ j(greater_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope enable(SSE2);
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(Operand(edx), Immediate(16));
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
+
+ // edi is now aligned. Check if esi is also aligned.
+ Label unaligned_source;
+ __ test(Operand(src), Immediate(0x0F));
+ __ j(not_zero, &unaligned_source);
+ {
+ __ IncrementCounter(&Counters::memcopy_aligned, 1);
+ // Copy loop for aligned source and destination.
+ __ mov(edx, count);
+ Register loop_count = ecx;
+ Register count = edx;
+ __ shr(loop_count, 5);
+ {
+ // Main copy loop.
+ Label loop;
+ __ bind(&loop);
+ __ prefetch(Operand(src, 0x20), 1);
+ __ movdqa(xmm0, Operand(src, 0x00));
+ __ movdqa(xmm1, Operand(src, 0x10));
+ __ add(Operand(src), Immediate(0x20));
+
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ add(Operand(dst), Immediate(0x20));
+
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ }
+
+ // At most 31 bytes to copy.
+ Label move_less_16;
+ __ test(Operand(count), Immediate(0x10));
+ __ j(zero, &move_less_16);
+ __ movdqa(xmm0, Operand(src, 0));
+ __ add(Operand(src), Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ __ add(Operand(dst), Immediate(0x10));
+ __ bind(&move_less_16);
+
+      // At most 15 bytes to copy. Copy 16 overlapping bytes ending exactly
+      // at the last destination byte; safe since src and dst do not overlap.
+ __ and_(count, 0xF);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+ __ Align(16);
+ {
+ // Copy loop for unaligned source and aligned destination.
+    // If the source is not aligned, we can't read it as efficiently.
+ __ bind(&unaligned_source);
+ __ IncrementCounter(&Counters::memcopy_unaligned, 1);
+ __ mov(edx, ecx);
+ Register loop_count = ecx;
+ Register count = edx;
+ __ shr(loop_count, 5);
+ {
+      // Main copy loop.
+ Label loop;
+ __ bind(&loop);
+ __ prefetch(Operand(src, 0x20), 1);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ add(Operand(src), Immediate(0x20));
+
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ add(Operand(dst), Immediate(0x20));
+
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ }
+
+ // At most 31 bytes to copy.
+ Label move_less_16;
+ __ test(Operand(count), Immediate(0x10));
+ __ j(zero, &move_less_16);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ add(Operand(src), Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ __ add(Operand(dst), Immediate(0x10));
+ __ bind(&move_less_16);
+
+      // At most 15 bytes to copy. Copy 16 overlapping bytes ending exactly
+      // at the last destination byte; safe since src and dst do not overlap.
+ __ and_(count, 0x0F);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+
+ } else {
+ __ IncrementCounter(&Counters::memcopy_noxmm, 1);
+ // SSE2 not supported. Unlikely to happen in practice.
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ __ cld();
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ // Copy the first word.
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+
+    // Increment src and dst so that dst is aligned.
+ __ mov(edx, dst);
+ __ and_(edx, 0x03);
+ __ neg(edx);
+ __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
+    // edi is now aligned, ecx holds the number of remaining bytes to copy.
+
+ __ mov(edx, count);
+ count = edx;
+ __ shr(ecx, 2); // Make word count instead of byte count.
+ __ rep_movs();
+
+    // At most 3 bytes left to copy. Copy 4 overlapping bytes ending at the
+    // last destination byte.
+ __ and_(count, 3);
+ __ mov(eax, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, count, times_1, -4), eax);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+  // Return the buffer as a function pointer callable from C++.
+ return FUNCTION_CAST<MemCopyFunction>(buffer);
+}
+
+#undef __
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
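For context, here is a minimal sketch of how the routine above is meant to be used from C++. The MemCopyFunction typedef and the kMinComplexMemCopy threshold are assumed to match the declarations this patch adds elsewhere (src/utils.h); the wrapper name is hypothetical:

    // Sketch, not the actual V8 glue: create the stub once, then dispatch
    // large copies to it and keep small copies in plain C.
    typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);

    static void MemCopyWrapper(void* dest, const void* src, size_t size) {
      static MemCopyFunction memcopy = CreateMemCopyFunction();
      if (size >= kMinComplexMemCopy) {
        memcopy(dest, src, size);  // generated SSE2 / rep-movs code
      } else {
        memcpy(dest, src, size);   // the stub asserts the minimum size in debug code
      }
    }

Note the tail handling in the generated code: after the bulk loop, the last 16 (or, without SSE2, 4) bytes are copied with a single unaligned access that ends exactly at the last destination byte, deliberately overlapping bytes already written. This is why the stub demands at least kMinComplexMemCopy bytes.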
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a098dc38..a432c13f 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -38,8 +38,10 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
+class FrameRegisterState;
class RegisterAllocator;
class RegisterFile;
+class RuntimeCallHelper;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@@ -314,7 +316,9 @@ class CodeGenerator: public AstVisitor {
static bool ShouldGenerateLog(Expression* type);
#endif
- static void RecordPositions(MacroAssembler* masm, int pos);
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
@@ -515,6 +519,16 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+  // Emits a code sequence that jumps to deferred code if the inputs
+  // are not both smis. Cannot live in MacroAssembler because it takes
+  // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ Register scratch,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred);
+
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
@@ -592,6 +606,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
@@ -621,10 +637,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -910,37 +929,6 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
- // Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for four reasons (in the
- // listed order):
- // * Receiver is not a string (receiver_not_string label).
- // * Index is not a smi (index_not_smi label).
- // * Index is out of range (index_out_of_range).
- // * Some other reason (slow_case label). In this case it's
- // guaranteed that the above conditions are not violated,
- // e.g. it's safe to assume the receiver is a string and the
- // index is a non-negative smi < length.
- // When successful, object, index, and scratch are clobbered.
- // Otherwise, scratch and result are clobbered.
- static void GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case);
-
- // Generates code for creating a one-char string from the given char
- // code. May do a runtime call, so any register can be clobbered
- // and, if the given invoke flag specifies a call, an internal frame
- // is required. In tail call mode the result must be eax register.
- static void GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- InvokeFlag flag);
-
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
@@ -1083,42 +1071,6 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register addr, Register scratch)
- : object_(object), addr_(addr), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register addr_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
- object_.code(), addr_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits. 4 bits for each of the three
- // registers (object, address and scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField<uint32_t, 0, 4> {};
- class AddressBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- AddressBits::encode(addr_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
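The comment on JumpIfNotBothSmiUsingTypeInfo is the heart of the helper. A rough sketch of the pruning it describes, assuming TypeInfo exposes an IsSmi() predicate (the real emitter lives in codegen-ia32.cc and may order things differently):

    // Sketch of the idea only: statically known smis need no dynamic test.
    if (left_info.IsSmi() && right_info.IsSmi()) {
      // Both operands are known smis: emit no check at all.
    } else if (left_info.IsSmi()) {
      __ test(right, Immediate(kSmiTagMask));   // test only the unknown side
      deferred->Branch(not_zero);
    } else if (right_info.IsSmi()) {
      __ test(left, Immediate(kSmiTagMask));
      deferred->Branch(not_zero);
    } else {
      __ mov(scratch, left);                    // combine both tag bits...
      __ or_(scratch, Operand(right));
      __ test(scratch, Immediate(kSmiTagMask)); // ...and test them once
      deferred->Branch(not_zero);
    }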
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 9780f3b0..9b558bd9 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -69,6 +69,27 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
}
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCodeWithCall(
+ Debug::debug_break_slot()->entry(),
+ Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -208,10 +229,31 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction.
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
+ __ nop();
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, true);
+}
+
+
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->ret(0);
}
+
// FrameDropper is a code replacement for a JavaScript frame with possibly
// several frames above.
// There are no calling conventions here because it never actually gets called,
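Taken together, the three BreakLocationIterator methods and Debug::GenerateSlot above implement a simple patching protocol; conceptually (lengths are the Assembler constants used above):

    // Life cycle of one debug break slot:
    //   GenerateSlot:           kDebugBreakSlotLength nop bytes, tagged with
    //                           a debug-break-slot reloc entry.
    //   SetDebugBreakAtSlot:    the nops are overwritten in place with a call
    //                           to the debug break stub (plus padding).
    //   ClearDebugBreakAtSlot:  the original bytes are copied back from
    //                           original_rinfo().
    // Because the slot is pure padding, patching never moves or resizes any
    // surrounding instruction.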
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 58c22afc..dc4c27e8 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -817,6 +817,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
// Returns NULL if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
+ case 0x18: return "prefetch";
case 0xA2: return "cpuid";
case 0x31: return "rdtsc";
case 0xBE: return "movsx_b";
@@ -923,14 +924,18 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF6:
- { int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- if (mod == 3 && regop == eax) {
- AppendToBuffer("test_b %s,%d", NameOfCPURegister(rm), *(data+2));
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("test_b ");
+ data += PrintRightOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
} else {
UnimplementedInstruction();
}
- data += 3;
}
break;
@@ -942,7 +947,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x0F:
{ byte f0byte = *(data+1);
const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0xA2 || f0byte == 0x31) {
+ if (f0byte == 0x18) {
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* suffix[] = {"nta", "1", "2", "3"};
+ AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
} else if ((f0byte & 0xF0) == 0x80) {
@@ -1070,6 +1081,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x2A) {
+ // movntdqa
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
} else {
UnimplementedInstruction();
}
@@ -1122,6 +1140,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xE7) {
+ AppendToBuffer("movntdq ");
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
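The new 0x0F 0x18 case pulls the prefetch hint out of the ModRM byte. A sketch of the decoding it relies on (standard x86 ModRM layout):

    // ModRM = mod(2 bits) | reg(3 bits) | rm(3 bits).
    // For 0F 18 the reg field selects the hint, not a register.
    int mod   = (modrm >> 6) & 3;
    int regop = (modrm >> 3) & 7;   // 0..3 -> nta, t0, t1, t2
    int rm    = modrm & 7;
    static const char* suffix[] = {"nta", "1", "2", "3"};
    // The disassembler prints "prefetch" + suffix[regop & 3], e.g. "prefetchnta".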
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 1b78772f..c7504440 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -186,12 +186,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
- EmitReturnSequence(function()->end_position());
+ EmitReturnSequence();
}
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -207,7 +207,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- CodeGenerator::RecordPositions(masm_, position);
+ CodeGenerator::RecordPositions(masm_, function()->end_position());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
@@ -1009,7 +1009,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ SmiTag(eax);
__ push(eax); // Enumeration cache length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
@@ -1019,7 +1018,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
__ push(eax);
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiTag(eax);
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
@@ -1728,6 +1726,29 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ VisitForValue(key, kAccumulator);
+ __ mov(ecx, eax);
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
+ arg_count, in_loop);
+ __ call(ic, mode);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ Apply(context_, eax);
+}
+
+
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@@ -1817,37 +1838,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
+      // Call to a keyed property.
+      // For a synthetic property, use a keyed load IC followed by a function
+      // call; for a regular property, use the keyed CallIC.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
if (prop->is_synthetic()) {
+ VisitForValue(prop->key(), kAccumulator);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
__ pop(edx); // We do not need to keep the receiver.
- } else {
- __ mov(edx, Operand(esp, 0)); // Keep receiver, to call function on.
- }
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test eax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- if (prop->is_synthetic()) {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call as it is treated specially
+ // by the LoadIC code.
+ __ nop();
// Push result (function).
__ push(eax);
// Push Global receiver.
__ mov(ecx, CodeGenerator::GlobalObject());
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+ EmitCallWithStub(expr);
} else {
- // Pop receiver.
- __ pop(ebx);
- // Push result (function).
- __ push(eax);
- __ push(ebx);
+ EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
- EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous
@@ -1904,76 +1919,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
- EmitCharFromCode(expr->arguments());
- } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
- EmitFastCharCodeAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else {
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2432,50 +2377,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
- Label slow_case, done;
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(eax,
- Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case);
- __ Set(ebx, Immediate(Factory::single_character_string_cache()));
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
- __ mov(ebx, FieldOperand(ebx,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(ebx, Factory::undefined_value());
- __ j(equal, &slow_case);
- __ mov(eax, ebx);
+ Label done;
+ StringCharFromCodeGenerator generator(eax, ebx);
+ generator.GenerateFast(masm_);
__ jmp(&done);
- __ bind(&slow_case);
- __ push(eax);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, eax);
+ Apply(context_, ebx);
}
-void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
- // TODO(fsc): Port the complete implementation from the classic back-end.
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = ebx;
+ Register index = eax;
+ Register scratch = ecx;
+ Register result = edx;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ Set(result, Immediate(Factory::nan_value()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
// Move the undefined value into the result register, which will
- // trigger the slow case.
- __ Set(eax, Immediate(Factory::undefined_value()));
- Apply(context_, eax);
+ // trigger conversion.
+ __ Set(result, Immediate(Factory::undefined_value()));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = ebx;
+ Register index = eax;
+ Register scratch1 = ecx;
+ Register scratch2 = edx;
+ Register result = eax;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Set(result, Immediate(Factory::empty_string()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Set(result, Immediate(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
}
+
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
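All three Emit* helpers above follow the same skeleton around the shared generators; stripped of the register choices, the pattern is (taken from the code above, not a new API):

    StringCharCodeAtGenerator generator(object, index, scratch, result,
                                        &need_conversion,    // index not a smi
                                        &need_conversion,    // index not a number
                                        &index_out_of_range,
                                        STRING_INDEX_IS_NUMBER);
    generator.GenerateFast(masm_);               // inline fast path
    __ jmp(&done);
    // ... bind the bail-out labels and preload 'result' with either the
    // final fallback value (NaN for charCodeAt, "" for charAt) or a value
    // that forces the slow path to convert ...
    NopRuntimeCallHelper call_helper;
    generator.GenerateSlow(masm_, call_helper);  // runtime fallback
    __ bind(&done);
    Apply(context_, result);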
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 644d2007..f339d2e1 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -57,6 +57,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register r0,
Register r1,
Register r2,
+ Register result,
DictionaryCheck check_dictionary) {
// Register use:
//
@@ -66,9 +67,10 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// r0 - used to hold the property dictionary.
//
// r1 - used for the index into the property dictionary
- // - holds the result on exit.
//
// r2 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
Label done;
@@ -149,7 +151,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(r1, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(result, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
}
@@ -159,14 +161,13 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Register key,
Register r0,
Register r1,
- Register r2) {
+ Register r2,
+ Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
- // key - holds the smi key on entry and is unchanged if a branch is
- // performed to the miss label. If the load succeeds and we
- // fall through, key holds the result on exit.
+ // key - holds the smi key on entry and is unchanged.
//
// Scratch registers:
//
@@ -175,6 +176,9 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeds and we fall through.
+
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
@@ -246,7 +250,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -298,63 +302,168 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_int, index_string;
- Label check_pixel_array, probe_dictionary;
- Label check_number_dictionary;
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS objects.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // r0 - used to hold the map of the receiver.
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, slow, not_taken);
// Get the map of the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(kSlowCaseBitFieldMask));
- __ j(not_zero, &slow, not_taken);
+ __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ KeyedLoadIC::kSlowCaseBitFieldMask);
+ __ j(not_zero, slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
- // into string objects work as intended.
+ // into string objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
- __ mov(ebx, eax);
- __ SmiUntag(ebx);
- // Get the elements array of the object.
- __ bind(&index_int);
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ CmpInstanceType(r0, JS_OBJECT_TYPE);
+ __ j(below, slow, not_taken);
+}
+
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register scratch,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // Scratch registers:
+ // scratch - used to hold elements of the receiver and the loaded value.
+ // result - holds the result on exit if the load succeeds and
+ // we fall through.
+
+ __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
+ __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
// Check that the key (index) is within bounds.
- __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
+ __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+ __ j(above_equal, out_of_range);
// Fast case: Do the load.
- __ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
- __ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
+ ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+ __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+ __ cmp(Operand(scratch), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
- __ j(equal, &slow);
- __ mov(eax, ecx);
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ mov(result, scratch);
+ }
+}
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if the key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
+ __ j(above_equal, not_symbol);
+
+ // Is the string an array index, with cached numeric value?
+ __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
+ __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string, not_taken);
+
+ // Is the string a symbol?
+ ASSERT(kSymbolTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
+ __ j(zero, not_symbol, not_taken);
+}
+
+
+// Picks out an array index from the hash field.
+// The generated code never falls through.
+static void GenerateIndexFromHash(MacroAssembler* masm,
+ Register key,
+ Register hash,
+ Label* index_smi) {
+ // Register use:
+ // key - holds the overwritten key on exit.
+ // hash - holds the key's hash. Clobbered.
+
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+  // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ // key: string key
+  // hash: the key's hash field.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ and_(hash, String::kArrayIndexValueMask);
+ __ shr(hash, String::kHashShift - kSmiTagSize);
+  // Here we actually clobber the key, which will be used if calling into the
+  // runtime later. However, as the new key is the numeric value of a string
+  // key, there is no difference in using either key.
+ __ mov(key, hash);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(index_smi);
+}
+
+
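GenerateIndexFromHash reduces to two bit operations; the scalar equivalent (constants are V8's; the shift is reduced by kSmiTagSize, so the result is already smi-tagged):

    uint32_t smi_key = (hash & String::kArrayIndexValueMask)
                       >> (String::kHashShift - kSmiTagSize);
    // smi_key == array_index << kSmiTagSize, i.e. a valid smi.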
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, check_string, index_smi, index_string;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
+
+ GenerateKeyedLoadReceiverCheck(masm, edx, ecx, &slow);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_string, not_taken);
+ __ bind(&index_smi);
+  // Now the key is known to be a smi. This point is also jumped to when a
+  // numeric string key has been converted to a smi.
+
+ GenerateFastArrayLoad(masm,
+ edx,
+ eax,
+ ecx,
+ eax,
+ &check_pixel_array,
+ &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
__ bind(&check_pixel_array);
// Check whether the elements is a pixel array.
// edx: receiver
- // ebx: untagged index
// eax: key
// ecx: elements
+ __ mov(ebx, eax);
+ __ SmiUntag(ebx);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
@@ -380,7 +489,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
eax,
ebx,
edx,
- edi);
+ edi,
+ eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
@@ -397,22 +507,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
- // The key is not a smi.
- // Is it a string?
- // edx: receiver
- // eax: key
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &slow);
- // Is the string an array index, with cached numeric value?
- __ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
- __ test(ebx, Immediate(String::kIsArrayIndexMask));
- __ j(not_zero, &index_string, not_taken);
-
- // Is the string a symbol?
- __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- ASSERT(kSymbolTag != 0);
- __ test(ebx, Immediate(kIsSymbolMask));
- __ j(zero, &slow, not_taken);
+ GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -453,14 +548,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ cmp(edi, Operand(ecx));
+ __ sub(edi, Operand(ecx));
__ j(above_equal, &slow);
// Load in-object property.
- __ sub(edi, Operand(ecx));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -473,21 +568,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ebx,
ecx,
edi,
+ eax,
DICTIONARY_CHECK_DONE);
- __ mov(eax, ecx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- __ and_(ebx, String::kArrayIndexHashMask);
- __ shr(ebx, String::kHashShift);
- __ jmp(&index_int);
+ GenerateIndexFromHash(masm, eax, ebx, &index_smi);
}
@@ -498,60 +585,29 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label miss;
- Label index_not_smi;
Label index_out_of_range;
- Label slow_char_code;
- Label got_char_code;
Register receiver = edx;
Register index = eax;
- Register code = ebx;
- Register scratch = ecx;
-
- StringHelper::GenerateFastCharCodeAt(masm,
- receiver,
- index,
- scratch,
- code,
- &miss, // When not a string.
- &index_not_smi,
- &index_out_of_range,
- &slow_char_code);
- // If we didn't bail out, code register contains smi tagged char
- // code.
- __ bind(&got_char_code);
- StringHelper::GenerateCharFromCode(masm, code, eax, JUMP_FUNCTION);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from char from code tail call");
-#endif
-
- // Check if key is a heap number.
- __ bind(&index_not_smi);
- __ CheckMap(index, Factory::heap_number_map(), &miss, true);
-
- // Push receiver and key on the stack (now that we know they are a
- // string and a number), and call runtime.
- __ bind(&slow_char_code);
- __ EnterInternalFrame();
- __ push(receiver);
- __ push(index);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- ASSERT(!code.is(eax));
- __ mov(code, eax);
- __ LeaveInternalFrame();
+ Register scratch1 = ebx;
+ Register scratch2 = ecx;
+ Register result = eax;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
- // Check if the runtime call returned NaN char code. If yes, return
- // undefined. Otherwise, we can continue.
- if (FLAG_debug_code) {
- ASSERT(kSmiTag == 0);
- __ test(code, Immediate(kSmiTagMask));
- __ j(zero, &got_char_code);
- __ mov(scratch, FieldOperand(code, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ Assert(equal, "StringCharCodeAt must return smi or heap number");
- }
- __ cmp(code, Factory::nan_value());
- __ j(not_equal, &got_char_code);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::undefined_value()));
__ ret(0);
@@ -583,8 +639,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
- __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
@@ -708,7 +764,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ fincstp();
// Fall through to slow case.
- // Slow case: Load key and receiver from stack and jump to runtime.
+ // Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
@@ -773,8 +829,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
@@ -792,9 +848,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
- __ mov(ebx, Operand(ecx));
- __ SmiUntag(ebx);
- __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast, taken);
// Slow case: call runtime.
@@ -804,7 +858,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
// eax: value
- // ecx: key
+ // ecx: key (a smi)
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
@@ -840,13 +894,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
- __ mov(ebx, ecx);
- __ SmiUntag(ebx); // untag
- __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow, not_taken);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(1 << kSmiTagSize));
+ Immediate(Smi::FromInt(1)));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
@@ -895,8 +947,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
@@ -1069,22 +1121,21 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
+ // -- edx : receiver
// -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ Label number, non_number, non_string, boolean, probe;
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
@@ -1104,7 +1155,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for string.
__ bind(&non_number);
- __ cmp(ebx, FIRST_NONSTRING_TYPE);
+ __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &non_string, taken);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::STRING_FUNCTION_INDEX, edx);
@@ -1115,7 +1166,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ cmp(edx, Factory::true_value());
__ j(equal, &boolean, not_taken);
__ cmp(edx, Factory::false_value());
- __ j(not_equal, &miss, taken);
+ __ j(not_equal, miss, taken);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@@ -1123,10 +1174,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm, argc);
}
@@ -1145,7 +1192,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Search dictionary - put result in register edi.
__ mov(edi, edx);
- GenerateDictionaryLoad(masm, miss, edx, ecx, eax, edi, ebx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(
+ masm, miss, edx, ecx, eax, edi, ebx, edi, CHECK_DICTIONARY);
// Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask));
@@ -1166,8 +1214,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
}
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+// The generated code never falls through.
+static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1175,20 +1223,20 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Label miss, global_object, non_global_object;
+ Label global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
- __ j(below, &miss, not_taken);
+ __ j(below, miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -1202,10 +1250,10 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Accessing global object: Load and invoke.
__ bind(&global_object);
// Check that the global object does not require access checks.
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_equal, &miss, not_taken);
- GenerateNormalHelper(masm, argc, true, &miss);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_equal, miss, not_taken);
+ GenerateNormalHelper(masm, argc, true, miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
@@ -1214,24 +1262,20 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ j(equal, &global_proxy, not_taken);
// Check that the non-global, non-global-proxy object does not
// require access checks.
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_equal, &miss, not_taken);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_equal, miss, not_taken);
__ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss);
+ GenerateNormalHelper(masm, argc, false, miss);
// Global object proxy access: Check access rights.
__ bind(&global_proxy);
- __ CheckAccessGlobalProxy(edx, eax, &miss);
+ __ CheckAccessGlobalProxy(edx, eax, miss);
__ jmp(&invoke);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm, argc);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1253,7 +1297,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ mov(ebx, Immediate(ExternalReference(IC_Utility(id))));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@@ -1284,6 +1328,182 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ Label miss;
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, &miss);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ Label miss;
+ GenerateCallNormal(masm, argc, &miss);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+}
+
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_string, not_taken);
+
+ __ bind(&index_smi);
+  // Now the key is known to be a smi. This point is also jumped to when a
+  // numeric string key has been converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call);
+
+ GenerateFastArrayLoad(masm,
+ edx,
+ ecx,
+ eax,
+ edi,
+ &check_number_dictionary,
+ &slow_load);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+
+ __ bind(&do_call);
+  // The receiver in edx is not used after this point.
+ // ecx: key
+ // edi: function
+
+ // Check that the value in edi is a JavaScript function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &slow_call, not_taken);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+ __ j(not_equal, &slow_call, not_taken);
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ __ bind(&check_number_dictionary);
+ // eax: elements
+ // ecx: smi key
+ // Check whether the elements is a number dictionary.
+ __ CheckMap(eax, Factory::hash_table_map(), &slow_load, true);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ // ebx: untagged index
+ // Receiver in edx will be clobbered, need to reload it on miss.
+ GenerateNumberDictionaryLoad(masm,
+ &slow_reload_receiver,
+ eax,
+ ecx,
+ ebx,
+ edx,
+ edi,
+ edi);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ jmp(&do_call);
+
+ __ bind(&slow_reload_receiver);
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ EnterInternalFrame();
+ __ push(ecx); // save the key
+ __ push(edx); // pass the receiver
+ __ push(ecx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(ecx); // restore the key
+ __ LeaveInternalFrame();
+ __ mov(edi, eax);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(masm, edx, eax, &lookup_monomorphic_cache);
+
+ __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, &lookup_monomorphic_cache, not_taken);
+
+ GenerateDictionaryLoad(masm,
+ &slow_load,
+ edx,
+ ecx,
+ ebx,
+ eax,
+ edi,
+ edi,
+ DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &slow_call);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+ // - there is hope that the runtime will create a monomorphic call stub
+ // that will get fetched next time.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ GenerateIndexFromHash(masm, ecx, ebx, &index_smi);
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ Label miss;
+ GenerateCallNormal(masm, argc, &miss);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+}
+
+
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@@ -1331,8 +1551,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ j(equal, &global, not_taken);
// Check for non-global object that requires access check.
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &miss, not_taken);
// Search the dictionary placing the result in eax.
@@ -1344,6 +1564,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
edx,
edi,
ebx,
+ edi,
CHECK_DICTIONARY);
__ mov(eax, edi);
__ ret(0);
@@ -1353,7 +1574,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(eax, edx, &miss);
__ jmp(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}
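For orientation, the control flow that KeyedCallIC::GenerateMegamorphic wires up above can be summarized as follows (a pseudocode restatement, not new logic):

    // key is a smi?
    //   fast elements           -> load function      -> do_call
    //   number dictionary       -> dictionary load    -> do_call
    //   otherwise               -> slow_load (runtime KeyedGetProperty) -> do_call
    // key is a symbol?
    //   receiver with slow properties -> inline dictionary probe -> do_call
    //   otherwise               -> monomorphic cache probe, else slow_call
    // key is an array-index string?   -> convert to smi -> index_smi
    // anything else             -> slow_call (KeyedCallIC_Miss)
    //
    // do_call: check that edi holds a JSFunction, then InvokeFunction.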
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ba2fe2dd..b83f9bc7 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -60,49 +60,17 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
- Label fast;
-
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
and_(object, ~Page::kPageAlignmentMask);
- Register page_start = object;
-
- // Compute the bit addr in the remembered set/index of the pointer in the
- // page. Reuse 'addr' as pointer_offset.
- sub(addr, Operand(page_start));
- shr(addr, kObjectAlignmentBits);
- Register pointer_offset = addr;
-
- // If the bit offset lies beyond the normal remembered set range, it is in
- // the extra remembered set area of a large object.
- cmp(pointer_offset, Page::kPageSize / kPointerSize);
- j(less, &fast);
-
- // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
- // extra remembered set after the large object.
-
- // Find the length of the large object (FixedArray).
- mov(scratch, Operand(page_start, Page::kObjectStartOffset
- + FixedArray::kLengthOffset));
- Register array_length = scratch;
-
- // Extra remembered set starts right after the large object (a FixedArray), at
- // page_start + kObjectStartOffset + objectSize
- // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
- // Add the delta between the end of the normal RSet and the start of the
- // extra RSet to 'page_start', so that addressing the bit using
- // 'pointer_offset' hits the extra RSet words.
- lea(page_start,
- Operand(page_start, array_length, times_pointer_size,
- Page::kObjectStartOffset + FixedArray::kHeaderSize
- - Page::kRSetEndOffset));
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bind(&fast);
- bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+
+ // Compute the number of the region covering addr. See the
+ // Page::GetRegionNumberForAddress method for more details.
+ and_(addr, Page::kPageAlignmentMask);
+ shr(addr, Page::kRegionSizeLog2);
+
+ // Set dirty mark for region.
+ bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
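
The region-marking arithmetic above replaces the old remembered-set update with a mask, a shift, and a single bts. A minimal C++ sketch of the same computation follows; the constants are assumptions for illustration (the real values live in V8's spaces.h).

    #include <cstdint>

    constexpr uintptr_t kPageSize = 8 * 1024;              // assumption
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
    constexpr int kRegionSizeLog2 = 5;                     // 32-byte regions (assumption)

    struct Page {
      // One dirty bit per region of the page.
      uint32_t dirty_flags[kPageSize >> (kRegionSizeLog2 + 5)];
    };

    // Equivalent of: and_(object, ~mask); and_(addr, mask); shr(addr, log2); bts.
    void MarkRegionDirty(Page* page, uintptr_t slot_address) {
      uintptr_t region = (slot_address & kPageAlignmentMask) >> kRegionSizeLog2;
      page->dirty_flags[region >> 5] |= 1u << (region & 31);  // bts equivalent
    }
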
@@ -130,7 +98,7 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Set the remembered set bit for [object+offset].
+// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@@ -142,9 +110,8 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
- // First, check if a remembered set write is even needed. The tests below
- // catch stores of Smis and stores into young gen (which does not have space
- // for the remembered set bits).
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
Label done;
// Skip barrier if writing a smi.
@@ -160,47 +127,19 @@ void MacroAssembler::RecordWrite(Register object, int offset,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
- // We use optimized write barrier code if the word being written to is not in
- // a large object chunk or is in the first page of a large object chunk.
- // We make sure that an offset is inside the right limits whether it is
- // tagged or untagged.
- if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
- // Compute the bit offset in the remembered set, leave it in 'value'.
- lea(value, Operand(object, offset));
- and_(value, Page::kPageAlignmentMask);
- shr(value, kPointerSizeLog2);
-
- // Compute the page address from the heap object pointer, leave it in
- // 'object'.
- and_(object, ~Page::kPageAlignmentMask);
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bts(Operand(object, Page::kRSetOffset), value);
+ Register dst = scratch;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
} else {
- Register dst = scratch;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiTag);
- lea(dst, Operand(object, dst, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- }
- // If we are already generating a shared stub, not inlining the
- // record write code isn't going to save us any memory.
- if (generating_stub()) {
- RecordWriteHelper(object, dst, value);
- } else {
- RecordWriteStub stub(object, dst, value);
- CallStub(&stub);
- }
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+ // into an array of words.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ lea(dst, Operand(object, dst, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
}
+ RecordWriteHelper(object, dst, value);
bind(&done);
@@ -357,6 +296,25 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
}
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
+ sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
+ cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ j(above, fail);
+}
+
+
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
@@ -1384,6 +1342,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ SmiUntag(ebx);
mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize));
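The added SmiUntag(ebx) in InvokeFunction reflects a theme of this patch: lengths and counts (here SharedFunctionInfo's formal parameter count, elsewhere FixedArray lengths) are now stored smi-tagged in the heap. A sketch of 32-bit smi tagging as the patch relies on it, using the kSmiTag == 0 / kSmiTagSize == 1 values the surrounding ASSERTs assume:

    #include <cstdint>
    #include <cassert>

    int32_t SmiTag(int32_t value) { return value << 1; }  // shl by kSmiTagSize
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // sar by kSmiTagSize

    int main() {
      int32_t formal_parameter_count = SmiTag(3);  // stored tagged in the heap
      assert(SmiUntag(formal_parameter_count) == 3);  // what SmiUntag(ebx) recovers
      return 0;
    }
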
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 9c8dfb28..2018721d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -59,8 +59,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
- // Set the remebered set bit for an address which points into an
- // object. RecordWriteHelper only works if the object is not in new
+ // For page containing |object| mark region covering |addr| dirty.
+ // RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@@ -73,7 +73,7 @@ class MacroAssembler: public Assembler {
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
- // Set the remembered set bit for [object+offset].
+ // For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@@ -188,6 +188,18 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
+ // Check if a heap object's type is in the JSObject range, not including
+ // JSFunction. The object's map will be loaded in the map register.
+ // Any or all of the three registers may be the same.
+ // The contents of the scratch register will always be overwritten.
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // The contents of the scratch register will be overwritten.
+ void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
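
The sub + unsigned-compare pair in IsInstanceJSObjectType is the classic one-branch range check: subtracting the lower bound makes anything below it wrap around to a huge unsigned value, so a single "above" test rejects both ends. A self-contained sketch with illustrative type values (not V8's real instance-type numbers):

    #include <cassert>

    constexpr unsigned FIRST_JS_OBJECT_TYPE = 0xa0;  // illustrative only
    constexpr unsigned LAST_JS_OBJECT_TYPE  = 0xaf;  // illustrative only

    // j(above, fail) fires iff instance_type is outside [FIRST, LAST].
    bool IsJSObjectType(unsigned instance_type) {
      return instance_type - FIRST_JS_OBJECT_TYPE <=
             LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE;
    }

    int main() {
      assert(IsJSObjectType(0xa5));
      assert(!IsJSObjectType(0x9f));  // below FIRST wraps, compares above
      return 0;
    }
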
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index eb555d70..48d9e674 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -172,6 +172,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype) {
+ // Get the global function with the given index.
+ JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ // Load its initial map. The global functions all have initial maps.
+ __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
@@ -300,203 +311,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
-class LoadInterceptorCompiler BASE_EMBEDDED {
- public:
- explicit LoadInterceptorCompiler(Register name) : name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- AccessorInfo* callback = NULL;
- bool optimize = false;
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- if (lookup->type() == FIELD) {
- optimize = true;
- } else if (lookup->type() == CALLBACKS) {
- Object* callback_object = lookup->GetCallbackObject();
- if (callback_object->IsAccessorInfo()) {
- callback = AccessorInfo::cast(callback_object);
- optimize = callback->getter() != NULL;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
- miss_label);
- return;
- }
-
- // Note: starting a frame here makes GC aware of pointers pushed below.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS) {
- __ push(receiver);
- }
- __ push(holder);
- __ push(name_);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_);
- __ pop(holder);
- if (lookup->type() == CALLBACKS) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Check that the maps from interceptor's holder to field's holder
- // haven't changed...
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- miss_label);
- // ... and retrieve a field from field's holder.
- stub_compiler->GenerateFastPropertyLoad(masm, eax,
- holder, lookup->holder(),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Prepare for tail call: push receiver to stack after return address.
- Label cleanup;
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(scratch2);
-
- // Check that the maps from interceptor's holder to callback's holder
- // haven't changed.
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- &cleanup);
-
- // Continue tail call preparation: push remaining parameters after
- // return address.
- __ pop(scratch2); // return address
- __ push(holder);
- __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
- __ push(holder);
- __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
- __ push(name_);
- __ push(scratch2); // restore return address
-
- // Tail call to runtime.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(ref, 5, 1);
-
- // Clean up code: we pushed receiver after return address and
- // need to remove it from there.
- __ bind(&cleanup);
- __ pop(scratch1); // return address.
- __ pop(scratch2); // receiver.
- __ push(scratch1);
- }
- }
-
-
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* interceptor_holder,
- Label* miss_label) {
- __ pop(scratch); // save old return address
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ push(scratch); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallExternalReference(ref, 5, 1);
- }
-
- private:
- Register name_;
-};
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -683,9 +497,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
- depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -698,10 +512,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(),
- scratch1, scratch2, name,
- depth2, miss);
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+ } else {
+ // CheckPrototypes has the side effect of fetching the 'holder'
+ // for an API call (the object that satisfies the signature's
+ // instanceof check). It is safe to omit it here: if such a holder
+ // exists, it has already been fetched by the previous CheckPrototypes.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
// Invoke function.
if (can_do_fast_api_call) {
@@ -1060,7 +881,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@@ -1068,18 +889,130 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- LoadInterceptorCompiler compiler(name_reg);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- lookup,
- receiver,
- scratch1,
- scratch2,
- miss);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss, not_taken);
+
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them; other cases may be added later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+ // Invoke an interceptor. Note: map checks from the receiver to the
+ // interceptor's holder have been compiled before (see the caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if the interceptor provided a value for the property. If so,
+ // return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ // Check that the maps from the interceptor's holder to the lookup's
+ // holder haven't changed, and load the lookup's holder into holder_reg.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+ // We found FIELD property in prototype chain of interceptor's holder.
+ // Retrieve a field from field's holder.
+ GenerateFastPropertyLoad(masm(), eax, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ // We found CALLBACKS property in prototype chain of interceptor's
+ // holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ pop(scratch2); // return address
+ __ push(receiver);
+ __ push(holder_reg);
+ __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+ __ push(holder_reg);
+ __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ push(name_reg);
+ __ push(scratch2); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg =
+ CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ __ pop(scratch2); // save old return address
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
}
@@ -1107,6 +1040,20 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+ __ j(not_equal, miss, not_taken);
+ }
+}
+
+
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
@@ -1120,6 +1067,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1151,8 +1100,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@@ -1180,6 +1128,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1206,7 +1156,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+ Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1216,7 +1166,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiTag(ecx);
// Check if we could survive without allocation.
__ cmp(eax, Operand(ecx));
@@ -1234,17 +1183,16 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &with_rset_update);
+ __ j(not_zero, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
- __ bind(&with_rset_update);
+ __ bind(&with_write_barrier);
__ InNewSpace(ebx, ecx, equal, &exit);
- RecordWriteStub stub(ebx, edx, ecx);
- __ CallStub(&stub);
+ __ RecordWriteHelper(ebx, edx, ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1284,10 +1232,10 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
- Immediate(kAllocationDelta));
+ Immediate(Smi::FromInt(kAllocationDelta)));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- // Elements are in new space, so no remembered set updates are necessary.
+ // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
@@ -1299,8 +1247,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1328,6 +1275,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss, return_undefined, call_builtin;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1381,8 +1330,139 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
+Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- ecx : function name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+
+ Register receiver = ebx;
+ Register index = edi;
+ Register scratch = edx;
+ Register result = eax;
+ __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+ } else {
+ __ Set(index, Immediate(Factory::undefined_value()));
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::nan_value()));
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
+Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- ecx : function name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+
+ Register receiver = eax;
+ Register index = edi;
+ Register scratch1 = ebx;
+ Register scratch2 = edx;
+ Register result = eax;
+ __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+ } else {
+ __ Set(index, Immediate(Factory::undefined_value()));
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::empty_string()));
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1415,6 +1495,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
+ GenerateNameCheck(name, &miss_in_smi_check);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1466,14 +1548,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ jmp(&miss);
} else {
// Check that the object is a string or a symbol.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- __ cmp(eax, FIRST_NONSTRING_TYPE);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &miss, not_taken);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@@ -1492,9 +1571,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@@ -1514,9 +1592,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@@ -1539,8 +1616,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
FreeSpaceForFastApiCall(masm(), eax);
}
__ bind(&miss_in_smi_check);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1559,6 +1635,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1601,8 +1679,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(argc);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -1623,6 +1700,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1685,8 +1764,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
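
Why GenerateNameCheck exists only for KEYED_CALL_IC stubs: a normal call site references a fixed name, but a keyed call's name register (ecx) can hold a different symbol on every execution, so each compiled stub must guard on the name it was specialized for. An illustrative sketch, not real V8 API:

    #include <string>

    enum class Kind { CALL_IC, KEYED_CALL_IC };

    // Returns true when the stub may proceed; a keyed stub compiled for one
    // name must miss when invoked with another.
    bool NameCheckPasses(Kind kind, const std::string& compiled_for,
                         const std::string& name_in_ecx) {
      if (kind != Kind::KEYED_CALL_IC) return true;  // name statically known
      return compiled_for == name_in_ecx;  // cmp(ecx, name); j(not_equal, miss)
    }
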
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index e22df6ec..36774da0 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -1119,6 +1119,24 @@ Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
}
+Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
+ int arg_count,
+ int loop_nesting) {
+ // Function name, arguments, and receiver are on top of the frame.
+ // The IC expects the name in ecx and the rest on the stack and
+ // drops them all.
+ InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = cgen()->ComputeKeyedCallInitialize(arg_count, in_loop);
+ // Spill args, receiver, and function. The call will drop args and
+ // receiver.
+ Result name = Pop();
+ PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
+ name.ToRegister(ecx);
+ name.Unuse();
+ return RawCallCodeObject(ic, mode);
+}
+
+
Result VirtualFrame::CallConstructor(int arg_count) {
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in eax, function in edi, and the arguments
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index a8f23b0c..e00626b7 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -360,6 +360,9 @@ class VirtualFrame: public ZoneObject {
// include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+ // Call keyed call IC. Same calling convention as CallCallIC.
+ Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
// Allocate and call JS function as constructor. Arguments,
// receiver (global object), and function are found on top of the
// frame. Function is not dropped. The argument count does not
@@ -615,7 +618,7 @@ class VirtualFrame: public ZoneObject {
inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
- friend class DeferredCode;
+ friend class FrameRegisterState;
friend class JumpTarget;
};
diff --git a/src/ic.cc b/src/ic.cc
index 678876df..2b77a54e 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -58,7 +58,7 @@ static char TransitionMarkFromState(IC::State state) {
}
void IC::TraceIC(const char* type,
- Handle<String> name,
+ Handle<Object> name,
State old_state,
Code* new_target,
const char* extra_info) {
@@ -152,11 +152,13 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// to prototype check failure.
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
- // For keyed load/store, the most likely cause of cache failure is
+ // For keyed load/store/call, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
// prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC || kind == Code::KEYED_STORE_IC) {
+ if (kind == Code::KEYED_LOAD_IC ||
+ kind == Code::KEYED_STORE_IC ||
+ kind == Code::KEYED_CALL_IC) {
return MONOMORPHIC;
}
@@ -196,9 +198,9 @@ RelocInfo::Mode IC::ComputeMode() {
Failure* IC::TypeError(const char* type,
Handle<Object> object,
- Handle<String> name) {
+ Handle<Object> key) {
HandleScope scope;
- Handle<Object> args[2] = { name, object };
+ Handle<Object> args[2] = { key, object };
Handle<Object> error = Factory::NewTypeError(type, HandleVector(args, 2));
return Top::Throw(*error);
}
@@ -224,6 +226,7 @@ void IC::Clear(Address address) {
case Code::STORE_IC: return StoreIC::Clear(address, target);
case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
+ case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
case Code::BINARY_OP_IC: return; // Clearing these is tricky and does not
// make any performance difference.
default: UNREACHABLE();
@@ -231,12 +234,13 @@ void IC::Clear(Address address) {
}
-void CallIC::Clear(Address address, Code* target) {
+void CallICBase::Clear(Address address, Code* target) {
State state = target->ic_state();
- InLoopFlag in_loop = target->ic_in_loop();
if (state == UNINITIALIZED) return;
Code* code =
- StubCache::FindCallInitialize(target->arguments_count(), in_loop);
+ StubCache::FindCallInitialize(target->arguments_count(),
+ target->ic_in_loop(),
+ target->kind());
SetTargetAtAddress(address, code);
}
@@ -364,7 +368,7 @@ static void LookupForRead(Object* object,
}
-Object* CallIC::TryCallAsFunction(Object* object) {
+Object* CallICBase::TryCallAsFunction(Object* object) {
HandleScope scope;
Handle<Object> target(object);
Handle<Object> delegate = Execution::GetFunctionDelegate(target);
@@ -383,7 +387,7 @@ Object* CallIC::TryCallAsFunction(Object* object) {
return *delegate;
}
-void CallIC::ReceiverToObject(Handle<Object> object) {
+void CallICBase::ReceiverToObject(Handle<Object> object) {
HandleScope scope;
Handle<Object> receiver(object);
@@ -396,9 +400,9 @@ void CallIC::ReceiverToObject(Handle<Object> object) {
}
-Object* CallIC::LoadFunction(State state,
- Handle<Object> object,
- Handle<String> name) {
+Object* CallICBase::LoadFunction(State state,
+ Handle<Object> object,
+ Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -481,7 +485,7 @@ Object* CallIC::LoadFunction(State state,
}
-void CallIC::UpdateCaches(LookupResult* lookup,
+void CallICBase::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name) {
@@ -497,16 +501,21 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- code = StubCache::ComputeCallPreMonomorphic(argc, in_loop);
+ code = StubCache::ComputeCallPreMonomorphic(argc, in_loop, kind_);
} else if (state == MONOMORPHIC) {
- code = StubCache::ComputeCallMegamorphic(argc, in_loop);
+ code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
- code = StubCache::ComputeCallField(argc, in_loop, *name, *object,
- lookup->holder(), index);
+ code = StubCache::ComputeCallField(argc,
+ in_loop,
+ kind_,
+ *name,
+ *object,
+ lookup->holder(),
+ index);
break;
}
case CONSTANT_FUNCTION: {
@@ -514,8 +523,13 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
- code = StubCache::ComputeCallConstant(argc, in_loop, *name, *object,
- lookup->holder(), function);
+ code = StubCache::ComputeCallConstant(argc,
+ in_loop,
+ kind_,
+ *name,
+ *object,
+ lookup->holder(),
+ function);
break;
}
case NORMAL: {
@@ -530,6 +544,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
JSFunction* function = JSFunction::cast(cell->value());
code = StubCache::ComputeCallGlobal(argc,
in_loop,
+ kind_,
*name,
*receiver,
global,
@@ -541,13 +556,20 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
- code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
+ code = StubCache::ComputeCallNormal(argc,
+ in_loop,
+ kind_,
+ *name,
+ *receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- code = StubCache::ComputeCallInterceptor(argc, *name, *object,
+ code = StubCache::ComputeCallInterceptor(argc,
+ kind_,
+ *name,
+ *object,
lookup->holder());
break;
}
@@ -569,11 +591,48 @@ void CallIC::UpdateCaches(LookupResult* lookup,
}
#ifdef DEBUG
- TraceIC("CallIC", name, state, target(), in_loop ? " (in-loop)" : "");
+ TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
+ name, state, target(), in_loop ? " (in-loop)" : "");
#endif
}
+Object* KeyedCallIC::LoadFunction(State state,
+ Handle<Object> object,
+ Handle<Object> key) {
+ if (key->IsSymbol()) {
+ return CallICBase::LoadFunction(state, object, Handle<String>::cast(key));
+ }
+
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_call", object, key);
+ }
+
+ if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
+ ReceiverToObject(object);
+ }
+
+ if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
+ int argc = target()->arguments_count();
+ InLoopFlag in_loop = target()->ic_in_loop();
+ Object* code = StubCache::ComputeCallMegamorphic(
+ argc, in_loop, Code::KEYED_CALL_IC);
+ if (!code->IsFailure()) {
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC(
+ "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+#endif
+ }
+ }
+ Object* result = Runtime::GetObjectProperty(object, key);
+ if (result->IsJSFunction()) return result;
+ result = TryCallAsFunction(result);
+ return result->IsJSFunction() ?
+ result : TypeError("property_not_function", object, key);
+}
+
+
Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -1293,7 +1352,22 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// Static IC stub generators.
//
-// Used from ic_<arch>.cc.
+static Object* CompileFunction(Object* result,
+ Handle<Object> object,
+ InLoopFlag in_loop) {
+ // Compile now with optimization.
+ HandleScope scope;
+ Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
+ if (in_loop == IN_LOOP) {
+ CompileLazyInLoop(function, object, CLEAR_EXCEPTION);
+ } else {
+ CompileLazy(function, object, CLEAR_EXCEPTION);
+ }
+ return *function;
+}
+
+
+// Used from ic-<arch>.cc.
Object* CallIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@@ -1312,21 +1386,27 @@ Object* CallIC_Miss(Arguments args) {
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
+ return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+}
- // Compile now with optimization.
- HandleScope scope;
- Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
- InLoopFlag in_loop = ic.target()->ic_in_loop();
- if (in_loop == IN_LOOP) {
- CompileLazyInLoop(function, args.at<Object>(0), CLEAR_EXCEPTION);
- } else {
- CompileLazy(function, args.at<Object>(0), CLEAR_EXCEPTION);
+
+// Used from ic-<arch>.cc.
+Object* KeyedCallIC_Miss(Arguments args) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 2);
+ KeyedCallIC ic;
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+ Object* result =
+ ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
+
+ if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
+ return result;
}
- return *function;
+ return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
}
-// Used from ic_<arch>.cc.
+// Used from ic-<arch>.cc.
Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@@ -1336,7 +1416,7 @@ Object* LoadIC_Miss(Arguments args) {
}
-// Used from ic_<arch>.cc
+// Used from ic-<arch>.cc
Object* KeyedLoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@@ -1346,7 +1426,7 @@ Object* KeyedLoadIC_Miss(Arguments args) {
}
-// Used from ic_<arch>.cc.
+// Used from ic-<arch>.cc.
Object* StoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
@@ -1404,7 +1484,7 @@ Object* SharedStoreIC_ExtendStorage(Arguments args) {
}
-// Used from ic_<arch>.cc.
+// Used from ic-<arch>.cc.
Object* KeyedStoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
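
The control flow of KeyedCallIC::LoadFunction above, condensed: symbol keys reuse the CallIC machinery, while everything else falls back to a generic property load with one chance to coerce the result into a callable. A sketch with illustrative types (the real code works on V8 handles):

    #include <string>

    struct Key { bool is_symbol; };
    struct Receiver { bool is_undefined_or_null; bool is_primitive; };

    std::string KeyedCallLoadFunction(const Receiver& obj, const Key& key) {
      if (key.is_symbol)
        return "delegate to CallICBase::LoadFunction";  // symbol keys reuse CallIC
      if (obj.is_undefined_or_null)
        return "throw TypeError(non_object_property_call)";
      // Strings, numbers and booleans are wrapped first (ReceiverToObject);
      // a non-function result then gets one chance via TryCallAsFunction.
      const char* prefix = obj.is_primitive ? "wrap receiver, then " : "";
      return std::string(prefix) + "generic load via Runtime::GetObjectProperty";
    }
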
diff --git a/src/ic.h b/src/ic.h
index a7ff6e67..5fd5078f 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -44,6 +44,7 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
ICU(LoadIC_Miss) \
ICU(KeyedLoadIC_Miss) \
ICU(CallIC_Miss) \
+ ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
ICU(SharedStoreIC_ExtendStorage) \
@@ -139,7 +140,7 @@ class IC {
#ifdef DEBUG
static void TraceIC(const char* type,
- Handle<String> name,
+ Handle<Object> name,
State old_state,
Code* new_target,
const char* extra_info = "");
@@ -147,7 +148,7 @@ class IC {
static Failure* TypeError(const char* type,
Handle<Object> object,
- Handle<String> name);
+ Handle<Object> key);
static Failure* ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
@@ -184,22 +185,16 @@ class IC_Utility {
};
-class CallIC: public IC {
- public:
- CallIC() : IC(EXTRA_CALL_FRAME) { ASSERT(target()->is_call_stub()); }
+class CallICBase: public IC {
+ protected:
+ explicit CallICBase(Code::Kind kind) : IC(EXTRA_CALL_FRAME), kind_(kind) {}
+ public:
Object* LoadFunction(State state, Handle<Object> object, Handle<String> name);
+ protected:
+ Code::Kind kind_;
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
- }
- static void GenerateMiss(MacroAssembler* masm, int argc);
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
- static void GenerateNormal(MacroAssembler* masm, int argc);
-
- private:
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
@@ -219,6 +214,38 @@ class CallIC: public IC {
};
+class CallIC: public CallICBase {
+ public:
+ CallIC() : CallICBase(Code::CALL_IC) { ASSERT(target()->is_call_stub()); }
+
+ // Code generator routines.
+ static void GenerateInitialize(MacroAssembler* masm, int argc) {
+ GenerateMiss(masm, argc);
+ }
+ static void GenerateMiss(MacroAssembler* masm, int argc);
+ static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+ static void GenerateNormal(MacroAssembler* masm, int argc);
+};
+
+
+class KeyedCallIC: public CallICBase {
+ public:
+ KeyedCallIC() : CallICBase(Code::KEYED_CALL_IC) {
+ ASSERT(target()->is_keyed_call_stub());
+ }
+
+ Object* LoadFunction(State state, Handle<Object> object, Handle<Object> key);
+
+ // Code generator routines.
+ static void GenerateInitialize(MacroAssembler* masm, int argc) {
+ GenerateMiss(masm, argc);
+ }
+ static void GenerateMiss(MacroAssembler* masm, int argc);
+ static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+ static void GenerateNormal(MacroAssembler* masm, int argc);
+};
+
+
class LoadIC: public IC {
public:
LoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_load_stub()); }
diff --git a/src/jump-target-heavy.cc b/src/jump-target-heavy.cc
index 468cf4a5..e0585e79 100644
--- a/src/jump-target-heavy.cc
+++ b/src/jump-target-heavy.cc
@@ -332,22 +332,10 @@ void JumpTarget::ComputeEntryFrame() {
}
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
- statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current()->AddDeferred(this);
-#ifdef DEBUG
- comment_ = "";
-#endif
-
+FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
- VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
int sp_offset = frame->fp_relative(frame->stack_pointer_);
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
@@ -423,4 +411,19 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
}
}
+
+DeferredCode::DeferredCode()
+ : masm_(CodeGeneratorScope::Current()->masm()),
+ statement_position_(masm_->current_statement_position()),
+ position_(masm_->current_position()),
+ frame_state_(CodeGeneratorScope::Current()->frame()) {
+ ASSERT(statement_position_ != RelocInfo::kNoPosition);
+ ASSERT(position_ != RelocInfo::kNoPosition);
+
+ CodeGeneratorScope::Current()->AddDeferred(this);
+#ifdef DEBUG
+ comment_ = "";
+#endif
+}
+
} } // namespace v8::internal
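
The shape of the refactoring above: the register bookkeeping DeferredCode used to do inline is now a value type captured once at construction and reused for spill/restore. A stripped-down sketch, where the register count and member layout are assumptions:

    #include <array>

    constexpr int kNumRegisters = 8;  // assumption

    struct VirtualFrame {
      std::array<int, kNumRegisters> register_location;
    };

    class FrameRegisterState {
     public:
      explicit FrameRegisterState(const VirtualFrame& frame)
          : registers_(frame.register_location) {}  // snapshot at construction
      const std::array<int, kNumRegisters>& registers() const { return registers_; }
     private:
      std::array<int, kNumRegisters> registers_;
    };

    class DeferredCode {
     public:
      explicit DeferredCode(const VirtualFrame& frame) : frame_state_(frame) {}
     private:
      FrameRegisterState frame_state_;  // captured once, reused on entry/exit
    };
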
diff --git a/src/jump-target-light.cc b/src/jump-target-light.cc
index 76c3cb7f..19f7bfec 100644
--- a/src/jump-target-light.cc
+++ b/src/jump-target-light.cc
@@ -37,14 +37,15 @@ namespace internal {
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
+ position_(masm_->current_position()),
+ frame_state_(*CodeGeneratorScope::Current()->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
- CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
+ comment_ = "";
#endif
}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index b14d3d82..950f8e0d 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -794,7 +794,7 @@ class FrameUncookingThreadVisitor : public ThreadVisitor {
static void IterateAllThreads(ThreadVisitor* visitor) {
Top::IterateThread(visitor);
- ThreadManager::IterateThreads(visitor);
+ ThreadManager::IterateArchivedThreads(visitor);
}
// Finds all references to original and replaces them with substitution.
@@ -1386,7 +1386,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
- ThreadManager::IterateThreads(&inactive_threads_checker);
+ ThreadManager::IterateArchivedThreads(&inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
}
diff --git a/src/log.cc b/src/log.cc
index f48b3589..ada73cbe 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1295,6 +1295,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A call IC from the snapshot";
tag = Logger::CALL_IC_TAG;
break;
+ case Code::KEYED_CALL_IC:
+ description = "A keyed call IC from the snapshot";
+ tag = Logger::KEYED_CALL_IC_TAG;
+ break;
}
PROFILE(CodeCreateEvent(tag, code_object, description));
}
diff --git a/src/log.h b/src/log.h
index a1441ac1..160072de 100644
--- a/src/log.h
+++ b/src/log.h
@@ -106,6 +106,18 @@ class CompressionHelper;
V(CALL_MISS_TAG, "CallMiss", "cm") \
V(CALL_NORMAL_TAG, "CallNormal", "cn") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
+ V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak", "kcdb") \
+ V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
+ "KeyedCallDebugPrepareStepIn", \
+ "kcdbsi") \
+ V(KEYED_CALL_IC_TAG, "KeyedCallIC", "kcic") \
+ V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize", "kci") \
+ V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic", "kcmm") \
+ V(KEYED_CALL_MISS_TAG, "KeyedCallMiss", "kcm") \
+ V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal", "kcn") \
+ V(KEYED_CALL_PRE_MONOMORPHIC_TAG, \
+ "KeyedCallPreMonomorphic", \
+ "kcpm") \
V(CALLBACK_TAG, "Callback", "cb") \
V(EVAL_TAG, "Eval", "e") \
V(FUNCTION_TAG, "Function", "f") \
diff --git a/src/macros.py b/src/macros.py
index 15337413..7d979182 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -159,3 +159,13 @@ macro LAST_INPUT(array) = ((array)[2]);
macro CAPTURE(index) = (3 + (index));
const CAPTURE0 = 3;
const CAPTURE1 = 4;
+
+# PropertyDescriptor return value indices - must match
+# PropertyDescriptorIndices in runtime.cc.
+const IS_ACCESSOR_INDEX = 0;
+const VALUE_INDEX = 1;
+const GETTER_INDEX = 2;
+const SETTER_INDEX = 3;
+const WRITABLE_INDEX = 4;
+const ENUMERABLE_INDEX = 5;
+const CONFIGURABLE_INDEX = 6;
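
These macros.py constants must stay in lock step with an enum on the C++ side. A sketch of what that enum plausibly looks like (the real definition is PropertyDescriptorIndices in runtime.cc, not reproduced here):

    enum PropertyDescriptorIndices {
      IS_ACCESSOR_INDEX = 0,
      VALUE_INDEX = 1,
      GETTER_INDEX = 2,
      SETTER_INDEX = 3,
      WRITABLE_INDEX = 4,
      ENUMERABLE_INDEX = 5,
      CONFIGURABLE_INDEX = 6,
    };
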
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 554b5795..95afb4ab 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -84,9 +84,6 @@ void MarkCompactCollector::CollectGarbage() {
UpdatePointers();
RelocateObjects();
-
- RebuildRSets();
-
} else {
SweepSpaces();
}
@@ -121,14 +118,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
-#ifdef DEBUG
- if (compacting_collection_) {
- // We will write bookkeeping information to the remembered set area
- // starting now.
- Page::set_rset_state(Page::NOT_IN_USE);
- }
-#endif
-
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@@ -150,7 +139,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
void MarkCompactCollector::Finish() {
#ifdef DEBUG
- ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
+ ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
// The stub cache is not traversed during GC; clear the cache to
@@ -244,8 +233,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
// Since we don't have the object's start, it is impossible to update the
- // remembered set. Therefore, we only replace the string with its left
- // substring when the remembered set does not change.
+ // page dirty marks. Therefore, we only replace the string with its left
+ // substring when the page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
@@ -284,8 +273,10 @@ class MarkingVisitor : public ObjectVisitor {
}
void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence());
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
MarkCompactCollector::MarkObject(code);
}
@@ -776,6 +767,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
Heap::lo_space()->FreeUnmarkedObjects();
}
+
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
MapWord metamap = object->map_word();
@@ -783,6 +775,7 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
+
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
// Iterate over the map space, setting map transitions that go from
@@ -1078,13 +1071,18 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
// first word of object without any encoding. If object is dead we are writing
// NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of remembered set for
-// map space because remembered set bits corresponding to dead maps are cleared
-// later during map space sweeping.
-static void MigrateObject(Address dst, Address src, int size) {
- Heap::CopyBlock(reinterpret_cast<Object**>(dst),
- reinterpret_cast<Object**>(src),
- size);
+// to encounter pointers to dead objects during traversal of dirty regions;
+// we should clear them to avoid encountering them again during the next
+// iteration over the dirty regions.
+static void MigrateObject(Address dst,
+ Address src,
+ int size,
+ bool to_old_space) {
+ if (to_old_space) {
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ } else {
+ Heap::CopyBlock(dst, src, size);
+ }
Memory::Address_at(src) = dst;
}
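
The branch MigrateObject grew above, in isolation: copies into old pointer space must refresh the destination page's region dirty marks, while plain semispace copies must not. The helper names below are stand-ins for Heap's, and the region-mark update is elided:

    #include <cstring>

    void CopyBlock(void* dst, const void* src, int size) {
      std::memcpy(dst, src, size);
    }

    void CopyBlockToOldSpaceAndUpdateRegionMarks(void* dst, const void* src,
                                                 int size) {
      std::memcpy(dst, src, size);
      // ...followed by marking every region the copied pointers land in
      // dirty, so the next new-space GC rescans them (sketch only).
    }

    void MigrateObject(void* dst, void* src, int size, bool to_old_space) {
      if (to_old_space) {
        CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
      } else {
        CopyBlock(dst, src, size);
      }
      *static_cast<void**>(src) = dst;  // leave forwarding address in old slot
    }
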
@@ -1110,8 +1108,10 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
}
void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence());
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
@@ -1131,6 +1131,7 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
}
};
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
@@ -1142,10 +1143,13 @@ static void UpdatePointerToNewGen(HeapObject** p) {
Address new_addr = Memory::Address_at(old_addr);
- // Object pointed by *p is dead. Update is not required.
- if (new_addr == NULL) return;
-
- *p = HeapObject::FromAddress(new_addr);
+ if (new_addr == NULL) {
+ // We encountered a pointer to a dead object. Clear it so that we do
+ // not visit it again during the next iteration over the dirty regions.
+ *p = NULL;
+ } else {
+ *p = HeapObject::FromAddress(new_addr);
+ }
}
@@ -1163,8 +1167,7 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size);
- Heap::UpdateRSet(target);
+ MigrateObject(target->address(), object->address(), object_size, true);
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@@ -1177,10 +1180,10 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size);
- if (target_space == Heap::old_pointer_space()) {
- Heap::UpdateRSet(target);
- }
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ target_space == Heap::old_pointer_space());
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@@ -1222,14 +1225,16 @@ static void SweepNewSpace(NewSpace* space) {
continue;
}
- // Promotion either failed or not required.
- // Copy the content of the object.
+ // Promotion failed. Just migrate object to another semispace.
Object* target = space->AllocateRaw(size);
// Allocation cannot fail at this point: semispaces are of equal size.
ASSERT(!target->IsFailure());
- MigrateObject(HeapObject::cast(target)->address(), current, size);
+ MigrateObject(HeapObject::cast(target)->address(),
+ current,
+ size,
+ false);
} else {
size = object->Size();
Memory::Address_at(current) = NULL;
@@ -1255,9 +1260,12 @@ static void SweepNewSpace(NewSpace* space) {
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
// Update pointers in old spaces.
- Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
- Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
- Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
+ Heap::IterateDirtyRegions(Heap::old_pointer_space(),
+ &Heap::IteratePointersInDirtyRegion,
+ &UpdatePointerToNewGen,
+ Heap::WATERMARK_SHOULD_BE_VALID);
+
+ Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
HeapObjectIterator cell_iterator(Heap::cell_space());
@@ -1323,7 +1331,10 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start, static_cast<int>(current - free_start), true);
+ dealloc(free_start,
+ static_cast<int>(current - free_start),
+ true,
+ false);
is_previous_alive = true;
}
} else {
@@ -1353,7 +1364,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false);
+ dealloc(free_start, size_in_bytes, false, true);
}
}
} else {
@@ -1367,7 +1378,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If there is a free ending area on one of the previous pages we have
// to deallocate that area and put it on the free list.
if (last_free_size > 0) {
- dealloc(last_free_start, last_free_size, true);
+ Page::FromAddress(last_free_start)->
+ SetAllocationWatermark(last_free_start);
+ dealloc(last_free_start, last_free_size, true, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1398,7 +1411,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false);
+ dealloc(last_free_start, last_free_size, false, true);
new_allocation_top = last_free_start;
}
@@ -1421,34 +1434,36 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
- Heap::ClearRSetRange(start, size_in_bytes);
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
- Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
@@ -1458,13 +1473,13 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
- Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
@@ -1563,20 +1578,6 @@ class MapCompact {
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}
- void FinishMapSpace() {
- // Iterate through to space and finish move.
- MapIterator it;
- HeapObject* o = it.next();
- for (; o != first_map_to_evacuate_; o = it.next()) {
- ASSERT(o != NULL);
- Map* map = reinterpret_cast<Map*>(o);
- ASSERT(!map->IsMarked());
- ASSERT(!map->IsOverflowed());
- ASSERT(map->IsMap());
- Heap::UpdateRSet(map);
- }
- }
-
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());
@@ -1669,9 +1670,9 @@ class MapCompact {
ASSERT(Map::kSize % 4 == 0);
- Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
- reinterpret_cast<Object**>(map_to_evacuate->address()),
- Map::kSize);
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
+ map_to_evacuate->address(),
+ Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
@@ -1756,6 +1757,12 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+
+ Heap::IterateDirtyRegions(Heap::map_space(),
+ &Heap::IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ Heap::WATERMARK_SHOULD_BE_VALID);
+
int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
@@ -1766,7 +1773,6 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
- map_compact.FinishMapSpace();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@@ -1854,8 +1860,10 @@ class UpdatingVisitor: public ObjectVisitor {
}
void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence());
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
VisitPointer(&target);
rinfo->set_call_address(
@@ -2039,9 +2047,8 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
Page* forwarded_page = Page::FromAddress(first_forwarded);
int forwarded_offset = forwarded_page->Offset(first_forwarded);
- // Find end of allocation of in the page of first_forwarded.
- Address mc_top = forwarded_page->mc_relocation_top;
- int mc_top_offset = forwarded_page->Offset(mc_top);
+ // Find end of allocation in the page of first_forwarded.
+ int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
// Check if current object's forward pointer is in the same page
// as the first live object's forwarding pointer
@@ -2058,7 +2065,7 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
offset += Page::kObjectStartOffset;
ASSERT_PAGE_OFFSET(offset);
- ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
+ ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
return next_page->OffsetToAddress(offset);
}
@@ -2103,16 +2110,12 @@ void MarkCompactCollector::RelocateObjects() {
// Flip from and to spaces
Heap::new_space()->Flip();
+ Heap::new_space()->MCCommitRelocationInfo();
+
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
- Heap::new_space()->MCCommitRelocationInfo();
-#ifdef DEBUG
- // It is safe to write to the remembered sets as remembered sets on a
- // page-by-page basis after committing the m-c forwarding pointer.
- Page::set_rset_state(Page::IN_USE);
-#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
@@ -2139,9 +2142,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- Map::kSize);
+ Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
#ifdef DEBUG
@@ -2198,9 +2201,13 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ if (space == Heap::old_data_space()) {
+ Heap::MoveBlock(new_addr, old_addr, obj_size);
+ } else {
+ Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@@ -2245,9 +2252,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ Heap::MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2283,9 +2288,13 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
// New and old addresses cannot overlap.
- Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ } else {
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
#ifdef DEBUG
if (FLAG_gc_verbose) {
@@ -2302,18 +2311,6 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
-// -------------------------------------------------------------------------
-// Phase 5: rebuild remembered sets
-
-void MarkCompactCollector::RebuildRSets() {
-#ifdef DEBUG
- ASSERT(state_ == RELOCATE_OBJECTS);
- state_ = REBUILD_RSETS;
-#endif
- Heap::RebuildRSets();
-}
-
-
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 3950e753..1d289a75 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -41,7 +41,8 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// no attempt to add area to free list is made.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
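
A hedged sketch of a callback conforming to the widened typedef (the body is illustrative pseudologic, not V8's): the new last_on_page flag tells the sweeper's callback that the freed block ends its page, which is the case where the page's allocation watermark has to be moved back.

    typedef unsigned char* Address;  // stand-in for V8's Address typedef

    static void DeallocateExampleBlock(Address start,
                                       int size_in_bytes,
                                       bool add_to_freelist,
                                       bool last_on_page) {
      if (last_on_page) {
        // The block reaches the page's allocation top: pull the page's
        // allocation watermark back to 'start' (hypothetical operation).
      }
      if (add_to_freelist) {
        // Hand [start, start + size_in_bytes) to the space's free list.
      }
      // Otherwise the area is simply abandoned, as in the functions above.
    }
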
// Forward declarations.
@@ -131,8 +132,7 @@ class MarkCompactCollector: public AllStatic {
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
- RELOCATE_OBJECTS,
- REBUILD_RSETS
+ RELOCATE_OBJECTS
};
// The current stage of the collector.
@@ -269,22 +269,22 @@ class MarkCompactCollector: public AllStatic {
// written to their map word's offset in the inactive
// semispace.
//
- // Bookkeeping data is written to the remembered-set are of
+ // Bookkeeping data is written to the page header of
// each paged-space page that contains live objects after
// compaction:
//
- // The 3rd word of the page (first word of the remembered
- // set) contains the relocation top address, the address of
- // the first word after the end of the last live object in
- // the page after compaction.
+ // The allocation watermark field is used to track the
+ // relocation top address, the address of the first word
+ // after the end of the last live object in the page after
+ // compaction.
//
- // The 4th word contains the zero-based index of the page in
- // its space. This word is only used for map space pages, in
+ // The Page::mc_page_index field contains the zero-based index of the
+ // page in its space. This word is only used for map space pages, in
// order to encode the map addresses in 21 bits to free 11
// bits per map word for the forwarding address.
//
- // The 5th word contains the (nonencoded) forwarding address
- // of the first live object in the page.
+ // The Page::mc_first_forwarded field contains the (nonencoded)
+ // forwarding address of the first live object in the page.
//
// In both the new space and the paged spaces, a linked list
// of live regions is constructed (linked through
@@ -319,23 +319,28 @@ class MarkCompactCollector: public AllStatic {
// generation.
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateCodeBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateMapBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateCellBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
@@ -349,9 +354,7 @@ class MarkCompactCollector: public AllStatic {
//
// After: All pointers in live objects, including encoded map
// pointers, are updated to point to their target's new
- // location. The remembered set area of each paged-space
- // page containing live objects still contains bookkeeping
- // information.
+ // location.
friend class UpdatingVisitor; // helper for updating visited objects
@@ -373,13 +376,9 @@ class MarkCompactCollector: public AllStatic {
// Phase 4: Relocating objects.
//
// Before: Pointers to live objects are updated to point to their
- // target's new location. The remembered set area of each
- // paged-space page containing live objects still contains
- // bookkeeping information.
+ // target's new location.
//
- // After: Objects have been moved to their new addresses. The
- // remembered set area of each paged-space page containing
- // live objects still contains bookkeeping information.
+ // After: Objects have been moved to their new addresses.
// Relocates objects in all spaces.
static void RelocateObjects();
@@ -408,17 +407,6 @@ class MarkCompactCollector: public AllStatic {
// Copy a new object.
static int RelocateNewObject(HeapObject* obj);
- // -----------------------------------------------------------------------
- // Phase 5: Rebuilding remembered sets.
- //
- // Before: The heap is in a normal state except that remembered sets
- // in the paged spaces are not correct.
- //
- // After: The heap is in a normal state.
-
- // Rebuild remembered set in old and map spaces.
- static void RebuildRSets();
-
#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index d9617dc7..a3b316b1 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -1046,13 +1046,16 @@ void Assembler::RecordStatementPosition(int pos) {
}
-void Assembler::WriteRecordedPositions() {
+bool Assembler::WriteRecordedPositions() {
+ bool written = false;
+
// Write the statement position if it is different from what was written last
// time.
if (current_statement_position_ != written_statement_position_) {
CheckBuffer();
RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
written_statement_position_ = current_statement_position_;
+ written = true;
}
// Write the position if it is different from what was written last time and
@@ -1062,7 +1065,11 @@ void Assembler::WriteRecordedPositions() {
CheckBuffer();
RecordRelocInfo(RelocInfo::POSITION, current_position_);
written_position_ = current_position_;
+ written = true;
}
+
+ // Return whether something was written.
+ return written;
}
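
The new bool result tells the caller whether any position reloc info was actually emitted. A hedged standalone sketch of how a caller might use it (ToyAssembler and the commented follow-up are hypothetical, named after the debug-break-slot machinery this patch introduces elsewhere):

    // Standalone sketch; ToyAssembler stands in for the Assembler above.
    struct ToyAssembler {
      bool positions_dirty;
      bool WriteRecordedPositions() {   // mirrors the new bool contract
        bool written = positions_dirty;
        positions_dirty = false;
        return written;
      }
    };

    void MaybeEmitDebugBreakSlot(ToyAssembler* assm) {
      // Emit a slot only where a new source position was actually written,
      // so repeated positions do not produce duplicate break slots.
      if (assm->WriteRecordedPositions()) {
        // ... emit the debug break slot here (hypothetical) ...
      }
    }
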
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index cc730f2b..b7c3ebcb 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -355,6 +355,9 @@ class Assembler : public Malloced {
// to jump to.
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
// ---------------------------------------------------------------------------
// Code generation.
@@ -518,7 +521,7 @@ class Assembler : public Malloced {
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
- void WriteRecordedPositions();
+ bool WriteRecordedPositions();
int32_t pc_offset() const { return pc_ - buffer_; }
int32_t current_position() const { return current_position_; }
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index afda2cbb..17ee531a 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -45,7 +45,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 29d00694..761b9b31 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1240,8 +1240,9 @@ const kFrameDetailsArgumentCountIndex = 3;
const kFrameDetailsLocalCountIndex = 4;
const kFrameDetailsSourcePositionIndex = 5;
const kFrameDetailsConstructCallIndex = 6;
-const kFrameDetailsDebuggerFrameIndex = 7;
-const kFrameDetailsFirstDynamicIndex = 8;
+const kFrameDetailsAtReturnIndex = 7;
+const kFrameDetailsDebuggerFrameIndex = 8;
+const kFrameDetailsFirstDynamicIndex = 9;
const kFrameDetailsNameIndex = 0;
const kFrameDetailsValueIndex = 1;
@@ -1258,8 +1259,11 @@ const kFrameDetailsNameValueSize = 2;
* 4: Local count
* 5: Source position
* 6: Construct call
+ * 7: Is at return
+ * 8: Debugger frame
* Arguments name, value
* Locals name, value
+ * Return value if any
* @param {number} break_id Current break id
* @param {number} index Frame number
* @constructor
@@ -1294,6 +1298,12 @@ FrameDetails.prototype.isConstructCall = function() {
}
+FrameDetails.prototype.isAtReturn = function() {
+ %CheckExecutionState(this.break_id_);
+ return this.details_[kFrameDetailsAtReturnIndex];
+}
+
+
FrameDetails.prototype.isDebuggerFrame = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsDebuggerFrameIndex];
@@ -1341,7 +1351,8 @@ FrameDetails.prototype.sourcePosition = function() {
FrameDetails.prototype.localName = function(index) {
%CheckExecutionState(this.break_id_);
if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+ var locals_offset = kFrameDetailsFirstDynamicIndex +
+ this.argumentCount() * kFrameDetailsNameValueSize;
return this.details_[locals_offset +
index * kFrameDetailsNameValueSize +
kFrameDetailsNameIndex]
@@ -1352,7 +1363,8 @@ FrameDetails.prototype.localName = function(index) {
FrameDetails.prototype.localValue = function(index) {
%CheckExecutionState(this.break_id_);
if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+ var locals_offset = kFrameDetailsFirstDynamicIndex +
+ this.argumentCount() * kFrameDetailsNameValueSize;
return this.details_[locals_offset +
index * kFrameDetailsNameValueSize +
kFrameDetailsValueIndex]
@@ -1360,6 +1372,17 @@ FrameDetails.prototype.localValue = function(index) {
}
+FrameDetails.prototype.returnValue = function() {
+ %CheckExecutionState(this.break_id_);
+ var return_value_offset =
+ kFrameDetailsFirstDynamicIndex +
+ (this.argumentCount() + this.localCount()) * kFrameDetailsNameValueSize;
+ if (this.details_[kFrameDetailsAtReturnIndex]) {
+ return this.details_[return_value_offset];
+ }
+}
+
+
FrameDetails.prototype.scopeCount = function() {
return %GetScopeCount(this.break_id_, this.frameId());
}
@@ -1412,6 +1435,11 @@ FrameMirror.prototype.isConstructCall = function() {
};
+FrameMirror.prototype.isAtReturn = function() {
+ return this.details_.isAtReturn();
+};
+
+
FrameMirror.prototype.isDebuggerFrame = function() {
return this.details_.isDebuggerFrame();
};
@@ -1447,6 +1475,11 @@ FrameMirror.prototype.localValue = function(index) {
};
+FrameMirror.prototype.returnValue = function() {
+ return MakeMirror(this.details_.returnValue());
+};
+
+
FrameMirror.prototype.sourcePosition = function() {
return this.details_.sourcePosition();
};
@@ -1574,6 +1607,11 @@ FrameMirror.prototype.invocationText = function() {
result += ')';
}
+ if (this.isAtReturn()) {
+ result += ' returning ';
+ result += this.returnValue().toText();
+ }
+
return result;
}
@@ -2267,6 +2305,10 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.script = this.serializeReference(func.script());
}
content.constructCall = mirror.isConstructCall();
+ content.atReturn = mirror.isAtReturn();
+ if (mirror.isAtReturn()) {
+ content.returnValue = this.serializeReference(mirror.returnValue());
+ }
content.debuggerFrame = mirror.isDebuggerFrame();
var x = new Array(mirror.argumentCount());
for (var i = 0; i < mirror.argumentCount(); i++) {
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index b0a3fd62..f9b20a4b 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -806,7 +806,8 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK_EQ(0, elements()->length());
+ CHECK(HasFastElements());
+ CHECK_EQ(0, FixedArray::cast(elements())->length());
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index d82d73ec..4112f933 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -759,7 +759,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
ASSERT(mode == SKIP_WRITE_BARRIER); \
ASSERT(Heap::InNewSpace(object) || \
!Heap::InNewSpace(READ_FIELD(object, offset)) || \
- Page::IsRSetSet(object->address(), offset)); \
+ Page::FromAddress(object->address())-> \
+ IsRegionDirty(object->address() + offset)); \
}
#define READ_DOUBLE_FIELD(p, offset) \
@@ -1045,6 +1046,10 @@ Address MapWord::ToEncodedAddress() {
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
+
+void HeapObject::VerifySmiField(int offset) {
+ ASSERT(READ_FIELD(this, offset)->IsSmi());
+}
#endif
@@ -1064,7 +1069,7 @@ MapWord HeapObject::map_word() {
void HeapObject::set_map_word(MapWord map_word) {
- // WRITE_FIELD does not update the remembered set, but there is no need
+ // WRITE_FIELD does not invoke the write barrier, but there is no need
// here.
WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
@@ -1162,16 +1167,16 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
-Array* JSObject::elements() {
+HeapObject* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
// In the assert below Dictionary is covered under FixedArray.
ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
array->IsExternalArray());
- return reinterpret_cast<Array*>(array);
+ return reinterpret_cast<HeapObject*>(array);
}
-void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
+void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@@ -1342,15 +1347,15 @@ bool JSObject::HasFastProperties() {
}
-bool Array::IndexFromObject(Object* object, uint32_t* index) {
- if (object->IsSmi()) {
- int value = Smi::cast(object)->value();
+bool Object::ToArrayIndex(uint32_t* index) {
+ if (IsSmi()) {
+ int value = Smi::cast(this)->value();
if (value < 0) return false;
*index = value;
return true;
}
- if (object->IsHeapNumber()) {
- double value = HeapNumber::cast(object)->value();
+ if (IsHeapNumber()) {
+ double value = HeapNumber::cast(this)->value();
uint32_t uint_value = static_cast<uint32_t>(value);
if (value == static_cast<double>(uint_value)) {
*index = uint_value;
@@ -1665,7 +1670,11 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
}
-INT_ACCESSORS(Array, length, kLengthOffset)
+SMI_ACCESSORS(FixedArray, length, kLengthOffset)
+SMI_ACCESSORS(ByteArray, length, kLengthOffset)
+
+INT_ACCESSORS(PixelArray, length, kLengthOffset)
+INT_ACCESSORS(ExternalArray, length, kLengthOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
@@ -1678,6 +1687,9 @@ uint32_t String::hash_field() {
void String::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
+#if V8_HOST_ARCH_64_BIT
+ WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
+#endif
}
@@ -2184,7 +2196,8 @@ Code::Flags Code::flags() {
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= (kFlagsKindMask >> kFlagsKindShift)+1);
// Make sure that all call stubs have an arguments count.
- ASSERT(ExtractKindFromFlags(flags) != CALL_IC ||
+ ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
+ ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
ExtractArgumentsCountFromFlags(flags) >= 0);
WRITE_INT_FIELD(this, kFlagsOffset, flags);
}
@@ -2220,7 +2233,7 @@ PropertyType Code::type() {
int Code::arguments_count() {
- ASSERT(is_call_stub() || kind() == STUB);
+ ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
return ExtractArgumentsCountFromFlags(flags());
}
@@ -2455,23 +2468,70 @@ BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
try_full_codegen,
kTryFullCodegen)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ allows_lazy_compilation,
+ kAllowLazyCompilation)
-INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
+#if V8_HOST_ARCH_32_BIT
+SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
+SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
-INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
-INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
-INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
+SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
-INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, function_token_position,
+SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
+SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, compiler_hints,
+SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
-INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
+SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
+#else
+
+#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
+ int holder::name() { \
+ int value = READ_INT_FIELD(this, offset); \
+ ASSERT(kHeapObjectTag == 1); \
+ ASSERT((value & kHeapObjectTag) == 0); \
+ return value >> 1; \
+ } \
+ void holder::set_##name(int value) { \
+ ASSERT(kHeapObjectTag == 1); \
+ ASSERT((value & 0xC0000000) == 0xC0000000 || \
+ (value & 0xC0000000) == 0x00000000); \
+ WRITE_INT_FIELD(this, \
+ offset, \
+ (value << 1) & ~kHeapObjectTag); \
+ }
+
+#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
+ INT_ACCESSORS(holder, name, offset)
+
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, formal_parameter_count,
+ kFormalParameterCountOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, start_position_and_type,
+ kStartPositionAndTypeOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, end_position, kEndPositionOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, function_token_position,
+ kFunctionTokenPositionOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, compiler_hints,
+ kCompilerHintsOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
+ kThisPropertyAssignmentsCountOffset)
+#endif
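
A standalone worked example of the LO encoding above (illustrative, not V8 code): storing the low integer of each pointer-aligned pair shifted left by one keeps the word's least significant bit clear, so with kHeapObjectTag == 1 the word can never be mistaken for a tagged heap pointer during old-space traversal.

    #include <cassert>
    #include <cstdint>

    const int kHeapObjectTag = 1;  // tag value asserted by the macro above

    int32_t EncodeLo(int32_t value) {
      // The 0xC0000000 assert above guarantees 'value' fits in 31 bits.
      return (value << 1) & ~kHeapObjectTag;
    }

    int32_t DecodeLo(int32_t stored) {
      assert((stored & kHeapObjectTag) == 0);  // never looks pointer-tagged
      return stored >> 1;
    }

    int main() {
      int32_t stored = EncodeLo(42);
      assert((stored & 1) == 0);        // LSB clear: skipped as a non-pointer
      assert(DecodeLo(stored) == 42);   // value round-trips
      return 0;
    }
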
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -2785,7 +2845,7 @@ void JSRegExp::SetDataAt(int index, Object* value) {
JSObject::ElementsKind JSObject::GetElementsKind() {
- Array* array = elements();
+ HeapObject* array = elements();
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
@@ -2908,15 +2968,20 @@ NumberDictionary* JSObject::element_dictionary() {
}
+bool String::IsHashFieldComputed(uint32_t field) {
+ return (field & kHashNotComputedMask) == 0;
+}
+
+
bool String::HasHashCode() {
- return (hash_field() & kHashComputedMask) != 0;
+ return IsHashFieldComputed(hash_field());
}
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
- if (field & kHashComputedMask) return field >> kHashShift;
+ if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
}
@@ -2989,7 +3054,9 @@ uint32_t StringHasher::GetHash() {
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
- if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
+ if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
+ return false;
+ }
return SlowAsArrayIndex(index);
}
@@ -3113,7 +3180,7 @@ void Map::ClearCodeCache() {
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
- Array* elts = elements();
+ FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid
diff --git a/src/objects.cc b/src/objects.cc
index ab678cb5..1e4d4a4c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2013,19 +2013,25 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
CustomArguments args(interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQuery query =
- v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
+ v8::NamedPropertyQueryImpl query =
+ v8::ToCData<v8::NamedPropertyQueryImpl>(interceptor->query());
LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
- v8::Handle<v8::Boolean> result;
+ v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = query(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) {
- // Convert the boolean result to a property attribute
- // specification.
- return result->IsTrue() ? NONE : ABSENT;
+ // Temporarily complicated logic; it will be removed soon.
+ if (result->IsBoolean()) {
+ // Convert the boolean result to a property attribute
+ // specification.
+ return result->IsTrue() ? NONE : ABSENT;
+ } else {
+ ASSERT(result->IsInt32());
+ return static_cast<PropertyAttributes>(result->Int32Value());
+ }
}
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
@@ -2037,7 +2043,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
VMState state(EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
- if (!result.IsEmpty()) return NONE;
+ if (!result.IsEmpty()) return DONT_ENUM;
}
return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
*name_handle,
@@ -2700,7 +2706,7 @@ Object* JSObject::DefineGetterSetter(String* name,
return Heap::undefined_value();
}
- uint32_t index;
+ uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element && IsJSArray()) return Heap::undefined_value();
@@ -2958,7 +2964,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
// Make the lookup and include prototypes.
int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
- uint32_t index;
+ uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
obj != Heap::null_value();
@@ -4784,7 +4790,7 @@ static inline uint32_t HashSequentialString(const schar* chars, int length) {
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
- ASSERT(!(hash_field() & kHashComputedMask));
+ ASSERT(!HasHashCode());
const int len = length();
@@ -4803,7 +4809,7 @@ uint32_t String::ComputeAndSetHash() {
set_hash_field(field);
// Check the hash code is there.
- ASSERT(hash_field() & kHashComputedMask);
+ ASSERT(HasHashCode());
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -4844,7 +4850,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
if (length() <= kMaxCachedArrayIndexLength) {
Hash(); // force computation of hash code
uint32_t field = hash_field();
- if ((field & kIsArrayIndexMask) == 0) return false;
+ if ((field & kIsNotArrayIndexMask) != 0) return false;
// Isolate the array index from the full hash field.
*index = (kArrayIndexHashMask & field) >> kHashShift;
return true;
@@ -4858,16 +4864,19 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
static inline uint32_t HashField(uint32_t hash,
bool is_array_index,
int length = -1) {
- uint32_t result =
- (hash << String::kHashShift) | String::kHashComputedMask;
+ uint32_t result = (hash << String::kHashShift);
if (is_array_index) {
// For array indexes mix the length into the hash as an array index could
// be zero.
ASSERT(length > 0);
+ ASSERT(length <= String::kMaxArrayIndexSize);
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- result |= String::kIsArrayIndexMask;
+ ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
+ result &= ~String::kIsNotArrayIndexMask;
result |= length << String::kArrayIndexHashLengthShift;
+ } else {
+ result |= String::kIsNotArrayIndexMask;
}
return result;
}
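
A standalone illustration of the inverted flags in the new HashField (constants copied from the objects.h hunk in this patch): a non-index string now sets kIsNotArrayIndexMask, so an all-zero field can no longer be confused with a computed hash, while an index string keeps both low bits clear and carries its length above the 24 value bits.

    #include <cassert>
    #include <cstdint>

    // Constants as defined in objects.h by this patch.
    const uint32_t kHashNotComputedMask = 1;
    const uint32_t kIsNotArrayIndexMask = 1 << 1;
    const int kNofHashBitFields = 2;
    const int kHashShift = kNofHashBitFields;
    const int kArrayIndexValueBits = 24;
    const int kArrayIndexHashLengthShift =
        kArrayIndexValueBits + kNofHashBitFields;
    const uint32_t kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;

    int main() {
      // Field for the array-index string "7": hash 7, length 1.
      uint32_t f = 7u << kHashShift;             // low flag bits already clear
      f |= 1u << kArrayIndexHashLengthShift;     // mix in the length
      assert((f & kHashNotComputedMask) == 0);   // hash is computed
      assert((f & kIsNotArrayIndexMask) == 0);   // and it is an array index
      assert(((f & kArrayIndexHashMask) >> kHashShift) == 7);  // index back

      // Field for a non-index string: the flag is set explicitly.
      uint32_t g = (7u << kHashShift) | kIsNotArrayIndexMask;
      assert((g & kIsNotArrayIndexMask) != 0);   // not an array index
      return 0;
    }
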
@@ -5255,8 +5264,10 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence());
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
Object* old_target = target;
VisitPointer(&target);
@@ -5269,6 +5280,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
@@ -5397,6 +5409,7 @@ const char* Code::Kind2String(Kind kind) {
case STORE_IC: return "STORE_IC";
case KEYED_STORE_IC: return "KEYED_STORE_IC";
case CALL_IC: return "CALL_IC";
+ case KEYED_CALL_IC: return "KEYED_CALL_IC";
case BINARY_OP_IC: return "BINARY_OP_IC";
}
UNREACHABLE();
@@ -5639,7 +5652,7 @@ Object* JSObject::SetElementsLength(Object* len) {
// General slow case.
if (len->IsNumber()) {
uint32_t length;
- if (Array::IndexFromObject(len, &length)) {
+ if (len->ToArrayIndex(&length)) {
return SetSlowElements(len);
} else {
return ArrayLengthRangeError();
@@ -6063,8 +6076,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
- &array_length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
if (index >= array_length) {
JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
@@ -6202,8 +6214,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
if (ShouldConvertToFastElements()) {
uint32_t new_length = 0;
if (IsJSArray()) {
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
- &new_length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
@@ -6234,7 +6245,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
uint32_t old_len = 0;
- CHECK(Array::IndexFromObject(length(), &old_len));
+ CHECK(length()->ToArrayIndex(&old_len));
// Check to see if we need to update the length. For now, we make
// sure that the length stays within 32-bits (unsigned).
if (index >= old_len && index != 0xffffffff) {
@@ -6516,7 +6527,7 @@ bool JSObject::ShouldConvertToFastElements() {
// fast elements.
uint32_t length = 0;
if (IsJSArray()) {
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
} else {
length = dictionary->max_number_key();
}
diff --git a/src/objects.h b/src/objects.h
index 8e89e8f0..095dd981 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -54,29 +54,28 @@
// - JSGlobalObject
// - JSBuiltinsObject
// - JSGlobalProxy
-// - JSValue
-// - Array
-// - ByteArray
-// - PixelArray
-// - ExternalArray
-// - ExternalByteArray
-// - ExternalUnsignedByteArray
-// - ExternalShortArray
-// - ExternalUnsignedShortArray
-// - ExternalIntArray
-// - ExternalUnsignedIntArray
-// - ExternalFloatArray
-// - FixedArray
-// - DescriptorArray
-// - HashTable
-// - Dictionary
-// - SymbolTable
-// - CompilationCacheTable
-// - CodeCacheHashTable
-// - MapCache
-// - Context
-// - GlobalContext
-// - JSFunctionResultCache
+// - JSValue
+// - ByteArray
+// - PixelArray
+// - ExternalArray
+// - ExternalByteArray
+// - ExternalUnsignedByteArray
+// - ExternalShortArray
+// - ExternalUnsignedShortArray
+// - ExternalIntArray
+// - ExternalUnsignedIntArray
+// - ExternalFloatArray
+// - FixedArray
+// - DescriptorArray
+// - HashTable
+// - Dictionary
+// - SymbolTable
+// - CompilationCacheTable
+// - CodeCacheHashTable
+// - MapCache
+// - Context
+// - GlobalContext
+// - JSFunctionResultCache
// - String
// - SeqString
// - SeqAsciiString
@@ -409,8 +408,9 @@ const uint32_t kStringRepresentationMask = 0x03;
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
- kExternalStringTag = 0x3
+ kExternalStringTag = 0x2
};
+const uint32_t kIsConsStringMask = 0x1;
// A ConsString with an empty string as the right side is a candidate
@@ -676,6 +676,10 @@ class Object BASE_EMBEDDED {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype();
+ // Tries to convert an object to an array index. Returns true and sets
+ // the output parameter if it succeeds.
+ inline bool ToArrayIndex(uint32_t* index);
+
// Returns true if this is a JSValue containing a string and the index is
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
@@ -1026,7 +1030,7 @@ class HeapObject: public Object {
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
- // Does not update remembered sets, so should only be assigned to
+ // Does not invoke write barrier, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
@@ -1046,6 +1050,7 @@ class HeapObject: public Object {
void HeapObjectPrint();
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
+ inline void VerifySmiField(int offset);
void PrintHeader(const char* id);
@@ -1150,7 +1155,7 @@ class JSObject: public HeapObject {
};
// [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case, and a Dictionary in the
+ // properties is a FixedArray in the fast case and a Dictionary in the
// slow case.
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
@@ -1158,9 +1163,9 @@ class JSObject: public HeapObject {
inline StringDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
- // elements is a FixedArray in the fast case, and a Dictionary in the slow
- // case or a PixelArray in a special case.
- DECL_ACCESSORS(elements, Array) // Get and set fast elements.
+ // elements is a FixedArray in the fast case, a Dictionary in the slow
+ // case, and a PixelArray or ExternalArray in special cases.
+ DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
@@ -1594,37 +1599,13 @@ class JSObject: public HeapObject {
};
-// Abstract super class arrays. It provides length behavior.
-class Array: public HeapObject {
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
- // Convert an object to an array index.
- // Returns true if the conversion succeeded.
- static inline bool IndexFromObject(Object* object, uint32_t* index);
-
- // Layout descriptor.
- static const int kLengthOffset = HeapObject::kHeaderSize;
-
- protected:
- // No code should use the Array class directly, only its subclasses.
- // Use the kHeaderSize of the appropriate subclass, which may be aligned.
- static const int kHeaderSize = kLengthOffset + kIntSize;
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
-};
-
-
-// FixedArray describes fixed sized arrays where element
-// type is Object*.
-
-class FixedArray: public Array {
- public:
-
// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@@ -1665,7 +1646,10 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
- static const int kHeaderSize = Array::kAlignedSize;
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
@@ -2364,8 +2348,12 @@ class JSFunctionResultCache: public FixedArray {
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
-class ByteArray: public Array {
+class ByteArray: public HeapObject {
public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@@ -2374,7 +2362,7 @@ class ByteArray: public Array {
inline int get_int(int index);
static int SizeFor(int length) {
- return OBJECT_SIZE_ALIGN(kHeaderSize + length);
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
// We use byte arrays for free blocks in the heap. Given a desired size in
// bytes that is a multiple of the word size and big enough to hold a byte
@@ -2402,9 +2390,12 @@ class ByteArray: public Array {
void ByteArrayVerify();
#endif
- // ByteArray headers are not quadword aligned.
- static const int kHeaderSize = Array::kHeaderSize;
- static const int kAlignedSize = Array::kAlignedSize;
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
// Maximal memory consumption for a single ByteArray.
static const int kMaxSize = 512 * MB;
@@ -2423,8 +2414,12 @@ class ByteArray: public Array {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
-class PixelArray: public Array {
+class PixelArray: public HeapObject {
public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
// [external_pointer]: The pointer to the external memory area backing this
// pixel array.
DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
@@ -2449,9 +2444,11 @@ class PixelArray: public Array {
static const int kMaxLength = 0x3fffffff;
// PixelArray headers are not quadword aligned.
- static const int kExternalPointerOffset = Array::kAlignedSize;
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kExternalPointerOffset =
+ POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
@@ -2469,8 +2466,12 @@ class PixelArray: public Array {
// Out-of-range values passed to the setter are converted via a C
// cast, not clamping. Out-of-range indices cause exceptions to be
// raised rather than being silently ignored.
-class ExternalArray: public Array {
+class ExternalArray: public HeapObject {
public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
// [external_pointer]: The pointer to the external memory area backing this
// external array.
DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
@@ -2482,9 +2483,11 @@ class ExternalArray: public Array {
static const int kMaxLength = 0x3fffffff;
// ExternalArray headers are not quadword aligned.
- static const int kExternalPointerOffset = Array::kAlignedSize;
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kExternalPointerOffset =
+ POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
@@ -2666,6 +2669,7 @@ class Code: public HeapObject {
LOAD_IC,
KEYED_LOAD_IC,
CALL_IC,
+ KEYED_CALL_IC,
STORE_IC,
KEYED_STORE_IC,
BINARY_OP_IC,
@@ -2720,6 +2724,7 @@ class Code: public HeapObject {
inline bool is_store_stub() { return kind() == STORE_IC; }
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
+ inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline CodeStub::Major major_key();
@@ -3038,7 +3043,13 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
- static const int kSize = MAP_SIZE_ALIGN(kPadStart);
+ static const int kSize = MAP_POINTER_ALIGN(kPadStart);
+
+ // Layout of pointer fields. Heap iteration code relies on them
+ // being contiguously allocated.
+ static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
+ static const int kPointerFieldsEndOffset =
+ Map::kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -3297,6 +3308,12 @@ class SharedFunctionInfo: public HeapObject {
inline bool try_full_codegen();
inline void set_try_full_codegen(bool flag);
+ // Indicates if this function can be lazily compiled.
+ // This is used to determine if we can safely flush code from a function
+ // during GC when we expect that the function will no longer be used.
+ inline bool allows_lazy_compilation();
+ inline void set_allows_lazy_compilation(bool flag);
+
// Check whether an inlined constructor can be generated with the given
// prototype.
bool CanGenerateInlineConstructor(Object* prototype);
@@ -3350,23 +3367,64 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInferredNameOffset + kPointerSize;
- // Integer fields.
+#if V8_HOST_ARCH_32_BIT
+ // Smi fields.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
- static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
+ static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kIntSize;
- static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
+ kFormalParameterCountOffset + kPointerSize;
+ static const int kNumLiteralsOffset =
+ kExpectedNofPropertiesOffset + kPointerSize;
static const int kStartPositionAndTypeOffset =
+ kNumLiteralsOffset + kPointerSize;
+ static const int kEndPositionOffset =
+ kStartPositionAndTypeOffset + kPointerSize;
+ static const int kFunctionTokenPositionOffset =
+ kEndPositionOffset + kPointerSize;
+ static const int kCompilerHintsOffset =
+ kFunctionTokenPositionOffset + kPointerSize;
+ static const int kThisPropertyAssignmentsCountOffset =
+ kCompilerHintsOffset + kPointerSize;
+ // Total size.
+ static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+#else
+ // The only reason to use smi fields instead of int fields
+ // is to allow iteration without decoding maps during
+ // garbage collection.
+ // To avoid wasting space on 64-bit architectures we use
+ // the following trick: we group integer fields into pairs.
+ // The first integer in each pair is shifted left by 1.
+ // By doing this we guarantee that the LSB of each kPointerSize-aligned
+ // word is not set and thus this word cannot be treated as a pointer
+ // to a HeapObject during old space traversal.
+ static const int kLengthOffset =
+ kThisPropertyAssignmentsOffset + kPointerSize;
+ static const int kFormalParameterCountOffset =
+ kLengthOffset + kIntSize;
+
+ static const int kExpectedNofPropertiesOffset =
+ kFormalParameterCountOffset + kIntSize;
+ static const int kNumLiteralsOffset =
+ kExpectedNofPropertiesOffset + kIntSize;
+
+ static const int kEndPositionOffset =
kNumLiteralsOffset + kIntSize;
- static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
- static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
+ static const int kStartPositionAndTypeOffset =
+ kEndPositionOffset + kIntSize;
+
+ static const int kFunctionTokenPositionOffset =
+ kStartPositionAndTypeOffset + kIntSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
+
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kIntSize;
+
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
+
+#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
private:
@@ -3381,6 +3439,7 @@ class SharedFunctionInfo: public HeapObject {
// Bit positions in compiler_hints.
static const int kHasOnlySimpleThisPropertyAssignments = 0;
static const int kTryFullCodegen = 1;
+ static const int kAllowLazyCompilation = 2;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -4122,8 +4181,7 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHashFieldOffset = kLengthOffset + kPointerSize;
- static const int kSize = kHashFieldOffset + kIntSize;
- // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
+ static const int kSize = kHashFieldOffset + kPointerSize;
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
@@ -4142,12 +4200,12 @@ class String: public HeapObject {
// whether a hash code has been computed. If the hash code has been
// computed the 2nd bit tells whether the string can be used as an
// array index.
- static const int kHashComputedMask = 1;
- static const int kIsArrayIndexMask = 1 << 1;
- static const int kNofLengthBitFields = 2;
+ static const int kHashNotComputedMask = 1;
+ static const int kIsNotArrayIndexMask = 1 << 1;
+ static const int kNofHashBitFields = 2;
// Shift constant retrieving hash code from hash field.
- static const int kHashShift = kNofLengthBitFields;
+ static const int kHashShift = kNofHashBitFields;
// Array index strings this short can keep their index in the hash
// field.
@@ -4156,13 +4214,35 @@ class String: public HeapObject {
// For strings which are array indexes the hash value has the string length
// mixed into the hash, mainly to avoid a hash value of zero which would be
// the case for the string '0'. 24 bits are used for the array index value.
- static const int kArrayIndexHashLengthShift = 24 + kNofLengthBitFields;
+ static const int kArrayIndexValueBits = 24;
+ static const int kArrayIndexLengthBits =
+ kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
+
+ STATIC_CHECK((kArrayIndexLengthBits > 0));
+
+ static const int kArrayIndexHashLengthShift =
+ kArrayIndexValueBits + kNofHashBitFields;
+
static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
- static const int kArrayIndexValueBits =
- kArrayIndexHashLengthShift - kHashShift;
+
+ static const int kArrayIndexValueMask =
+ ((1 << kArrayIndexValueBits) - 1) << kHashShift;
+
+ // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
+ // can use a mask to test whether the length of a string is less than
+ // or equal to kMaxCachedArrayIndexLength.
+ STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
+
+ static const int kContainsCachedArrayIndexMask =
+ (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
+ kIsNotArrayIndexMask;
// Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField = 0;
+ static const int kEmptyHashField =
+ kIsNotArrayIndexMask | kHashNotComputedMask;
+
+ // Value of hash field containing computed hash equal to zero.
+ static const int kZeroHash = kIsNotArrayIndexMask;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
@@ -4230,6 +4310,8 @@ class String: public HeapObject {
// mutates the ConsString and might return a failure.
Object* SlowTryFlatten(PretenureFlag pretenure);
+ static inline bool IsHashFieldComputed(uint32_t field);
+
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
@@ -4279,7 +4361,7 @@ class SeqAsciiString: public SeqString {
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
- return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
// Layout description.
@@ -4331,7 +4413,7 @@ class SeqTwoByteString: public SeqString {
// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
- return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
}
// Layout description.
diff --git a/src/parser.cc b/src/parser.cc
index bbf71bca..31bac918 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -2867,10 +2867,13 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
+ int left_position = scanner().peek_location().beg_pos;
Expression* left = ParseAssignmentExpression(true, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
+ int right_position = scanner().peek_location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return NEW(Conditional(expression, left, right));
+ return NEW(Conditional(expression, left, right,
+ left_position, right_position));
}
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 72fe088d..9b8b2067 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -84,6 +84,12 @@ void OS::Setup() {
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ *ptr = value;
+}
+
+
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // FreeBSD runs on anything.
}
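The empty asm statement in the new ReleaseStore is a compiler-only barrier: it keeps the compiler from sinking earlier writes past the store, which is sufficient for release semantics on strongly ordered hardware such as x86. In portable modern C++ the same intent would be expressed with std::atomic; a sketch of that equivalent (an assumption about intent, not the code added here):

#include <atomic>
#include <cstdint>

using AtomicWord = intptr_t;

// Release store: all writes program-ordered before this store are
// visible to any thread that observes 'value' with an acquire load.
void ReleaseStore(std::atomic<AtomicWord>* ptr, AtomicWord value) {
  ptr->store(value, std::memory_order_release);
}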
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index ff1ecb13..7e8a5586 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -177,7 +177,8 @@ LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
#endif
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__) // don't use on a simulator
+#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
+ // Only use on ARM hardware.
pLinuxKernelMemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 0ae1ecf4..6d97ed7e 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -107,6 +107,12 @@ int OS::ActivationFrameAlignment() {
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ *ptr = value;
+}
+
+
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index fecb70b7..ea9bc987 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -130,6 +130,17 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
}
+
+template<class Visitor>
+void HeapEntriesMap::Apply(Visitor* visitor) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value))
+ visitor->Apply(reinterpret_cast<HeapEntry*>(p->value));
+ }
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index ad8867ce..805ed3e6 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -29,11 +29,12 @@
#include "v8.h"
#include "global-handles.h"
+#include "scopeinfo.h"
+#include "top.h"
+#include "zone-inl.h"
#include "profile-generator-inl.h"
-#include "../include/v8-profiler.h"
-
namespace v8 {
namespace internal {
@@ -55,7 +56,7 @@ TokenEnumerator::~TokenEnumerator() {
int TokenEnumerator::GetTokenId(Object* token) {
- if (token == NULL) return CodeEntry::kNoSecurityToken;
+ if (token == NULL) return TokenEnumerator::kNoSecurityToken;
for (int i = 0; i < token_locations_.length(); ++i) {
if (*token_locations_[i] == token && !token_removed_[i]) return i;
}
@@ -86,6 +87,37 @@ void TokenEnumerator::TokenRemoved(Object** token_location) {
}
+StringsStorage::StringsStorage()
+ : names_(StringsMatch) {
+}
+
+
+StringsStorage::~StringsStorage() {
+ for (HashMap::Entry* p = names_.Start();
+ p != NULL;
+ p = names_.Next(p)) {
+ DeleteArray(reinterpret_cast<const char*>(p->value));
+ }
+}
+
+
+const char* StringsStorage::GetName(String* name) {
+ if (name->IsString()) {
+ char* c_name =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach();
+ HashMap::Entry* cache_entry = names_.Lookup(c_name, name->Hash(), true);
+ if (cache_entry->value == NULL) {
+ // New entry added.
+ cache_entry->value = c_name;
+ } else {
+ DeleteArray(c_name);
+ }
+ return reinterpret_cast<const char*>(cache_entry->value);
+ }
+ return "";
+}
+
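StringsStorage::GetName is a string-interning cache: the first lookup keeps the freshly detached C string, later lookups free their copy and return the cached pointer, so each distinct name is stored once for the profiler's lifetime. The same pattern with standard containers, as an illustrative sketch:

#include <string>
#include <unordered_set>

class StringInterner {
 public:
  // Returns a pointer that stays valid for the interner's lifetime;
  // duplicate strings share one stored copy.
  const char* GetName(const std::string& name) {
    return names_.insert(name).first->c_str();
  }

 private:
  std::unordered_set<std::string> names_;
};

Because node-based containers keep element addresses stable across rehashing, two calls with equal strings return the same pointer.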
+
const char* CodeEntry::kEmptyNamePrefix = "";
unsigned CodeEntry::next_call_uid_ = 1;
@@ -171,7 +203,7 @@ ProfileTree::ProfileTree()
"(root)",
"",
0,
- CodeEntry::kNoSecurityToken),
+ TokenEnumerator::kNoSecurityToken),
root_(new ProfileNode(this, &root_entry_)) {
}
@@ -248,11 +280,11 @@ class FilteredCloneCallback {
private:
bool IsTokenAcceptable(int token, int parent_token) {
- if (token == CodeEntry::kNoSecurityToken
+ if (token == TokenEnumerator::kNoSecurityToken
|| token == security_token_id_) return true;
- if (token == CodeEntry::kInheritsSecurityToken) {
- ASSERT(parent_token != CodeEntry::kInheritsSecurityToken);
- return parent_token == CodeEntry::kNoSecurityToken
+ if (token == TokenEnumerator::kInheritsSecurityToken) {
+ ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken);
+ return parent_token == TokenEnumerator::kNoSecurityToken
|| parent_token == security_token_id_;
}
return false;
@@ -373,7 +405,7 @@ void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
- ASSERT(security_token_id != CodeEntry::kNoSecurityToken);
+ ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken);
CpuProfile* clone = new CpuProfile(title_, uid_);
clone->top_down_.FilteredClone(&top_down_, security_token_id);
clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
@@ -438,8 +470,7 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection()
- : function_and_resource_names_(StringsMatch),
- profiles_uids_(UidsMatch),
+ : profiles_uids_(UidsMatch),
current_profiles_semaphore_(OS::CreateSemaphore(1)) {
// Create list of unabridged profiles.
profiles_by_token_.Add(new List<CpuProfile*>());
@@ -470,11 +501,6 @@ CpuProfilesCollection::~CpuProfilesCollection() {
profiles_by_token_.Iterate(DeleteProfilesList);
code_entries_.Iterate(DeleteCodeEntry);
args_count_names_.Iterate(DeleteArgsCountName);
- for (HashMap::Entry* p = function_and_resource_names_.Start();
- p != NULL;
- p = function_and_resource_names_.Next(p)) {
- DeleteArray(reinterpret_cast<const char*>(p->value));
- }
}
@@ -517,7 +543,7 @@ CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
profile->CalculateTotalTicks();
profile->SetActualSamplingRate(actual_sampling_rate);
List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+ profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
unabridged_list->Add(profile);
HashMap::Entry* entry =
profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
@@ -550,8 +576,8 @@ CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
return NULL;
}
List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
- if (security_token_id == CodeEntry::kNoSecurityToken) {
+ profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
+ if (security_token_id == TokenEnumerator::kNoSecurityToken) {
return unabridged_list->at(index);
}
List<CpuProfile*>* list = GetProfilesList(security_token_id);
@@ -564,7 +590,7 @@ CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
int CpuProfilesCollection::TokenToIndex(int security_token_id) {
- ASSERT(CodeEntry::kNoSecurityToken == -1);
+ ASSERT(TokenEnumerator::kNoSecurityToken == -1);
return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
}
@@ -572,23 +598,25 @@ int CpuProfilesCollection::TokenToIndex(int security_token_id) {
List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
int security_token_id) {
const int index = TokenToIndex(security_token_id);
- profiles_by_token_.AddBlock(NULL, profiles_by_token_.length() - index + 1);
+ const int lists_to_add = index - profiles_by_token_.length() + 1;
+ if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+ profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
const int current_count = unabridged_list->length();
if (profiles_by_token_[index] == NULL) {
profiles_by_token_[index] = new List<CpuProfile*>(current_count);
}
List<CpuProfile*>* list = profiles_by_token_[index];
- list->AddBlock(NULL, current_count - list->length());
+ const int profiles_to_add = current_count - list->length();
+ if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
return list;
}
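Both AddBlock calls above could previously run with a negative count when the target list was already long enough; the fix computes the deficit first and pads only when it is positive. The grow-only idiom in isolation (standard containers, illustrative):

#include <cstddef>
#include <vector>

// Pad 'list' with null entries up to 'size'; never shrink it.
template <typename T>
void EnsureSize(std::vector<T*>* list, size_t size) {
  if (list->size() < size) list->resize(size, nullptr);
}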
List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
- if (security_token_id == CodeEntry::kNoSecurityToken) {
+ profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
+ if (security_token_id == TokenEnumerator::kNoSecurityToken) {
return unabridged_list;
}
List<CpuProfile*>* list = GetProfilesList(security_token_id);
@@ -611,7 +639,7 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
GetFunctionName(name),
GetName(resource_name),
line_number,
- CodeEntry::kNoSecurityToken);
+ TokenEnumerator::kNoSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -624,7 +652,7 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
GetFunctionName(name),
"",
v8::CpuProfileNode::kNoLineNumberInfo,
- CodeEntry::kNoSecurityToken);
+ TokenEnumerator::kNoSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -638,7 +666,7 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
GetName(name),
"",
v8::CpuProfileNode::kNoLineNumberInfo,
- CodeEntry::kInheritsSecurityToken);
+ TokenEnumerator::kInheritsSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -651,7 +679,7 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
GetName(args_count),
"",
v8::CpuProfileNode::kNoLineNumberInfo,
- CodeEntry::kInheritsSecurityToken);
+ TokenEnumerator::kInheritsSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -664,27 +692,6 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) {
}
-const char* CpuProfilesCollection::GetName(String* name) {
- if (name->IsString()) {
- char* c_name =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach();
- HashMap::Entry* cache_entry =
- function_and_resource_names_.Lookup(c_name,
- name->Hash(),
- true);
- if (cache_entry->value == NULL) {
- // New entry added.
- cache_entry->value = c_name;
- } else {
- DeleteArray(c_name);
- }
- return reinterpret_cast<const char*>(cache_entry->value);
- } else {
- return "";
- }
-}
-
-
const char* CpuProfilesCollection::GetName(int args_count) {
ASSERT(args_count >= 0);
if (args_count_names_.length() <= args_count) {
@@ -805,6 +812,794 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
profiles_->AddPathToCurrentProfiles(entries);
}
+
+HeapGraphEdge::HeapGraphEdge(Type type,
+ const char* name,
+ HeapEntry* from,
+ HeapEntry* to)
+ : type_(type), name_(name), from_(from), to_(to) {
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+}
+
+
+HeapGraphEdge::HeapGraphEdge(int index,
+ HeapEntry* from,
+ HeapEntry* to)
+ : type_(ELEMENT), index_(index), from_(from), to_(to) {
+}
+
+
+static void DeleteHeapGraphEdge(HeapGraphEdge** edge_ptr) {
+ delete *edge_ptr;
+}
+
+
+static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
+ delete *path_ptr;
+}
+
+
+HeapEntry::~HeapEntry() {
+ children_.Iterate(DeleteHeapGraphEdge);
+ retaining_paths_.Iterate(DeleteHeapGraphPath);
+}
+
+
+void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
+ HeapGraphEdge* edge =
+ new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry);
+ children_.Add(edge);
+ entry->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
+ HeapGraphEdge* edge = new HeapGraphEdge(index, this, entry);
+ children_.Add(edge);
+ entry->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
+ HeapGraphEdge* edge =
+ new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry);
+ children_.Add(edge);
+ entry->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetAutoIndexReference(HeapEntry* entry) {
+ SetElementReference(next_auto_index_++, entry);
+}
+
+
+int HeapEntry::TotalSize() {
+ return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
+}
+
+
+int HeapEntry::NonSharedTotalSize() {
+ return non_shared_total_size_ != kUnknownSize ?
+ non_shared_total_size_ : CalculateNonSharedTotalSize();
+}
+
+
+int HeapEntry::CalculateTotalSize() {
+ snapshot_->ClearPaint();
+ List<HeapEntry*> list(10);
+ list.Add(this);
+ total_size_ = self_size_;
+ this->PaintReachable();
+ while (!list.is_empty()) {
+ HeapEntry* entry = list.RemoveLast();
+ const int children_count = entry->children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapEntry* child = entry->children_[i]->to();
+ if (!child->painted_reachable()) {
+ list.Add(child);
+ child->PaintReachable();
+ total_size_ += child->self_size_;
+ }
+ }
+ }
+ return total_size_;
+}
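CalculateTotalSize is a standard reachability walk: paint the starting entry, then repeatedly pop a node and add each not-yet-painted child's self size, painting children as they are discovered so shared nodes are counted only once. A self-contained model of the traversal (toy node type, not V8's HeapEntry):

#include <vector>

struct Node {
  int self_size;
  bool painted = false;
  std::vector<Node*> children;
};

// Sum of self sizes over everything reachable from 'root';
// the 'painted' flag ensures each node is counted once.
int TotalSize(Node* root) {
  std::vector<Node*> work{root};
  root->painted = true;
  int total = root->self_size;
  while (!work.empty()) {
    Node* n = work.back();
    work.pop_back();
    for (Node* child : n->children) {
      if (!child->painted) {
        child->painted = true;
        total += child->self_size;
        work.push_back(child);
      }
    }
  }
  return total;
}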
+
+
+namespace {
+
+class NonSharedSizeCalculator {
+ public:
+ NonSharedSizeCalculator()
+ : non_shared_total_size_(0) {
+ }
+
+ int non_shared_total_size() const { return non_shared_total_size_; }
+
+ void Apply(HeapEntry* entry) {
+ if (entry->painted_reachable()) {
+ non_shared_total_size_ += entry->self_size();
+ }
+ }
+
+ private:
+ int non_shared_total_size_;
+};
+
+} // namespace
+
+int HeapEntry::CalculateNonSharedTotalSize() {
+ // To calculate the non-shared total size, we first paint all reachable
+ // nodes in one color, then paint all nodes reachable from other nodes
+ // with a different color. Only nodes painted with the first color are
+ // considered when calculating the total size.
+ snapshot_->ClearPaint();
+ List<HeapEntry*> list(10);
+ list.Add(this);
+ this->PaintReachable();
+ while (!list.is_empty()) {
+ HeapEntry* entry = list.RemoveLast();
+ const int children_count = entry->children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapEntry* child = entry->children_[i]->to();
+ if (!child->painted_reachable()) {
+ list.Add(child);
+ child->PaintReachable();
+ }
+ }
+ }
+
+ List<HeapEntry*> list2(10);
+ if (this != snapshot_->root()) {
+ list2.Add(snapshot_->root());
+ snapshot_->root()->PaintReachableFromOthers();
+ }
+ while (!list2.is_empty()) {
+ HeapEntry* entry = list2.RemoveLast();
+ const int children_count = entry->children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapEntry* child = entry->children_[i]->to();
+ if (child != this && child->not_painted_reachable_from_others()) {
+ list2.Add(child);
+ child->PaintReachableFromOthers();
+ }
+ }
+ }
+
+ NonSharedSizeCalculator calculator;
+ snapshot_->IterateEntries(&calculator);
+ return calculator.non_shared_total_size();
+}
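Spelled out, the two-color scheme computes a dominated ("non-shared") size: pass one colors everything reachable from the node, pass two colors everything reachable from the root without stepping through the node, and only nodes still carrying the first color contribute. A compact standalone model (illustrative types, not V8's entry map):

#include <vector>

enum Paint { kUnpainted, kReachable, kReachableFromOthers };

struct PNode {
  int self_size;
  Paint paint = kUnpainted;
  std::vector<PNode*> children;
};

// Flood-fill from 'start', skipping 'skip', overwriting any other color.
static void Flood(PNode* start, PNode* skip, Paint color) {
  std::vector<PNode*> work{start};
  start->paint = color;
  while (!work.empty()) {
    PNode* n = work.back();
    work.pop_back();
    for (PNode* c : n->children) {
      if (c != skip && c->paint != color) {
        c->paint = color;
        work.push_back(c);
      }
    }
  }
}

int NonSharedTotalSize(PNode* node, PNode* root,
                       const std::vector<PNode*>& all_nodes) {
  for (PNode* n : all_nodes) n->paint = kUnpainted;
  Flood(node, nullptr, kReachable);                           // pass 1
  if (node != root) Flood(root, node, kReachableFromOthers);  // pass 2
  int total = 0;
  for (PNode* n : all_nodes) {
    if (n->paint == kReachable) total += n->self_size;  // first color only
  }
  return total;
}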
+
+
+class CachedHeapGraphPath {
+ public:
+ CachedHeapGraphPath()
+ : nodes_(NodesMatch) { }
+ CachedHeapGraphPath(const CachedHeapGraphPath& src)
+ : nodes_(NodesMatch, &HashMap::DefaultAllocator, src.nodes_.capacity()),
+ path_(src.path_.length() + 1) {
+ for (HashMap::Entry* p = src.nodes_.Start();
+ p != NULL;
+ p = src.nodes_.Next(p)) {
+ nodes_.Lookup(p->key, p->hash, true);
+ }
+ path_.AddAll(src.path_);
+ }
+ void Add(HeapGraphEdge* edge) {
+ nodes_.Lookup(edge->to(), Hash(edge->to()), true);
+ path_.Add(edge);
+ }
+ bool ContainsNode(HeapEntry* node) {
+ return nodes_.Lookup(node, Hash(node), false) != NULL;
+ }
+ const List<HeapGraphEdge*>* path() const { return &path_; }
+
+ private:
+ static uint32_t Hash(HeapEntry* entry) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry));
+ }
+ static bool NodesMatch(void* key1, void* key2) { return key1 == key2; }
+
+ HashMap nodes_;
+ List<HeapGraphEdge*> path_;
+};
+
+
+const List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
+ if (retaining_paths_.length() == 0 && retainers_.length() != 0) {
+ CachedHeapGraphPath path;
+ FindRetainingPaths(this, &path);
+ }
+ return &retaining_paths_;
+}
+
+
+void HeapEntry::FindRetainingPaths(HeapEntry* node,
+ CachedHeapGraphPath* prev_path) {
+ for (int i = 0; i < node->retainers_.length(); ++i) {
+ HeapGraphEdge* ret_edge = node->retainers_[i];
+ if (prev_path->ContainsNode(ret_edge->from())) continue;
+ if (ret_edge->from() != snapshot_->root()) {
+ CachedHeapGraphPath path(*prev_path);
+ path.Add(ret_edge);
+ FindRetainingPaths(ret_edge->from(), &path);
+ } else {
+ HeapGraphPath* ret_path = new HeapGraphPath(*prev_path->path());
+ ret_path->Set(0, ret_edge);
+ retaining_paths_.Add(ret_path);
+ }
+ }
+}
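FindRetainingPaths walks retainer edges backwards, copying the partial path at every branch and using the path's node set to cut cycles; a complete path is emitted when an edge originates at the snapshot root. A toy approximation of the same backward DFS (simplified types; the real code also rewrites slot 0 through HeapGraphPath):

#include <set>
#include <vector>

struct Edge;
struct GNode { std::vector<Edge*> retainers; };
struct Edge { GNode* from; GNode* to; };

// Collect every cycle-free chain of retainer edges leading from 'root'
// down to the node of interest. Call as:
//   FindRetainingPaths(n, root, {n}, {}, &out);
void FindRetainingPaths(GNode* node, GNode* root,
                        std::set<GNode*> seen, std::vector<Edge*> path,
                        std::vector<std::vector<Edge*>>* out) {
  for (Edge* e : node->retainers) {
    if (seen.count(e->from)) continue;  // already on this path: a cycle
    std::vector<Edge*> longer = path;
    longer.insert(longer.begin(), e);   // root-most edge goes first
    if (e->from == root) {
      out->push_back(longer);
    } else {
      std::set<GNode*> seen2 = seen;
      seen2.insert(e->from);
      FindRetainingPaths(e->from, root, seen2, longer, out);
    }
  }
}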
+
+
+static void RemoveEdge(List<HeapGraphEdge*>* list, HeapGraphEdge* edge) {
+ for (int i = 0; i < list->length(); ) {
+ if (list->at(i) == edge) {
+ list->Remove(i);
+ return;
+ } else {
+ ++i;
+ }
+ }
+ UNREACHABLE();
+}
+
+
+void HeapEntry::RemoveChild(HeapGraphEdge* edge) {
+ RemoveEdge(&children_, edge);
+ delete edge;
+}
+
+
+void HeapEntry::RemoveRetainer(HeapGraphEdge* edge) {
+ RemoveEdge(&retainers_, edge);
+}
+
+
+void HeapEntry::CutEdges() {
+ for (int i = 0; i < children_.length(); ++i) {
+ HeapGraphEdge* edge = children_[i];
+ edge->to()->RemoveRetainer(edge);
+ }
+ children_.Iterate(DeleteHeapGraphEdge);
+ children_.Clear();
+
+ for (int i = 0; i < retainers_.length(); ++i) {
+ HeapGraphEdge* edge = retainers_[i];
+ edge->from()->RemoveChild(edge);
+ }
+ retainers_.Clear();
+}
+
+
+void HeapEntry::Print(int max_depth, int indent) {
+ OS::Print("%6d %6d %6d", self_size_, TotalSize(), NonSharedTotalSize());
+ if (type_ != STRING) {
+ OS::Print("%s %.40s\n", TypeAsString(), name_);
+ } else {
+ OS::Print("\"");
+ const char* c = name_;
+ while (*c && (c - name_) <= 40) {
+ if (*c != '\n')
+ OS::Print("%c", *c);
+ else
+ OS::Print("\\n");
+ ++c;
+ }
+ OS::Print("\"\n");
+ }
+ if (--max_depth == 0) return;
+ const int children_count = children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapGraphEdge* edge = children_[i];
+ switch (edge->type()) {
+ case HeapGraphEdge::CONTEXT_VARIABLE:
+ OS::Print(" %*c #%s: ", indent, ' ', edge->name());
+ break;
+ case HeapGraphEdge::ELEMENT:
+ OS::Print(" %*c %d: ", indent, ' ', edge->index());
+ break;
+ case HeapGraphEdge::PROPERTY:
+ OS::Print(" %*c %s: ", indent, ' ', edge->name());
+ break;
+ default:
+ OS::Print("!!! unknown edge type: %d ", edge->type());
+ }
+ edge->to()->Print(max_depth, indent + 2);
+ }
+}
+
+
+const char* HeapEntry::TypeAsString() {
+ switch (type_) {
+ case INTERNAL: return "/internal/";
+ case JS_OBJECT: return "/object/";
+ case CLOSURE: return "/closure/";
+ case STRING: return "/string/";
+ case CODE: return "/code/";
+ case ARRAY: return "/array/";
+ default: return "???";
+ }
+}
+
+
+HeapGraphPath::HeapGraphPath(const List<HeapGraphEdge*>& path)
+ : path_(path.length() + 1) {
+ Add(NULL);
+ for (int i = path.length() - 1; i >= 0; --i) {
+ Add(path[i]);
+ }
+}
+
+
+void HeapGraphPath::Print() {
+ path_[0]->from()->Print(1, 0);
+ for (int i = 0; i < path_.length(); ++i) {
+ OS::Print(" -> ");
+ HeapGraphEdge* edge = path_[i];
+ switch (edge->type()) {
+ case HeapGraphEdge::CONTEXT_VARIABLE:
+ OS::Print("[#%s] ", edge->name());
+ break;
+ case HeapGraphEdge::ELEMENT:
+ OS::Print("[%d] ", edge->index());
+ break;
+ case HeapGraphEdge::PROPERTY:
+ OS::Print("[%s] ", edge->name());
+ break;
+ default:
+ OS::Print("!!! unknown edge type: %d ", edge->type());
+ }
+ edge->to()->Print(1, 0);
+ }
+ OS::Print("\n");
+}
+
+
+class IndexedReferencesExtractor : public ObjectVisitor {
+ public:
+ IndexedReferencesExtractor(HeapSnapshot* snapshot, HeapEntry* parent)
+ : snapshot_(snapshot),
+ parent_(parent) {
+ }
+
+ void VisitPointer(Object** o) {
+ if (!(*o)->IsHeapObject()) return;
+ HeapEntry* entry = snapshot_->GetEntry(HeapObject::cast(*o));
+ if (entry != NULL) {
+ parent_->SetAutoIndexReference(entry);
+ }
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) VisitPointer(p);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ HeapEntry* parent_;
+};
+
+
+HeapEntriesMap::HeapEntriesMap()
+ : entries_(HeapObjectsMatch) {
+}
+
+
+HeapEntriesMap::~HeapEntriesMap() {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) delete reinterpret_cast<HeapEntry*>(p->value);
+ }
+}
+
+
+void HeapEntriesMap::Alias(HeapObject* object, HeapEntry* entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
+ if (cache_entry->value == NULL)
+ cache_entry->value = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(entry) | kAliasTag);
+}
+
+
+void HeapEntriesMap::Apply(void (HeapEntry::*Func)(void)) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) (reinterpret_cast<HeapEntry*>(p->value)->*Func)();
+ }
+}
+
+
+HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
+ return cache_entry != NULL ?
+ reinterpret_cast<HeapEntry*>(
+ reinterpret_cast<intptr_t>(cache_entry->value) & (~kAliasTag)) : NULL;
+}
+
+
+void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
+ ASSERT(cache_entry->value == NULL);
+ cache_entry->value = entry;
+}
+
+
+HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+ const char* title,
+ unsigned uid)
+ : collection_(collection),
+ title_(title),
+ uid_(uid),
+ root_(this) {
+}
+
+
+void HeapSnapshot::ClearPaint() {
+ root_.ClearPaint();
+ entries_.Apply(&HeapEntry::ClearPaint);
+}
+
+
+HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
+ if (!obj->IsHeapObject()) return NULL;
+ HeapObject* object = HeapObject::cast(obj);
+
+ {
+ HeapEntry* existing = FindEntry(object);
+ if (existing != NULL) return existing;
+ }
+
+ // Add new entry.
+ if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ SharedFunctionInfo* shared = func->shared();
+ String* name = String::cast(shared->name())->length() > 0 ?
+ String::cast(shared->name()) : shared->inferred_name();
+ return AddEntry(object, HeapEntry::CLOSURE, collection_->GetName(name));
+ } else if (object->IsJSObject()) {
+ return AddEntry(object,
+ HeapEntry::JS_OBJECT,
+ collection_->GetName(
+ JSObject::cast(object)->constructor_name()));
+ } else if (object->IsJSGlobalPropertyCell()) {
+ HeapEntry* value = GetEntry(JSGlobalPropertyCell::cast(object)->value());
+ // If the GPC references an object we are interested in, add that object.
+ // We don't store HeapEntries for GPCs. Instead, we make our hash map
+ // point to the object's HeapEntry by the GPC's address.
+ if (value != NULL) AddEntryAlias(object, value);
+ return value;
+ } else if (object->IsString()) {
+ return AddEntry(object,
+ HeapEntry::STRING,
+ collection_->GetName(String::cast(object)));
+ } else if (object->IsCode()
+ || object->IsSharedFunctionInfo()
+ || object->IsScript()) {
+ return AddEntry(object, HeapEntry::CODE);
+ } else if (object->IsFixedArray()) {
+ return AddEntry(object, HeapEntry::ARRAY);
+ }
+ // No interest in this object.
+ return NULL;
+}
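The alias mechanism works because HeapEntry pointers are word-aligned, so the low bit of a stored value is free to mark entries that merely stand in for another object's entry, as with the property cells above. Low-bit pointer tagging in isolation (standard containers, not the V8 HashMap API):

#include <cstdint>
#include <unordered_map>

struct Entry { int payload; };

const uintptr_t kAliasTag = 1;

class EntriesMap {
 public:
  // Owned entry: stored untagged.
  void Pair(const void* obj, Entry* e) {
    map_[obj] = reinterpret_cast<uintptr_t>(e);
  }
  // Alias: same entry, low bit set to mark non-ownership.
  void Alias(const void* obj, Entry* e) {
    map_[obj] = reinterpret_cast<uintptr_t>(e) | kAliasTag;
  }
  // Lookup strips the tag, so callers see one entry either way.
  Entry* Map(const void* obj) const {
    auto it = map_.find(obj);
    if (it == map_.end()) return nullptr;
    return reinterpret_cast<Entry*>(it->second & ~kAliasTag);
  }

 private:
  std::unordered_map<const void*, uintptr_t> map_;  // object -> tagged entry
};

Only untagged values are owned, so each entry can be deleted and visited exactly once even when several heap objects map to it.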
+
+
+void HeapSnapshot::SetClosureReference(HeapEntry* parent,
+ String* reference_name,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetClosureReference(
+ collection_->GetName(reference_name), child_entry);
+ }
+}
+
+
+void HeapSnapshot::SetElementReference(HeapEntry* parent,
+ int index,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetElementReference(index, child_entry);
+ }
+}
+
+
+void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
+ String* reference_name,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetPropertyReference(
+ collection_->GetName(reference_name), child_entry);
+ }
+}
+
+
+HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name) {
+ HeapEntry* entry = new HeapEntry(this,
+ type,
+ name,
+ GetObjectSize(object),
+ GetObjectSecurityToken(object));
+ entries_.Pair(object, entry);
+
+ // Detect whether this is a JS global object of the current context, and
+ // add it to the snapshot's roots. There can be several JS global objects
+ // in a context.
+ if (object->IsJSGlobalProxy()) {
+ int global_security_token = GetGlobalSecurityToken();
+ int object_security_token =
+ collection_->token_enumerator()->GetTokenId(
+ Context::cast(
+ JSGlobalProxy::cast(object)->context())->security_token());
+ if (object_security_token == TokenEnumerator::kNoSecurityToken
+ || object_security_token == global_security_token) {
+ HeapEntry* global_object_entry =
+ GetEntry(HeapObject::cast(object->map()->prototype()));
+ ASSERT(global_object_entry != NULL);
+ root_.SetAutoIndexReference(global_object_entry);
+ }
+ }
+
+ return entry;
+}
+
+
+namespace {
+
+class EdgesCutter {
+ public:
+ explicit EdgesCutter(int global_security_token)
+ : global_security_token_(global_security_token) {
+ }
+
+ void Apply(HeapEntry* entry) {
+ if (entry->security_token_id() != TokenEnumerator::kNoSecurityToken
+ && entry->security_token_id() != global_security_token_) {
+ entry->CutEdges();
+ }
+ }
+
+ private:
+ const int global_security_token_;
+};
+
+} // namespace
+
+void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
+ EdgesCutter cutter(GetGlobalSecurityToken());
+ entries_.Apply(&cutter);
+}
+
+
+int HeapSnapshot::GetGlobalSecurityToken() {
+ return collection_->token_enumerator()->GetTokenId(
+ Top::context()->global()->global_context()->security_token());
+}
+
+
+int HeapSnapshot::GetObjectSize(HeapObject* obj) {
+ return obj->IsJSObject() ?
+ CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
+}
+
+
+int HeapSnapshot::GetObjectSecurityToken(HeapObject* obj) {
+ if (obj->IsGlobalContext()) {
+ return collection_->token_enumerator()->GetTokenId(
+ Context::cast(obj)->security_token());
+ } else {
+ return TokenEnumerator::kNoSecurityToken;
+ }
+}
+
+
+int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
+ int size = obj->Size();
+ // If 'properties' and 'elements' are non-empty (thus, non-shared),
+ // take their size into account.
+ if (FixedArray::cast(obj->properties())->length() != 0) {
+ size += obj->properties()->Size();
+ }
+ if (FixedArray::cast(obj->elements())->length() != 0) {
+ size += obj->elements()->Size();
+ }
+ // For functions, also account for non-empty context and literals sizes.
+ if (obj->IsJSFunction()) {
+ JSFunction* f = JSFunction::cast(obj);
+ if (f->unchecked_context()->IsContext()) {
+ size += f->context()->Size();
+ }
+ if (f->literals()->length() != 0) {
+ size += f->literals()->Size();
+ }
+ }
+ return size;
+}
+
+
+void HeapSnapshot::Print(int max_depth) {
+ root_.Print(max_depth, 0);
+}
+
+
+HeapSnapshotsCollection::HeapSnapshotsCollection()
+ : snapshots_uids_(HeapSnapshotsMatch),
+ token_enumerator_(new TokenEnumerator()) {
+}
+
+
+static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
+ delete *snapshot_ptr;
+}
+
+
+HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete token_enumerator_;
+ snapshots_.Iterate(DeleteHeapSnapshot);
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
+ unsigned uid) {
+ HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
+ snapshots_.Add(snapshot);
+ HashMap::Entry* entry =
+ snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
+ static_cast<uint32_t>(snapshot->uid()),
+ true);
+ ASSERT(entry->value == NULL);
+ entry->value = snapshot;
+ return snapshot;
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
+ HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid),
+ false);
+ return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
+}
+
+
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
+ : snapshot_(snapshot) {
+}
+
+
+void HeapSnapshotGenerator::GenerateSnapshot() {
+ AssertNoAllocation no_alloc;
+
+ // Iterate heap contents.
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ ExtractReferences(obj);
+ }
+
+ snapshot_->CutObjectsFromForeignSecurityContexts();
+}
+
+
+void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
+ HeapEntry* entry = snapshot_->GetEntry(obj);
+ if (entry == NULL) return;
+ if (entry->visited()) return;
+
+ if (obj->IsJSObject()) {
+ JSObject* js_obj = JSObject::cast(obj);
+ ExtractClosureReferences(js_obj, entry);
+ ExtractPropertyReferences(js_obj, entry);
+ ExtractElementReferences(js_obj, entry);
+ snapshot_->SetPropertyReference(
+ entry, Heap::prototype_symbol(), js_obj->map()->prototype());
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(obj);
+ snapshot_->SetElementReference(entry, 0, cell->value());
+ } else if (obj->IsString()) {
+ if (obj->IsConsString()) {
+ ConsString* cs = ConsString::cast(obj);
+ snapshot_->SetElementReference(entry, 0, cs->first());
+ snapshot_->SetElementReference(entry, 1, cs->second());
+ }
+ } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
+ IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+ obj->Iterate(&refs_extractor);
+ } else if (obj->IsFixedArray()) {
+ IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+ obj->Iterate(&refs_extractor);
+ }
+ entry->MarkAsVisited();
+}
+
+
+void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->IsJSFunction()) {
+ HandleScope hs;
+ JSFunction* func = JSFunction::cast(js_obj);
+ Context* context = func->context();
+ ZoneScope zscope(DELETE_ON_EXIT);
+ ScopeInfo<ZoneListAllocationPolicy> scope_info(
+ context->closure()->shared()->code());
+ int locals_number = scope_info.NumberOfLocals();
+ for (int i = 0; i < locals_number; ++i) {
+ String* local_name = *scope_info.LocalName(i);
+ int idx = ScopeInfo<>::ContextSlotIndex(
+ context->closure()->shared()->code(), local_name, NULL);
+ if (idx >= 0 && idx < context->length()) {
+ snapshot_->SetClosureReference(entry, local_name, context->get(idx));
+ }
+ }
+ }
+}
+
+
+void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->HasFastProperties()) {
+ DescriptorArray* descs = js_obj->map()->instance_descriptors();
+ for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ switch (descs->GetType(i)) {
+ case FIELD: {
+ int index = descs->GetFieldIndex(i);
+ snapshot_->SetPropertyReference(
+ entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ snapshot_->SetPropertyReference(
+ entry, descs->GetKey(i), descs->GetConstantFunction(i));
+ break;
+ default: ;
+ }
+ }
+ } else {
+ StringDictionary* dictionary = js_obj->property_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ snapshot_->SetPropertyReference(
+ entry, String::cast(k), dictionary->ValueAt(i));
+ }
+ }
+ }
+}
+
+
+void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->HasFastElements()) {
+ FixedArray* elements = FixedArray::cast(js_obj->elements());
+ int length = js_obj->IsJSArray() ?
+ Smi::cast(JSArray::cast(js_obj)->length())->value() :
+ elements->length();
+ for (int i = 0; i < length; ++i) {
+ if (!elements->get(i)->IsTheHole()) {
+ snapshot_->SetElementReference(entry, i, elements->get(i));
+ }
+ }
+ } else if (js_obj->HasDictionaryElements()) {
+ NumberDictionary* dictionary = js_obj->element_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ ASSERT(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ snapshot_->SetElementReference(entry, index, dictionary->ValueAt(i));
+ }
+ }
+ }
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 78307870..3f90702b 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "hashmap.h"
+#include "../include/v8-profiler.h"
namespace v8 {
namespace internal {
@@ -41,6 +42,9 @@ class TokenEnumerator {
~TokenEnumerator();
int GetTokenId(Object* token);
+ static const int kNoSecurityToken = -1;
+ static const int kInheritsSecurityToken = -2;
+
private:
static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
void* parameter);
@@ -50,6 +54,30 @@ class TokenEnumerator {
List<bool> token_removed_;
friend class TokenEnumeratorTester;
+
+ DISALLOW_COPY_AND_ASSIGN(TokenEnumerator);
+};
+
+
+// Provides storage for strings allocated in the C++ heap, holding them
+// forever, even if they disappear from the JS heap or from external storage.
+class StringsStorage {
+ public:
+ StringsStorage();
+ ~StringsStorage();
+
+ const char* GetName(String* name);
+
+ private:
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
+ }
+
+ // String::Hash -> const char*
+ HashMap names_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
@@ -78,8 +106,6 @@ class CodeEntry {
void CopyData(const CodeEntry& source);
static const char* kEmptyNamePrefix;
- static const int kNoSecurityToken = -1;
- static const int kInheritsSecurityToken = -2;
private:
unsigned call_uid_;
@@ -257,6 +283,9 @@ class CpuProfilesCollection {
String* title,
double actual_sampling_rate);
List<CpuProfile*>* Profiles(int security_token_id);
+ const char* GetName(String* name) {
+ return function_and_resource_names_.GetName(name);
+ }
CpuProfile* GetProfile(int security_token_id, unsigned uid);
inline bool is_last_profile();
@@ -274,22 +303,15 @@ class CpuProfilesCollection {
private:
INLINE(const char* GetFunctionName(String* name));
INLINE(const char* GetFunctionName(const char* name));
- const char* GetName(String* name);
const char* GetName(int args_count);
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
-
INLINE(static bool UidsMatch(void* key1, void* key2)) {
return key1 == key2;
}
- // String::Hash -> const char*
- HashMap function_and_resource_names_;
+ StringsStorage function_and_resource_names_;
// args_count -> char*
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
@@ -338,6 +360,8 @@ class SampleRateCalculator {
unsigned measurements_count_;
unsigned wall_time_query_countdown_;
double last_wall_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
};
@@ -397,6 +421,310 @@ class ProfileGenerator {
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
+
+class HeapSnapshot;
+class HeapEntry;
+
+
+class HeapGraphEdge {
+ public:
+ enum Type {
+ CONTEXT_VARIABLE,
+ ELEMENT,
+ PROPERTY
+ };
+
+ HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
+ HeapGraphEdge(int index, HeapEntry* from, HeapEntry* to);
+
+ Type type() const { return type_; }
+ int index() const {
+ ASSERT(type_ == ELEMENT);
+ return index_;
+ }
+ const char* name() const {
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+ return name_;
+ }
+ HeapEntry* from() const { return from_; }
+ HeapEntry* to() const { return to_; }
+
+ private:
+ Type type_;
+ union {
+ int index_;
+ const char* name_;
+ };
+ HeapEntry* from_;
+ HeapEntry* to_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
+};
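HeapGraphEdge is a hand-rolled tagged union: type_ records which member of the anonymous union is live, and the accessors assert the tag before reading. Later C++ standards express the same shape with std::variant, which carries the tag itself; a sketch of that equivalent (an illustration, not a proposed change):

#include <string>
#include <variant>

struct Entry;

class Edge {
 public:
  Edge(int index, Entry* from, Entry* to)         // ELEMENT edge
      : data_(index), from_(from), to_(to) {}
  Edge(std::string name, Entry* from, Entry* to)  // named edge
      : data_(std::move(name)), from_(from), to_(to) {}

  // std::get enforces the tag: it throws if the wrong member is read.
  int index() const { return std::get<int>(data_); }
  const std::string& name() const { return std::get<std::string>(data_); }
  Entry* from() const { return from_; }
  Entry* to() const { return to_; }

 private:
  std::variant<int, std::string> data_;  // tag + payload in one member
  Entry* from_;
  Entry* to_;
};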
+
+
+class HeapGraphPath;
+class CachedHeapGraphPath;
+
+class HeapEntry {
+ public:
+ enum Type {
+ INTERNAL,
+ ARRAY,
+ STRING,
+ JS_OBJECT,
+ CODE,
+ CLOSURE
+ };
+
+ explicit HeapEntry(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ visited_(false),
+ type_(INTERNAL),
+ name_(""),
+ next_auto_index_(0),
+ self_size_(0),
+ security_token_id_(TokenEnumerator::kNoSecurityToken),
+ children_(1),
+ retainers_(0),
+ retaining_paths_(0),
+ total_size_(kUnknownSize),
+ non_shared_total_size_(kUnknownSize),
+ painted_(kUnpainted) { }
+ HeapEntry(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ int self_size,
+ int security_token_id)
+ : snapshot_(snapshot),
+ visited_(false),
+ type_(type),
+ name_(name),
+ next_auto_index_(1),
+ self_size_(self_size),
+ security_token_id_(security_token_id),
+ children_(4),
+ retainers_(4),
+ retaining_paths_(4),
+ total_size_(kUnknownSize),
+ non_shared_total_size_(kUnknownSize),
+ painted_(kUnpainted) { }
+ ~HeapEntry();
+
+ bool visited() const { return visited_; }
+ Type type() const { return type_; }
+ const char* name() const { return name_; }
+ int self_size() const { return self_size_; }
+ int security_token_id() const { return security_token_id_; }
+ bool painted_reachable() { return painted_ == kPaintReachable; }
+ bool not_painted_reachable_from_others() {
+ return painted_ != kPaintReachableFromOthers;
+ }
+ const List<HeapGraphEdge*>* children() const { return &children_; }
+ const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
+ const List<HeapGraphPath*>* GetRetainingPaths();
+
+ void ClearPaint() { painted_ = kUnpainted; }
+ void CutEdges();
+ void MarkAsVisited() { visited_ = true; }
+ void PaintReachable() {
+ ASSERT(painted_ == kUnpainted);
+ painted_ = kPaintReachable;
+ }
+ void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
+ void SetClosureReference(const char* name, HeapEntry* entry);
+ void SetElementReference(int index, HeapEntry* entry);
+ void SetPropertyReference(const char* name, HeapEntry* entry);
+ void SetAutoIndexReference(HeapEntry* entry);
+
+ int TotalSize();
+ int NonSharedTotalSize();
+
+ void Print(int max_depth, int indent);
+
+ private:
+ int CalculateTotalSize();
+ int CalculateNonSharedTotalSize();
+ void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
+ void RemoveChild(HeapGraphEdge* edge);
+ void RemoveRetainer(HeapGraphEdge* edge);
+
+ const char* TypeAsString();
+
+ HeapSnapshot* snapshot_;
+ bool visited_;
+ Type type_;
+ const char* name_;
+ int next_auto_index_;
+ int self_size_;
+ int security_token_id_;
+ List<HeapGraphEdge*> children_;
+ List<HeapGraphEdge*> retainers_;
+ List<HeapGraphPath*> retaining_paths_;
+ int total_size_;
+ int non_shared_total_size_;
+ int painted_;
+
+ static const int kUnknownSize = -1;
+ static const int kUnpainted = 0;
+ static const int kPaintReachable = 1;
+ static const int kPaintReachableFromOthers = 2;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapEntry);
+};
+
+
+class HeapGraphPath {
+ public:
+ HeapGraphPath()
+ : path_(8) { }
+ explicit HeapGraphPath(const List<HeapGraphEdge*>& path);
+
+ void Add(HeapGraphEdge* edge) { path_.Add(edge); }
+ void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
+ const List<HeapGraphEdge*>* path() const { return &path_; }
+
+ void Print();
+
+ private:
+ List<HeapGraphEdge*> path_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapGraphPath);
+};
+
+
+class HeapEntriesMap {
+ public:
+ HeapEntriesMap();
+ ~HeapEntriesMap();
+
+ void Alias(HeapObject* object, HeapEntry* entry);
+ void Apply(void (HeapEntry::*Func)(void));
+ template<class Visitor>
+ void Apply(Visitor* visitor);
+ HeapEntry* Map(HeapObject* object);
+ void Pair(HeapObject* object, HeapEntry* entry);
+
+ private:
+ INLINE(uint32_t Hash(HeapObject* object)) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+ }
+ INLINE(static bool HeapObjectsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+ INLINE(bool IsAlias(void* ptr)) {
+ return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
+ }
+
+ static const intptr_t kAliasTag = 1;
+
+ HashMap entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
+
+
+class HeapSnapshotsCollection;
+
+// HeapSnapshot represents a single heap snapshot. It is stored in
+// HeapSnapshotsCollection, which is also a factory for
+// HeapSnapshots. All HeapSnapshots share strings copied from the JS heap
+// so they can be returned even after the originals have been collected.
+// HeapSnapshotGenerator fills in a HeapSnapshot.
+class HeapSnapshot {
+ public:
+ HeapSnapshot(HeapSnapshotsCollection* collection,
+ const char* title,
+ unsigned uid);
+ void ClearPaint();
+ void CutObjectsFromForeignSecurityContexts();
+ HeapEntry* GetEntry(Object* object);
+ void SetClosureReference(
+ HeapEntry* parent, String* reference_name, Object* child);
+ void SetElementReference(HeapEntry* parent, int index, Object* child);
+ void SetPropertyReference(
+ HeapEntry* parent, String* reference_name, Object* child);
+
+ INLINE(const char* title() const) { return title_; }
+ INLINE(unsigned uid() const) { return uid_; }
+ const HeapEntry* const_root() const { return &root_; }
+ HeapEntry* root() { return &root_; }
+ template<class Visitor>
+ void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
+
+ void Print(int max_depth);
+
+ private:
+ HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type) {
+ return AddEntry(object, type, "");
+ }
+ HeapEntry* AddEntry(
+ HeapObject* object, HeapEntry::Type type, const char* name);
+ void AddEntryAlias(HeapObject* object, HeapEntry* entry) {
+ entries_.Alias(object, entry);
+ }
+ HeapEntry* FindEntry(HeapObject* object) {
+ return entries_.Map(object);
+ }
+ int GetGlobalSecurityToken();
+ int GetObjectSecurityToken(HeapObject* obj);
+ static int GetObjectSize(HeapObject* obj);
+ static int CalculateNetworkSize(JSObject* obj);
+
+ HeapSnapshotsCollection* collection_;
+ const char* title_;
+ unsigned uid_;
+ HeapEntry root_;
+ // HeapObject* -> HeapEntry*
+ HeapEntriesMap entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
+};
+
+
+class HeapSnapshotsCollection {
+ public:
+ HeapSnapshotsCollection();
+ ~HeapSnapshotsCollection();
+
+ HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ List<HeapSnapshot*>* snapshots() { return &snapshots_; }
+ HeapSnapshot* GetSnapshot(unsigned uid);
+
+ const char* GetName(String* name) { return names_.GetName(name); }
+
+ TokenEnumerator* token_enumerator() { return token_enumerator_; }
+
+ private:
+ INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+
+ List<HeapSnapshot*> snapshots_;
+ // uid -> HeapSnapshot*
+ HashMap snapshots_uids_;
+ StringsStorage names_;
+ TokenEnumerator* token_enumerator_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
+};
+
+
+class HeapSnapshotGenerator {
+ public:
+ explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
+ void GenerateSnapshot();
+
+ private:
+ void ExtractReferences(HeapObject* obj);
+ void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+
+ HeapSnapshot* snapshot_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
+};
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/regexp.js b/src/regexp.js
index 24e33098..9367f15f 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -257,6 +257,10 @@ function RegExpExec(string) {
}
+// One-element cache for the simplified test regexp.
+var regexp_key;
+var regexp_val;
+
// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
// that test is defined in terms of String.prototype.exec. However, it probably
// means the original value of String.prototype.exec, which is what everybody
@@ -281,9 +285,7 @@ function RegExpTest(string) {
}
var lastIndex = this.lastIndex;
-
var cache = regExpCache;
-
if (%_ObjectEquals(cache.type, 'test') &&
%_ObjectEquals(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string) &&
@@ -291,6 +293,22 @@ function RegExpTest(string) {
return cache.answer;
}
+ // Remove an irrelevant preceding '.*' in a test regexp. The expression
+ // checks whether this.source starts with '.*' and that the third
+ // char is not a '?'.
+ if (%_StringCharCodeAt(this.source,0) == 46 && // '.'
+ %_StringCharCodeAt(this.source,1) == 42 && // '*'
+ %_StringCharCodeAt(this.source,2) != 63) { // '?'
+ if (!%_ObjectEquals(regexp_key, this)) {
+ regexp_key = this;
+ regexp_val = new $RegExp(this.source.substring(2, this.source.length),
+ (this.global ? 'g' : '')
+ + (this.ignoreCase ? 'i' : '')
+ + (this.multiline ? 'm' : ''));
+ }
+ if (!regexp_val.test(s)) return false;
+ }
+
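The rewrite relies on an equivalence that holds for boolean tests: a pattern starting with an unquantified '.*' accepts a subject exactly when the pattern without that prefix matches somewhere in it, so the cheaper stripped regexp can answer false early. The third-character check conservatively skips lazy '.*?' prefixes. A quick C++ illustration of the accept/reject equivalence (std::regex semantics differ from JS regexps in places, so treat it as a sketch):

#include <cassert>
#include <regex>
#include <string>

int main() {
  std::string subject = "say hello";
  std::regex full(".*hello");    // original test pattern
  std::regex stripped("hello");  // '.*' prefix removed
  // For pure accept/reject, the leading '.*' changes nothing.
  assert(std::regex_search(subject, full) ==
         std::regex_search(subject, stripped));
  return 0;
}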
var length = s.length;
var i = this.global ? TO_INTEGER(lastIndex) : 0;
@@ -299,7 +317,7 @@ function RegExpTest(string) {
cache.subject = s;
cache.lastIndex = i;
- if (i < 0 || i > s.length) {
+ if (i < 0 || i > length) {
this.lastIndex = 0;
cache.answer = false;
return false;
diff --git a/src/runtime.cc b/src/runtime.cc
index b421ac71..88786e82 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -291,7 +291,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE);
- } else if (Array::IndexFromObject(*key, &element_index)) {
+ } else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = SetElement(boilerplate, element_index, value);
} else {
@@ -569,6 +569,18 @@ static void GetOwnPropertyImplementation(JSObject* obj,
}
+// Enumeration used as indices into the array returned from GetOwnProperty.
+enum PropertyDescriptorIndices {
+ IS_ACCESSOR_INDEX,
+ VALUE_INDEX,
+ GETTER_INDEX,
+ SETTER_INDEX,
+ WRITABLE_INDEX,
+ ENUMERABLE_INDEX,
+ CONFIGURABLE_INDEX,
+ DESCRIPTOR_SIZE
+};
+
// Returns an array with the property description:
// if args[1] is not a property on args[0]
// returns undefined
@@ -579,18 +591,63 @@ static void GetOwnPropertyImplementation(JSObject* obj,
static Object* Runtime_GetOwnProperty(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
- Handle<FixedArray> elms = Factory::NewFixedArray(5);
+ Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
LookupResult result;
CONVERT_CHECKED(JSObject, obj, args[0]);
CONVERT_CHECKED(String, name, args[1]);
+ // This could be an element.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ if (!obj->HasLocalElement(index)) {
+ return Heap::undefined_value();
+ }
+
+ // Special handling of string objects according to ECMAScript 5 15.5.5.2.
+ // Note that this might be a string object with elements other than the
+ // actual string value. This is covered by the subsequent cases.
+ if (obj->IsStringObjectWithCharacterAt(index)) {
+ JSValue* js_value = JSValue::cast(obj);
+ String* str = String::cast(js_value->value());
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, str->SubString(index, index+1));
+ elms->set(WRITABLE_INDEX, Heap::false_value());
+ elms->set(ENUMERABLE_INDEX, Heap::false_value());
+ elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+ return *desc;
+ }
+
+ // This can potentially be an element in the elements dictionary or
+ // a fast element.
+ if (obj->HasDictionaryElements()) {
+ NumberDictionary* dictionary = obj->element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+ return *desc;
+ } else {
+ // Elements that are stored as array elements always have:
+ // writable: true, configurable: true, enumerable: true.
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, obj->GetElement(index));
+ elms->set(WRITABLE_INDEX, Heap::true_value());
+ elms->set(ENUMERABLE_INDEX, Heap::true_value());
+ elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+ return *desc;
+ }
+ }
+
// Use recursive implementation to also traverse hidden prototypes
GetOwnPropertyImplementation(obj, name, &result);
- if (!result.IsProperty())
+ if (!result.IsProperty()) {
return Heap::undefined_value();
-
+ }
if (result.type() == CALLBACKS) {
Object* structure = result.GetCallbackObject();
if (structure->IsProxy() || structure->IsAccessorInfo()) {
@@ -598,25 +655,25 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
// an API defined callback.
Object* value = obj->GetPropertyWithCallback(
obj, structure, name, result.holder());
- elms->set(0, Heap::false_value());
- elms->set(1, value);
- elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, value);
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
} else if (structure->IsFixedArray()) {
// __defineGetter__/__defineSetter__ callback.
- elms->set(0, Heap::true_value());
- elms->set(1, FixedArray::cast(structure)->get(0));
- elms->set(2, FixedArray::cast(structure)->get(1));
+ elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(GETTER_INDEX, FixedArray::cast(structure)->get(0));
+ elms->set(SETTER_INDEX, FixedArray::cast(structure)->get(1));
} else {
return Heap::undefined_value();
}
} else {
- elms->set(0, Heap::false_value());
- elms->set(1, result.GetLazyValue());
- elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, result.GetLazyValue());
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
}
- elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
- elms->set(4, Heap::ToBoolean(!result.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
return *desc;
}
@@ -1581,25 +1638,9 @@ static Object* Runtime_SetCode(Arguments args) {
}
-static Object* CharCodeAt(String* subject, Object* index) {
- uint32_t i = 0;
- if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
- // Flatten the string. If someone wants to get a char at an index
- // in a cons string, it is likely that more indices will be
- // accessed.
- Object* flat = subject->TryFlatten();
- if (flat->IsFailure()) return flat;
- subject = String::cast(flat);
- if (i >= static_cast<uint32_t>(subject->length())) {
- return Heap::nan_value();
- }
- return Smi::FromInt(subject->Get(i));
-}
-
-
static Object* CharFromCode(Object* char_code) {
uint32_t code;
- if (Array::IndexFromObject(char_code, &code)) {
+ if (char_code->ToArrayIndex(&code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
@@ -1614,21 +1655,31 @@ static Object* Runtime_StringCharCodeAt(Arguments args) {
CONVERT_CHECKED(String, subject, args[0]);
Object* index = args[1];
- return CharCodeAt(subject, index);
-}
+ RUNTIME_ASSERT(index->IsNumber());
+ uint32_t i = 0;
+ if (index->IsSmi()) {
+ int value = Smi::cast(index)->value();
+ if (value < 0) return Heap::nan_value();
+ i = value;
+ } else {
+ ASSERT(index->IsHeapNumber());
+ double value = HeapNumber::cast(index)->value();
+ i = static_cast<uint32_t>(DoubleToInteger(value));
+ }
-static Object* Runtime_StringCharAt(Arguments args) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
+ // Flatten the string. If someone wants to get a char at an index
+ // in a cons string, it is likely that more indices will be
+ // accessed.
+ Object* flat = subject->TryFlatten();
+ if (flat->IsFailure()) return flat;
+ subject = String::cast(flat);
- CONVERT_CHECKED(String, subject, args[0]);
- Object* index = args[1];
- Object* code = CharCodeAt(subject, index);
- if (code == Heap::nan_value()) {
- return Heap::undefined_value();
+ if (i >= static_cast<uint32_t>(subject->length())) {
+ return Heap::nan_value();
}
- return CharFromCode(code);
+
+ return Smi::FromInt(subject->Get(i));
}
@@ -2780,7 +2831,7 @@ static Object* Runtime_StringIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
- if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+ if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
int position = Runtime::StringMatch(sub, pat, start_index);
@@ -2830,7 +2881,7 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
- if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+ if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
uint32_t pat_length = pat->length();
uint32_t sub_length = sub->length();
@@ -3657,7 +3708,7 @@ Object* Runtime::GetObjectProperty(Handle<Object> object, Handle<Object> key) {
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
return GetElementOrCharAt(object, index);
}
@@ -3843,7 +3894,7 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@@ -3895,7 +3946,7 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@@ -3942,7 +3993,7 @@ Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@@ -4355,7 +4406,7 @@ static Object* Runtime_GetArgumentsProperty(Arguments args) {
// Try to convert the key to an index. If successful and within
// range, return the argument from the frame.
uint32_t index;
- if (Array::IndexFromObject(args[0], &index) && index < n) {
+ if (args[0]->ToArrayIndex(&index) && index < n) {
return frame->GetParameter(index);
}
@@ -5287,6 +5338,28 @@ static Object* Runtime_NumberToInteger(Arguments args) {
}
+
+
+
+static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(number, args[0]);
+
+ // We do not include 0 so that we don't have to handle +0 and -0 separately.
+ if (number > 0 && number <= Smi::kMaxValue) {
+ return Smi::FromInt(static_cast<int>(number));
+ }
+
+ double double_value = DoubleToInteger(number);
+ // Map both -0 and +0 to +0.
+ if (double_value == 0) double_value = 0;
+
+ return Heap::NumberFromDouble(double_value);
+}
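This runtime function exists because -0 and +0 are distinct doubles that compare equal, and its callers want them collapsed to +0; the Smi fast path deliberately excludes 0 so it never has to tell the two apart. The normalization trick in isolation (plain C++):

#include <cassert>
#include <cmath>

int main() {
  double minus_zero = -0.0;
  assert(minus_zero == 0.0);            // IEEE 754: -0 compares equal to +0
  assert(std::signbit(minus_zero));     // but its sign bit is set
  if (minus_zero == 0) minus_zero = 0;  // assigning the literal yields +0
  assert(!std::signbit(minus_zero));
  return 0;
}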
+
+
static Object* Runtime_NumberToJSUint32(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -6457,8 +6530,8 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
if (obj->IsFailure()) return obj;
AssertNoAllocation no_gc;
- reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
- FixedArray* array = FixedArray::cast(obj);
+ FixedArray* array = reinterpret_cast<FixedArray*>(obj);
+ array->set_map(Heap::fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -7172,6 +7245,24 @@ static Object* Runtime_CompileString(Arguments args) {
}
+static ObjectPair CompileGlobalEval(Handle<String> source,
+ Handle<Object> receiver) {
+ // Deal with a normal eval call with a string argument. Compile it
+ // and return the compiled function bound in the local context.
+ Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
+ source,
+ Handle<Context>(Top::context()),
+ Top::context()->IsGlobalContext(),
+ Compiler::DONT_VALIDATE_JSON);
+ if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
+ Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
+ shared,
+ Handle<Context>(Top::context()),
+ NOT_TENURED);
+ return MakePair(*compiled, *receiver);
+}
+
+
static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
ASSERT(args.length() == 3);
if (!args[0]->IsJSFunction()) {
@@ -7237,20 +7328,27 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
return MakePair(*callee, Top::context()->global()->global_receiver());
}
- // Deal with a normal eval call with a string argument. Compile it
- // and return the compiled function bound in the local context.
- Handle<String> source = args.at<String>(1);
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- Handle<Context>(Top::context()),
- Top::context()->IsGlobalContext(),
- Compiler::DONT_VALIDATE_JSON);
- if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
- callee = Factory::NewFunctionFromSharedFunctionInfo(
- shared,
- Handle<Context>(Top::context()),
- NOT_TENURED);
- return MakePair(*callee, args[2]);
+ return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+}
+
+
+static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
+ ASSERT(args.length() == 3);
+ if (!args[0]->IsJSFunction()) {
+ return MakePair(Top::ThrowIllegalOperation(), NULL);
+ }
+
+ HandleScope scope;
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
+
+ // 'eval' is bound in the global context, but it may have been overwritten.
+ // Compare it to the builtin 'GlobalEval' function to make sure.
+ if (*callee != Top::global_context()->global_eval_fun() ||
+ !args[1]->IsString()) {
+ return MakePair(*callee, Top::context()->global()->global_receiver());
+ }
+
+ return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
}
@@ -7747,8 +7845,8 @@ static Object* Runtime_SwapElements(Arguments args) {
Handle<Object> key2 = args.at<Object>(2);
uint32_t index1, index2;
- if (!Array::IndexFromObject(*key1, &index1)
- || !Array::IndexFromObject(*key2, &index2)) {
+ if (!key1->ToArrayIndex(&index1)
+ || !key2->ToArrayIndex(&index2)) {
return Top::ThrowIllegalOperation();
}
@@ -7779,17 +7877,19 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
uint32_t index;
- if (!Array::IndexFromObject(key, &index) || index >= length) {
+ if (!key->ToArrayIndex(&index) || index >= length) {
// Zap invalid keys.
keys->set_undefined(i);
}
}
return *Factory::NewJSArrayWithElements(keys);
} else {
+ ASSERT(array->HasFastElements());
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
- uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
+ uint32_t actual_length =
+ static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
Factory::NewNumber(static_cast<double>(min_length));
@@ -8143,8 +8243,9 @@ static const int kFrameDetailsArgumentCountIndex = 3;
static const int kFrameDetailsLocalCountIndex = 4;
static const int kFrameDetailsSourcePositionIndex = 5;
static const int kFrameDetailsConstructCallIndex = 6;
-static const int kFrameDetailsDebuggerFrameIndex = 7;
-static const int kFrameDetailsFirstDynamicIndex = 8;
+static const int kFrameDetailsAtReturnIndex = 7;
+static const int kFrameDetailsDebuggerFrameIndex = 8;
+static const int kFrameDetailsFirstDynamicIndex = 9;
// Return an array with frame details
// args[0]: number: break id
@@ -8158,9 +8259,11 @@ static const int kFrameDetailsFirstDynamicIndex = 8;
// 4: Local count
// 5: Source position
// 6: Constructor call
-// 7: Debugger frame
+// 7: Is at return
+// 8: Debugger frame
// Arguments name, value
// Locals name, value
+// Return value if any
static Object* Runtime_GetFrameDetails(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
@@ -8236,8 +8339,39 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
}
}
- // Now advance to the arguments adapter frame (if any). If contains all
- // the provided parameters and
+ // Check whether this frame is positioned at return.
+ int at_return = (index == 0) ? Debug::IsBreakAtReturn(it.frame()) : false;
+
+  // If positioned just before a return, find the value to be returned and
+  // add it to the frame information.
+ Handle<Object> return_value = Factory::undefined_value();
+ if (at_return) {
+ StackFrameIterator it2;
+ Address internal_frame_sp = NULL;
+ while (!it2.done()) {
+ if (it2.frame()->is_internal()) {
+ internal_frame_sp = it2.frame()->sp();
+ } else {
+ if (it2.frame()->is_java_script()) {
+ if (it2.frame()->id() == it.frame()->id()) {
+ // The internal frame just before the JavaScript frame contains the
+ // value to return on top. A debug break at return will create an
+ // internal frame to store the return value (eax/rax/r0) before
+ // entering the debug break exit frame.
+ if (internal_frame_sp != NULL) {
+ return_value =
+ Handle<Object>(Memory::Object_at(internal_frame_sp));
+ break;
+ }
+ }
+ }
+
+ // Indicate that the previous frame was not an internal frame.
+ internal_frame_sp = NULL;
+ }
+ it2.Advance();
+ }
+ }
// Now advance to the arguments adapter frame (if any). It contains all
// the provided parameters whereas the function frame always has the number
@@ -8254,7 +8388,8 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + info.NumberOfLocals());
+ 2 * (argument_count + info.NumberOfLocals()) +
+ (at_return ? 1 : 0);
Handle<FixedArray> details = Factory::NewFixedArray(details_size);
// Add the frame id.
@@ -8280,6 +8415,9 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
// Add the constructor information.
details->set(kFrameDetailsConstructCallIndex, Heap::ToBoolean(constructor));
+ // Add the at return information.
+ details->set(kFrameDetailsAtReturnIndex, Heap::ToBoolean(at_return));
+
// Add information on whether this frame is invoked in the debugger context.
details->set(kFrameDetailsDebuggerFrameIndex,
Heap::ToBoolean(*save->context() == *Debug::debug_context()));
@@ -8309,6 +8447,11 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
details->set(details_index++, locals->get(i));
}
+ // Add the value being returned.
+ if (at_return) {
+ details->set(details_index++, *return_value);
+ }
+
// Add the receiver (same as in function frame).
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
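The size arithmetic for the details array is easy to get wrong on the consumer side, so here is a standalone model of the new layout (a sketch with assumed constants, not V8 code): nine fixed slots, then name/value pairs for arguments and locals, then one optional slot for the return value when the frame is stopped at a return.

#include <cassert>

// Models details_size as computed in Runtime_GetFrameDetails above.
int DetailsSize(int argument_count, int local_count, bool at_return) {
  const int kFirstDynamicIndex = 9;  // kFrameDetailsFirstDynamicIndex
  return kFirstDynamicIndex +
         2 * (argument_count + local_count) +
         (at_return ? 1 : 0);
}

int main() {
  // Two arguments, one local, stopped at return: 9 + 6 + 1 slots.
  assert(DetailsSize(2, 1, true) == 16);
  return 0;
}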
diff --git a/src/runtime.h b/src/runtime.h
index a7f0bf37..3d4df1bd 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -102,6 +102,7 @@ namespace internal {
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
+ F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
F(NumberToSmi, 1, 1) \
@@ -161,7 +162,6 @@ namespace internal {
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
- F(StringCharAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
@@ -222,6 +222,7 @@ namespace internal {
/* Eval */ \
F(GlobalReceiver, 1, 1) \
F(ResolvePossiblyDirectEval, 3, 2) \
+ F(ResolvePossiblyDirectEvalNoLookup, 3, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
diff --git a/src/serialize.cc b/src/serialize.cc
index 06c6df72..e610e283 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -229,6 +229,10 @@ void ExternalReferenceTable::PopulateTable() {
DEBUG_ADDRESS,
Debug::k_after_break_target_address << kDebugIdShift,
"Debug::after_break_target_address()");
+ Add(Debug_Address(Debug::k_debug_break_slot_address).address(),
+ DEBUG_ADDRESS,
+ Debug::k_debug_break_slot_address << kDebugIdShift,
+ "Debug::debug_break_slot_address()");
Add(Debug_Address(Debug::k_debug_break_return_address).address(),
DEBUG_ADDRESS,
Debug::k_debug_break_return_address << kDebugIdShift,
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 66894c4f..d49c2075 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -66,99 +66,210 @@ Address Page::AllocationTop() {
}
-void Page::ClearRSet() {
- // This method can be called in all rset states.
- memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
-}
-
-
-// Given a 32-bit address, separate its bits into:
-// | page address | words (6) | bit offset (5) | pointer alignment (2) |
-// The address of the rset word containing the bit for this word is computed as:
-// page_address + words * 4
-// For a 64-bit address, if it is:
-// | page address | words(5) | bit offset(5) | pointer alignment (3) |
-// The address of the rset word containing the bit for this word is computed as:
-// page_address + words * 4 + kRSetOffset.
-// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
-// even on the X64 architecture.
-
-Address Page::ComputeRSetBitPosition(Address address, int offset,
- uint32_t* bitmask) {
- ASSERT(Page::is_rset_in_use());
-
- Page* page = Page::FromAddress(address);
- uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
- kPointerSizeLog2);
- *bitmask = 1 << (bit_offset % kBitsPerInt);
-
- Address rset_address =
- page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
- // The remembered set address is either in the normal remembered set range
- // of a page or else we have a large object page.
- ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
- || page->IsLargeObjectPage());
-
- if (rset_address >= page->RSetEnd()) {
- // We have a large object page, and the remembered set address is actually
- // past the end of the object.
-
- // The first part of the remembered set is still located at the start of
- // the page, but anything after kRSetEndOffset must be relocated to after
- // the large object, i.e. after
- // (page->ObjectAreaStart() + object size)
- // We do that by adding the difference between the normal RSet's end and
- // the object's end.
- ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
- int fixedarray_length =
- FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
- + Array::kLengthOffset));
- rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
+Address Page::AllocationWatermark() {
+ PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ if (this == owner->AllocationTopPage()) {
+ return owner->top();
}
- return rset_address;
+ return address() + AllocationWatermarkOffset();
}
-void Page::SetRSet(Address address, int offset) {
- uint32_t bitmask = 0;
- Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
- Memory::uint32_at(rset_address) |= bitmask;
+uint32_t Page::AllocationWatermarkOffset() {
+ return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
+ kAllocationWatermarkOffsetShift);
+}
- ASSERT(IsRSetSet(address, offset));
+
+void Page::SetAllocationWatermark(Address allocation_watermark) {
+ if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
+ // When iterating intergenerational references during scavenge
+ // we might decide to promote an encountered young object.
+    // We will allocate space for such an object and put it
+    // into the promotion queue to process it later.
+    // If space for the object was allocated somewhere beyond the allocation
+    // watermark, this might cause garbage pointers to appear under the
+    // watermark. To avoid visiting them during the dirty regions iteration,
+    // which might still be in progress, we store a valid allocation watermark
+    // value and mark this page as having an invalid watermark.
+ SetCachedAllocationWatermark(AllocationWatermark());
+ InvalidateWatermark(true);
+ }
+
+ flags_ = (flags_ & kFlagsMask) |
+ Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
+ ASSERT(AllocationWatermarkOffset()
+ == static_cast<uint32_t>(Offset(allocation_watermark)));
}
-// Clears the corresponding remembered set bit for a given address.
-void Page::UnsetRSet(Address address, int offset) {
- uint32_t bitmask = 0;
- Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
- Memory::uint32_at(rset_address) &= ~bitmask;
+void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
+ mc_first_forwarded = allocation_watermark;
+}
+
+
+Address Page::CachedAllocationWatermark() {
+ return mc_first_forwarded;
+}
+
+
+uint32_t Page::GetRegionMarks() {
+ return dirty_regions_;
+}
+
+
+void Page::SetRegionMarks(uint32_t marks) {
+ dirty_regions_ = marks;
+}
+
+
+int Page::GetRegionNumberForAddress(Address addr) {
+  // Each page is divided into 256 byte regions. Each region has a
+  // corresponding dirty mark bit in the page header. A region can contain
+  // intergenerational references iff its dirty mark is set.
+  // A normal 8K page contains exactly 32 regions, so all region marks fit
+  // into a 32-bit integer field. To calculate a region number we just divide
+  // the offset inside the page by the region size.
+  // A large page can contain more than 32 regions. But we want to avoid
+  // additional write barrier code for distinguishing between large and normal
+  // pages, so we just ignore the fact that addr points into a large page and
+  // calculate the region number as if addr pointed into a normal 8K page. This
+  // way we get a region number modulo 32, so for large pages several regions
+  // might be mapped to a single dirty mark.
+ ASSERT_PAGE_ALIGNED(this->address());
+ STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
+
+ // We are using masking with kPageAlignmentMask instead of Page::Offset()
+  // to get the offset to the beginning of the 8K page containing addr, not to
+  // the beginning of the actual page, which can be bigger than 8K.
+ intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
+ return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
+}
+
+
+uint32_t Page::GetRegionMaskForAddress(Address addr) {
+ return 1 << GetRegionNumberForAddress(addr);
+}
+
+
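A quick standalone check of the region arithmetic above (assuming the standard 8K page size): an address at offset 0x1A40 inside its page falls into region 0x1A40 >> 8 == 26, so its dirty mark is bit 26 of the region-marks word.

#include <cassert>
#include <cstdint>

int main() {
  const int kRegionSizeLog2 = 8;               // 256-byte regions
  const intptr_t kPageAlignmentMask = 0x1FFF;  // 8K pages
  intptr_t offset_inside_page = 0x1A40 & kPageAlignmentMask;
  int region = static_cast<int>(offset_inside_page >> kRegionSizeLog2);
  assert(region == 26);
  assert((1u << region) == 0x04000000u);
  return 0;
}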
+uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
+ uint32_t result = 0;
+ if (length_in_bytes >= kPageSize) {
+ result = kAllRegionsDirtyMarks;
+ } else if (length_in_bytes > 0) {
+ int start_region = GetRegionNumberForAddress(start);
+ int end_region =
+ GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
+ uint32_t start_mask = (~0) << start_region;
+ uint32_t end_mask = ~((~1) << end_region);
+ result = start_mask & end_mask;
+    // If end_region < start_region the span wrapped, so the masks are OR-ed.
+ if (result == 0) result = start_mask | end_mask;
+ }
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ uint32_t expected = 0;
+ for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
+ expected |= GetRegionMaskForAddress(a);
+ }
+ ASSERT(expected == result);
+ }
+#endif
+ return result;
+}
+
+
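The wrap-around branch in GetRegionMaskForSpan above deserves a worked example. On a large page region numbers are taken modulo 32, so a span can start in region 30 and end in region 1; start_mask & end_mask is then zero and the two partial masks are OR-ed instead (standalone check, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  int start_region = 30, end_region = 1;
  uint32_t start_mask = ~0u << start_region;  // bits 30..31
  uint32_t end_mask = ~(~1u << end_region);   // bits 0..1
  uint32_t result = start_mask & end_mask;    // 0: the span wrapped
  if (result == 0) result = start_mask | end_mask;
  assert(result == 0xC0000003u);
  return 0;
}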
+void Page::MarkRegionDirty(Address address) {
+ SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
+}
+
- ASSERT(!IsRSetSet(address, offset));
+bool Page::IsRegionDirty(Address address) {
+ return GetRegionMarks() & GetRegionMaskForAddress(address);
}
-bool Page::IsRSetSet(Address address, int offset) {
+void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
+ int rstart = GetRegionNumberForAddress(start);
+ int rend = GetRegionNumberForAddress(end);
+
+ if (reaches_limit) {
+ end += 1;
+ }
+
+ if ((rend - rstart) == 0) {
+ return;
+ }
+
uint32_t bitmask = 0;
- Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
- return (Memory::uint32_at(rset_address) & bitmask) != 0;
+
+ if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
+ || (start == ObjectAreaStart())) {
+    // The first region is fully covered.
+ bitmask = 1 << rstart;
+ }
+
+ while (++rstart < rend) {
+ bitmask |= 1 << rstart;
+ }
+
+ if (bitmask) {
+ SetRegionMarks(GetRegionMarks() & ~bitmask);
+ }
+}
+
+
+void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
+ watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
+}
+
+
+bool Page::IsWatermarkValid() {
+ return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
+}
+
+
+void Page::InvalidateWatermark(bool value) {
+ if (value) {
+ flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
+ } else {
+ flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
+ (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
+ }
+
+ ASSERT(IsWatermarkValid() == !value);
}
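The three functions above implement a constant-time mass invalidation: instead of clearing WATERMARK_INVALIDATED from every page, the global mark value that means "invalidated" is XOR-flipped, which validates all watermarks at once. A standalone model of the trick (not V8 code):

#include <cassert>
#include <cstdint>

const intptr_t WATERMARK_INVALIDATED = 1 << 2;
intptr_t watermark_invalidated_mark = WATERMARK_INVALIDATED;

bool IsWatermarkValid(intptr_t flags) {
  return (flags & WATERMARK_INVALIDATED) != watermark_invalidated_mark;
}

int main() {
  intptr_t page_flags = WATERMARK_INVALIDATED;  // page marked invalid
  assert(!IsWatermarkValid(page_flags));
  // Flip the meaning of the flag: no page is touched, yet every
  // previously invalidated watermark now reads as valid.
  watermark_invalidated_mark ^= WATERMARK_INVALIDATED;
  assert(IsWatermarkValid(page_flags));
  return 0;
}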
bool Page::GetPageFlag(PageFlag flag) {
- return (flags & flag) != 0;
+ return (flags_ & flag) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
- flags |= flag;
+ flags_ |= flag;
} else {
- flags &= ~flag;
+ flags_ &= ~flag;
}
}
+void Page::ClearPageFlags() {
+ flags_ = 0;
+}
+
+
+void Page::ClearGCFields() {
+ InvalidateWatermark(true);
+ SetAllocationWatermark(ObjectAreaStart());
+ if (Heap::gc_state() == Heap::SCAVENGE) {
+ SetCachedAllocationWatermark(ObjectAreaStart());
+ }
+ SetRegionMarks(kAllRegionsCleanMarks);
+}
+
+
bool Page::WasInUseBeforeMC() {
return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}
@@ -343,14 +454,6 @@ HeapObject* LargeObjectChunk::GetObject() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
- int extra_rset_bits =
- RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
- kBitsPerInt);
- return extra_rset_bits / kBitsPerByte;
-}
-
-
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;
diff --git a/src/spaces.cc b/src/spaces.cc
index 6b6d926e..3c495ba5 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -41,6 +41,7 @@ namespace internal {
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
+intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -139,13 +140,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
// -----------------------------------------------------------------------------
-// Page
-
-#ifdef DEBUG
-Page::RSetState Page::rset_state_ = Page::IN_USE;
-#endif
-
-// -----------------------------------------------------------------------------
// CodeRange
List<CodeRange::FreeBlock> CodeRange::free_list_(0);
@@ -524,7 +518,10 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ p->InvalidateWatermark(true);
p->SetIsLargeObjectPage(false);
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ p->SetCachedAllocationWatermark(p->ObjectAreaStart());
page_addr += Page::kPageSize;
}
@@ -681,6 +678,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
page_addr += Page::kPageSize;
+ p->InvalidateWatermark(true);
if (p->WasInUseBeforeMC()) {
*last_page_in_use = p;
}
@@ -744,10 +742,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- // Sequentially initialize remembered sets in the newly allocated
+ // Sequentially clear region marks in the newly allocated
// pages and cache the current last page in the space.
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
- p->ClearRSet();
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
}
@@ -794,10 +792,10 @@ void PagedSpace::Unprotect() {
#endif
-void PagedSpace::ClearRSet() {
+void PagedSpace::MarkAllPagesClean() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
- it.next()->ClearRSet();
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
}
@@ -900,7 +898,8 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
// of forwarding addresses is as an offset in terms of live bytes, so we
// need quick access to the allocation top of each page to decode
// forwarding addresses.
- current_page->mc_relocation_top = mc_forwarding_info_.top;
+ current_page->SetAllocationWatermark(mc_forwarding_info_.top);
+ current_page->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}
@@ -928,10 +927,10 @@ bool PagedSpace::Expand(Page* last_page) {
MemoryAllocator::SetNextPage(last_page, p);
- // Sequentially clear remembered set of new pages and and cache the
+  // Sequentially clear region marks of new pages and cache the
// new last page in the space.
while (p->is_valid()) {
- p->ClearRSet();
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
p = p->next_page();
}
@@ -1030,16 +1029,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
- // Unless this is the last page in the space containing allocated
- // objects, the allocation top should be at a constant offset from the
- // object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
- } else {
- ASSERT(top == PageAllocationLimit(current_page));
}
// It should be packed with objects from the bottom to the top.
@@ -1060,8 +1054,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
object->Verify();
// All the interior pointers should be contained in the heap and
- // have their remembered set bits set if required as determined
- // by the visitor.
+    // the page regions covering intergenerational references should be
+    // marked dirty.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
@@ -1120,7 +1114,7 @@ bool NewSpace::Setup(Address start, int size) {
start_ = start;
address_mask_ = ~(size - 1);
- object_mask_ = address_mask_ | kHeapObjectTag;
+ object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
allocation_info_.top = to_space_.low();
@@ -1324,7 +1318,7 @@ bool SemiSpace::Setup(Address start,
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTag;
+ object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
@@ -1463,6 +1457,7 @@ static void ReportCodeKindStatistics() {
CASE(STORE_IC);
CASE(KEYED_STORE_IC);
CASE(CALL_IC);
+ CASE(KEYED_CALL_IC);
CASE(BINARY_OP_IC);
}
}
@@ -1634,7 +1629,7 @@ void FreeListNode::set_size(int size_in_bytes) {
// If the block is too small (eg, one or two words), to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
- if (size_in_bytes > ByteArray::kAlignedSize) {
+ if (size_in_bytes > ByteArray::kHeaderSize) {
set_map(Heap::raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
@@ -1831,7 +1826,7 @@ FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
void FixedSizeFreeList::Reset() {
available_ = 0;
- head_ = NULL;
+ head_ = tail_ = NULL;
}
@@ -1843,8 +1838,13 @@ void FixedSizeFreeList::Free(Address start) {
ASSERT(!MarkCompactCollector::IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(object_size_);
- node->set_next(head_);
- head_ = node->address();
+ node->set_next(NULL);
+ if (head_ == NULL) {
+ tail_ = head_ = node->address();
+ } else {
+ FreeListNode::FromAddress(tail_)->set_next(node->address());
+ tail_ = node->address();
+ }
available_ += object_size_;
}
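The hunk above turns the fixed-size free list from a LIFO stack into a FIFO queue by threading a tail pointer through Free(). A standalone model of the resulting structure (a sketch, not the patch's code; the patch itself does not state the motivation for the ordering change):

#include <cassert>

struct Node { Node* next = nullptr; };

struct FixedSizeFreeList {
  Node* head = nullptr;
  Node* tail = nullptr;
  void Free(Node* n) {  // Append at the tail: FIFO order.
    n->next = nullptr;
    if (head == nullptr) {
      head = tail = n;
    } else {
      tail->next = n;
      tail = n;
    }
  }
  Node* Allocate() {  // Pop from the head.
    Node* n = head;
    if (n != nullptr) head = n->next;
    return n;
  }
};

int main() {
  Node a, b;
  FixedSizeFreeList list;
  list.Free(&a);
  list.Free(&b);
  assert(list.Allocate() == &a);  // Oldest freed node is reused first.
  assert(list.Allocate() == &b);
  return 0;
}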
@@ -1907,15 +1907,14 @@ void OldSpace::MCCommitRelocationInfo() {
Page* p = it.next();
// Space below the relocation pointer is allocated.
computed_size +=
- static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
+ static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
if (it.has_next()) {
- // Free the space at the top of the page. We cannot use
- // p->mc_relocation_top after the call to Free (because Free will clear
- // remembered set bits).
+ // Free the space at the top of the page.
int extra_size =
- static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
+ static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
if (extra_size > 0) {
- int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
+ int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
+ extra_size);
// The bytes we have just "freed" to add to the free list were
// already accounted as available.
accounting_stats_.WasteBytes(wasted_bytes);
@@ -1963,7 +1962,10 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
// Clean them up.
do {
- first->ClearRSet();
+ first->InvalidateWatermark(true);
+ first->SetAllocationWatermark(first->ObjectAreaStart());
+ first->SetCachedAllocationWatermark(first->ObjectAreaStart());
+ first->SetRegionMarks(Page::kAllRegionsCleanMarks);
first = first->next_page();
} while (first != NULL);
@@ -2003,6 +2005,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
// Current allocation top points to a page which is now in the middle
// of page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
+ last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
@@ -2035,6 +2038,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
+ p->SetAllocationWatermark(p->ObjectAreaStart());
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
}
}
@@ -2066,6 +2070,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
if (!reserved_page->is_valid()) return false;
}
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+ TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&allocation_info_,
TopPageOf(allocation_info_)->next_page());
return true;
@@ -2100,7 +2105,20 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::cast(result);
+
+ HeapObject* obj = HeapObject::cast(result);
+ Page* p = Page::FromAddress(obj->address());
+
+ if (obj->address() >= p->AllocationWatermark()) {
+ // There should be no hole between the allocation watermark
+ // and allocated object address.
+ // Memory above the allocation watermark was not swept and
+ // might contain garbage pointers to new space.
+ ASSERT(obj->address() == p->AllocationWatermark());
+ p->SetAllocationWatermark(obj->address() + size_in_bytes);
+ }
+
+ return obj;
}
}
@@ -2123,6 +2141,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
@@ -2133,6 +2152,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
// In the fixed space free list all the free list items have the right size.
@@ -2152,8 +2172,10 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
+ Page* next_page = current_page->next_page();
+ next_page->ClearGCFields();
PutRestOfCurrentPageOnFreeList(current_page);
- SetAllocationInfo(&allocation_info_, current_page->next_page());
+ SetAllocationInfo(&allocation_info_, next_page);
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@@ -2296,160 +2318,12 @@ void OldSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- // Report remembered set statistics.
- int rset_marked_pointers = 0;
- int rset_marked_arrays = 0;
- int rset_marked_array_elements = 0;
- int cross_gen_pointers = 0;
- int cross_gen_array_elements = 0;
-
- PageIterator page_it(this, PageIterator::PAGES_IN_USE);
- while (page_it.has_next()) {
- Page* p = page_it.next();
-
- for (Address rset_addr = p->RSetStart();
- rset_addr < p->RSetEnd();
- rset_addr += kIntSize) {
- int rset = Memory::int_at(rset_addr);
- if (rset != 0) {
- // Bits were set
- int intoff =
- static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
- int bitoff = 0;
- for (; bitoff < kBitsPerInt; ++bitoff) {
- if ((rset & (1 << bitoff)) != 0) {
- int bitpos = intoff*kBitsPerByte + bitoff;
- Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
- Object** obj = reinterpret_cast<Object**>(slot);
- if (*obj == Heap::raw_unchecked_fixed_array_map()) {
- rset_marked_arrays++;
- FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
-
- rset_marked_array_elements += fa->length();
- // Manually inline FixedArray::IterateBody
- Address elm_start = slot + FixedArray::kHeaderSize;
- Address elm_stop = elm_start + fa->length() * kPointerSize;
- for (Address elm_addr = elm_start;
- elm_addr < elm_stop; elm_addr += kPointerSize) {
- // Filter non-heap-object pointers
- Object** elm_p = reinterpret_cast<Object**>(elm_addr);
- if (Heap::InNewSpace(*elm_p))
- cross_gen_array_elements++;
- }
- } else {
- rset_marked_pointers++;
- if (Heap::InNewSpace(*obj))
- cross_gen_pointers++;
- }
- }
- }
- }
- }
- }
-
- pct = rset_marked_pointers == 0 ?
- 0 : cross_gen_pointers * 100 / rset_marked_pointers;
- PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
- rset_marked_pointers, cross_gen_pointers, pct);
- PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
- PrintF(" elements %d, ", rset_marked_array_elements);
- pct = rset_marked_array_elements == 0 ? 0
- : cross_gen_array_elements * 100 / rset_marked_array_elements;
- PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
- PrintF(" total rset-marked bits %d\n",
- (rset_marked_pointers + rset_marked_arrays));
- pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
- : (cross_gen_pointers + cross_gen_array_elements) * 100 /
- (rset_marked_pointers + rset_marked_array_elements);
- PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
- (rset_marked_pointers + rset_marked_array_elements),
- (cross_gen_pointers + cross_gen_array_elements),
- pct);
-
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(true);
}
-
-
-// Dump the range of remembered set words between [start, end) corresponding
-// to the pointers starting at object_p. The allocation_top is an object
-// pointer which should not be read past. This is important for large object
-// pages, where some bits in the remembered set range do not correspond to
-// allocated addresses.
-static void PrintRSetRange(Address start, Address end, Object** object_p,
- Address allocation_top) {
- Address rset_address = start;
-
- // If the range starts on on odd numbered word (eg, for large object extra
- // remembered set ranges), print some spaces.
- if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
- PrintF(" ");
- }
-
- // Loop over all the words in the range.
- while (rset_address < end) {
- uint32_t rset_word = Memory::uint32_at(rset_address);
- int bit_position = 0;
-
- // Loop over all the bits in the word.
- while (bit_position < kBitsPerInt) {
- if (object_p == reinterpret_cast<Object**>(allocation_top)) {
- // Print a bar at the allocation pointer.
- PrintF("|");
- } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
- // Do not dereference object_p past the allocation pointer.
- PrintF("#");
- } else if ((rset_word & (1 << bit_position)) == 0) {
- // Print a dot for zero bits.
- PrintF(".");
- } else if (Heap::InNewSpace(*object_p)) {
- // Print an X for one bits for pointers to new space.
- PrintF("X");
- } else {
- // Print a circle for one bits for pointers to old space.
- PrintF("o");
- }
-
- // Print a space after every 8th bit except the last.
- if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
- PrintF(" ");
- }
-
- // Advance to next bit.
- bit_position++;
- object_p++;
- }
-
- // Print a newline after every odd numbered word, otherwise a space.
- if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
- PrintF("\n");
- } else {
- PrintF(" ");
- }
-
- // Advance to next remembered set word.
- rset_address += kIntSize;
- }
-}
-
-
-void PagedSpace::DoPrintRSet(const char* space_name) {
- PageIterator it(this, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
- PrintF("%s page 0x%x:\n", space_name, p);
- PrintRSetRange(p->RSetStart(), p->RSetEnd(),
- reinterpret_cast<Object**>(p->ObjectAreaStart()),
- p->AllocationTop());
- PrintF("\n");
- }
-}
-
-
-void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif
// -----------------------------------------------------------------------------
@@ -2499,6 +2373,7 @@ void FixedSpace::MCCommitRelocationInfo() {
if (it.has_next()) {
accounting_stats_.WasteBytes(
static_cast<int>(page->ObjectAreaEnd() - page_top));
+ page->SetAllocationWatermark(page_top);
}
}
@@ -2528,7 +2403,19 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::cast(result);
+ HeapObject* obj = HeapObject::cast(result);
+ Page* p = Page::FromAddress(obj->address());
+
+ if (obj->address() >= p->AllocationWatermark()) {
+ // There should be no hole between the allocation watermark
+ // and allocated object address.
+ // Memory above the allocation watermark was not swept and
+ // might contain garbage pointers to new space.
+ ASSERT(obj->address() == p->AllocationWatermark());
+ p->SetAllocationWatermark(obj->address() + size_in_bytes);
+ }
+
+ return obj;
}
}
@@ -2558,8 +2445,11 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
ASSERT(current_page->next_page()->is_valid());
ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+ Page* next_page = current_page->next_page();
+ next_page->ClearGCFields();
+ current_page->SetAllocationWatermark(allocation_info_.top);
accounting_stats_.WasteBytes(page_extra_);
- SetAllocationInfo(&allocation_info_, current_page->next_page());
+ SetAllocationInfo(&allocation_info_, next_page);
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@@ -2570,51 +2460,12 @@ void FixedSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- // Report remembered set statistics.
- int rset_marked_pointers = 0;
- int cross_gen_pointers = 0;
-
- PageIterator page_it(this, PageIterator::PAGES_IN_USE);
- while (page_it.has_next()) {
- Page* p = page_it.next();
-
- for (Address rset_addr = p->RSetStart();
- rset_addr < p->RSetEnd();
- rset_addr += kIntSize) {
- int rset = Memory::int_at(rset_addr);
- if (rset != 0) {
- // Bits were set
- int intoff =
- static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
- int bitoff = 0;
- for (; bitoff < kBitsPerInt; ++bitoff) {
- if ((rset & (1 << bitoff)) != 0) {
- int bitpos = intoff*kBitsPerByte + bitoff;
- Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
- Object** obj = reinterpret_cast<Object**>(slot);
- rset_marked_pointers++;
- if (Heap::InNewSpace(*obj))
- cross_gen_pointers++;
- }
- }
- }
- }
- }
-
- pct = rset_marked_pointers == 0 ?
- 0 : cross_gen_pointers * 100 / rset_marked_pointers;
- PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
- rset_marked_pointers, cross_gen_pointers, pct);
-
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(false);
}
-
-
-void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif
@@ -2793,8 +2644,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
chunk->set_size(chunk_size);
first_chunk_ = chunk;
- // Set the object address and size in the page header and clear its
- // remembered set.
+ // Initialize page header.
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
Address object_address = page->ObjectAreaStart();
// Clear the low order bit of the second word in the page to flag it as a
@@ -2802,13 +2652,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->SetIsLargeObjectPage(true);
- page->ClearRSet();
- int extra_bytes = requested_size - object_size;
- if (extra_bytes > 0) {
- // The extra memory for the remembered set should be cleared.
- memset(object_address + object_size, 0, extra_bytes);
- }
-
+ page->SetRegionMarks(Page::kAllRegionsCleanMarks);
return HeapObject::FromAddress(object_address);
}
@@ -2823,8 +2667,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
- int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
- return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
+ return AllocateRawInternal(size_in_bytes,
size_in_bytes,
NOT_EXECUTABLE);
}
@@ -2851,59 +2694,61 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
-
-void LargeObjectSpace::ClearRSet() {
- ASSERT(Page::is_rset_in_use());
-
- LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays need remembered set support.
- if (object->IsFixedArray()) {
- // Clear the normal remembered set region of the page;
- Page* page = Page::FromAddress(object->address());
- page->ClearRSet();
-
- // Clear the extra remembered set.
- int size = object->Size();
- int extra_rset_bytes = ExtraRSetBytesFor(size);
- memset(object->address() + size, 0, extra_rset_bytes);
- }
- }
-}
-
-
-void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
- ASSERT(Page::is_rset_in_use());
-
- static void* lo_rset_histogram = StatsTable::CreateHistogram(
- "V8.RSetLO",
- 0,
- // Keeping this histogram's buckets the same as the paged space histogram.
- Page::kObjectAreaSize / kPointerSize,
- 30);
-
+void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
if (object->IsFixedArray()) {
- // Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
- Address object_end = object->address() + object->Size();
- int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
- Min(page->ObjectAreaEnd(), object_end),
- page->RSetStart(),
- copy_object_func);
-
- // Iterate the extra array elements.
- if (object_end > page->ObjectAreaEnd()) {
- count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
- object_end, copy_object_func);
- }
- if (lo_rset_histogram != NULL) {
- StatsTable::AddHistogramSample(lo_rset_histogram, count);
+ uint32_t marks = page->GetRegionMarks();
+ uint32_t newmarks = Page::kAllRegionsCleanMarks;
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ // For a large page a single dirty mark corresponds to several
+ // regions (modulo 32). So we treat a large page as a sequence of
+        // normal pages of size Page::kPageSize having the same dirty marks
+ // and subsequently iterate dirty regions on each of these pages.
+ Address start = object->address();
+ Address end = page->ObjectAreaEnd();
+ Address object_end = start + object->Size();
+
+ // Iterate regions of the first normal page covering object.
+ uint32_t first_region_number = page->GetRegionNumberForAddress(start);
+ newmarks |=
+ Heap::IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
+
+ start = end;
+ end = start + Page::kPageSize;
+ while (end <= object_end) {
+ // Iterate next 32 regions.
+ newmarks |=
+ Heap::IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
+ start = end;
+ end = start + Page::kPageSize;
+ }
+
+ if (start != object_end) {
+ // Iterate the last piece of an object which is less than
+ // Page::kPageSize.
+ newmarks |=
+ Heap::IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
+ }
+
+ page->SetRegionMarks(newmarks);
}
}
}
@@ -2995,7 +2840,7 @@ void LargeObjectSpace::Verify() {
} else if (object->IsFixedArray()) {
// We loop over fixed arrays ourselves, rather then using the visitor,
// because the visitor doesn't support the start/offset iteration
- // needed for IsRSetSet.
+ // needed for IsRegionDirty.
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
@@ -3004,8 +2849,11 @@ void LargeObjectSpace::Verify() {
ASSERT(Heap::Contains(element_object));
ASSERT(element_object->map()->IsMap());
if (Heap::InNewSpace(element_object)) {
- ASSERT(Page::IsRSetSet(object->address(),
- FixedArray::kHeaderSize + j * kPointerSize));
+ Address array_addr = object->address();
+ Address element_addr = array_addr + FixedArray::kHeaderSize +
+ j * kPointerSize;
+
+ ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
}
}
}
@@ -3046,33 +2894,6 @@ void LargeObjectSpace::CollectCodeStatistics() {
}
}
}
-
-
-void LargeObjectSpace::PrintRSet() {
- LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- if (object->IsFixedArray()) {
- Page* page = Page::FromAddress(object->address());
-
- Address allocation_top = object->address() + object->Size();
- PrintF("large page 0x%x:\n", page);
- PrintRSetRange(page->RSetStart(), page->RSetEnd(),
- reinterpret_cast<Object**>(object->address()),
- allocation_top);
- int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
- int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
- kBitsPerInt);
- PrintF("------------------------------------------------------------"
- "-----------\n");
- PrintRSetRange(allocation_top,
- allocation_top + extra_rset_bits / kBitsPerByte,
- reinterpret_cast<Object**>(object->address()
- + Page::kObjectAreaSize),
- allocation_top);
- PrintF("\n");
- }
- }
-}
#endif // DEBUG
} } // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
index df42d515..051ce37c 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -45,23 +45,46 @@ namespace internal {
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
-// spaces consists of a list of pages. A page has a page header, a remembered
-// set area, and an object area. A page size is deliberately chosen as 8K
-// bytes. The first word of a page is an opaque page header that has the
+// spaces consist of a list of pages. A page has a page header and an object
+// area. The page size is deliberately chosen to be 8K bytes.
+// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. The next 248 bytes are
-// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
-// remembered set bit corresponds to a pointer in the object area.
+// have the allocation top address of this page. Heap objects are aligned to the
+// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
-// collection. The large object space is paged and uses the same remembered
-// set implementation. Pages in large object space may be larger than 8K.
+// collection. The large object space is paged. Pages in large object space
+// may be larger than 8K.
+//
+// A card marking write barrier is used to keep track of intergenerational
+// references. Old space pages are divided into regions of Page::kRegionSize
+// size. Each region has a corresponding dirty bit in the page header which is
+// set if the region might contain pointers to new space. For details about
+// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
+// method body.
+//
+// During scavenges and mark-sweep collections we iterate intergenerational
+// pointers without decoding heap object maps, so if the page belongs to old
+// pointer space or large object space it is essential to guarantee that
+// the page does not contain any garbage pointers to new space: every pointer-
+// aligned word which satisfies the Heap::InNewSpace() predicate must be a
+// pointer to a live heap object in new space. Thus objects in old pointer
+// and large object spaces should have a special layout (e.g. no bare integer
+// fields). This requirement does not apply to map space, which is iterated in
+// a special fashion. However, we still require pointer fields of dead maps to
+// be cleaned.
+//
+// To enable lazy cleaning of old space pages we use the notion of an
+// allocation watermark. Every pointer under the watermark is considered to be
+// well formed. The page allocation watermark is not necessarily equal to the
+// page allocation top, but all live objects on the page should reside under
+// the allocation watermark. During scavenge the allocation watermark might be
+// bumped and invalid pointers might appear below it. To avoid following them
+// we store a valid watermark into a special field in the page header and set
+// the page's WATERMARK_INVALIDATED flag. For details see comments in the
+// Page::SetAllocationWatermark() method body.
//
-// NOTE: The mark-compact collector rebuilds the remembered set after a
-// collection. It reuses first a few words of the remembered set for
-// bookkeeping relocation information.
-
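A hedged sketch of the write barrier the comment above describes (a standalone model, not the patch's code): a store into an old space page marks the 256-byte region containing the slot dirty whenever the stored value may point into new space.

#include <cassert>
#include <cstdint>

struct Page {
  uint32_t dirty_regions = 0;
  void MarkRegionDirty(uintptr_t offset_in_page) {
    // 8K pages, 256-byte regions: the region number is bits 8..12 of
    // the offset, so the dirty mark is one bit of a 32-bit word.
    dirty_regions |= 1u << ((offset_in_page & 0x1FFF) >> 8);
  }
};

void WriteBarrier(Page& page, uintptr_t slot_offset, bool value_in_new_space) {
  if (value_in_new_space) page.MarkRegionDirty(slot_offset);
}

int main() {
  Page p;
  WriteBarrier(p, 0x1A40, true);   // Slot in region 26 is now dirty.
  assert(p.dirty_regions == (1u << 26));
  WriteBarrier(p, 0x0040, false);  // Old-to-old store: no mark.
  assert(p.dirty_regions == (1u << 26));
  return 0;
}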
// Some assertion macros used in the debugging mode.
@@ -91,25 +114,13 @@ class AllocationInfo;
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
-// address is always aligned to the 8K page size. A page is divided into
-// three areas: the first two words are used for bookkeeping, the next 248
-// bytes are used as remembered set, and the rest of the page is the object
-// area.
-//
-// Pointers are aligned to the pointer size (4), only 1 bit is needed
-// for a pointer in the remembered set. Given an address, its remembered set
-// bit position (offset from the start of the page) is calculated by dividing
-// its page offset by 32. Therefore, the object area in a page starts at the
-// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
-// the first two words (64 bits) in a page can be used for other purposes.
+// address is always aligned to the 8K page size.
//
-// On the 64-bit platform, we add an offset to the start of the remembered set,
-// and pointers are aligned to 8-byte pointer size. This means that we need
-// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
-// For this reason we add an offset to get room for the Page data at the start.
+// Each page starts with a header of size Page::kPageHeaderSize which contains
+// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The excact encoding is described in the comments for
+// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
@@ -150,18 +161,25 @@ class Page {
// Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
+ // Return the allocation watermark for the page.
+ // For old space pages it is guaranteed that the area under the watermark
+ // does not contain any garbage pointers to new space.
+ inline Address AllocationWatermark();
+
+ // Return the allocation watermark offset from the beginning of the page.
+ inline uint32_t AllocationWatermarkOffset();
+
+ inline void SetAllocationWatermark(Address allocation_watermark);
+
+ inline void SetCachedAllocationWatermark(Address allocation_watermark);
+ inline Address CachedAllocationWatermark();
+
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
- // Returns the start address of the remembered set area.
- Address RSetStart() { return address() + kRSetStartOffset; }
-
- // Returns the end address of the remembered set area (exclusive).
- Address RSetEnd() { return address() + kRSetEndOffset; }
-
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -193,33 +211,24 @@ class Page {
}
// ---------------------------------------------------------------------
- // Remembered set support
+ // Card marking support
- // Clears remembered set in this page.
- inline void ClearRSet();
+ static const uint32_t kAllRegionsCleanMarks = 0x0;
+ static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
- // Return the address of the remembered set word corresponding to an
- // object address/offset pair, and the bit encoded as a single-bit
- // mask in the output parameter 'bitmask'.
- INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
- uint32_t* bitmask));
+ inline uint32_t GetRegionMarks();
+ inline void SetRegionMarks(uint32_t dirty);
- // Sets the corresponding remembered set bit for a given address.
- INLINE(static void SetRSet(Address address, int offset));
+ inline uint32_t GetRegionMaskForAddress(Address addr);
+ inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
+ inline int GetRegionNumberForAddress(Address addr);
- // Clears the corresponding remembered set bit for a given address.
- static inline void UnsetRSet(Address address, int offset);
+ inline void MarkRegionDirty(Address addr);
+ inline bool IsRegionDirty(Address addr);
- // Checks whether the remembered set bit for a given address is set.
- static inline bool IsRSetSet(Address address, int offset);
-
-#ifdef DEBUG
- // Use a state to mark whether remembered set space can be used for other
- // purposes.
- enum RSetState { IN_USE, NOT_IN_USE };
- static bool is_rset_in_use() { return rset_state_ == IN_USE; }
- static void set_rset_state(RSetState state) { rset_state_ = state; }
-#endif
+ inline void ClearRegionMarks(Address start,
+ Address end,
+ bool reaches_limit);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -227,25 +236,11 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
- // The offset of the remembered set in a page, in addition to the empty bytes
- // formed as the remembered bits of the remembered set itself.
-#ifdef V8_TARGET_ARCH_X64
- static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
-#else
- static const int kRSetOffset = 0;
-#endif
- // The end offset of the remembered set in a page
- // (heaps are aligned to pointer size).
- static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
+ static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
+ kIntSize + kPointerSize;
// The start offset of the object area in a page.
- // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
- // to align start of rset to a uint32_t address.
- static const int kObjectStartOffset = 256;
-
- // The start offset of the used part of the remembered set in a page.
- static const int kRSetStartOffset = kRSetOffset +
- kObjectStartOffset / kBitsPerPointer;
+ static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
@@ -253,13 +248,65 @@ class Page {
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
+ static const int kDirtyFlagOffset = 2 * kPointerSize;
+ static const int kRegionSizeLog2 = 8;
+ static const int kRegionSize = 1 << kRegionSizeLog2;
+ static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
+
+ STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
+
enum PageFlag {
IS_NORMAL_PAGE = 1 << 0,
- WAS_IN_USE_BEFORE_MC = 1 << 1
+ WAS_IN_USE_BEFORE_MC = 1 << 1,
+
+    // The page allocation watermark was bumped by preallocation during
+    // scavenge. The correct watermark can be retrieved by the
+    // CachedAllocationWatermark() method.
+ WATERMARK_INVALIDATED = 1 << 2
};
+ // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
+ // scavenge we just invalidate the watermark on each old space page after
+  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
+  // flag at the beginning of the next scavenge, and every page is again
+  // marked as having a valid watermark.
+ //
+ // The following invariant must hold for pages in old pointer and map spaces:
+  // If a page is in use, then it is marked as having an invalid watermark at
+ // the beginning and at the end of any GC.
+ //
+ // This invariant guarantees that after flipping flag meaning at the
+ // beginning of scavenge all pages in use will be marked as having valid
+ // watermark.
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag();
+
+ // Returns true if the page allocation watermark was not altered during
+ // scavenge.
+ inline bool IsWatermarkValid();
+
+ inline void InvalidateWatermark(bool value);
+
inline bool GetPageFlag(PageFlag flag);
inline void SetPageFlag(PageFlag flag, bool value);
+ inline void ClearPageFlags();
+
+ inline void ClearGCFields();
+
+ static const int kAllocationWatermarkOffsetShift = 3;
+ static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
+ static const uint32_t kAllocationWatermarkOffsetMask =
+ ((1 << kAllocationWatermarkOffsetBits) - 1) <<
+ kAllocationWatermarkOffsetShift;
+
+ static const uint32_t kFlagsMask =
+ ((1 << kAllocationWatermarkOffsetShift) - 1);
+
+ STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
+ kAllocationWatermarkOffsetBits);
+
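A standalone check of the flags_ word layout defined above (assuming 8K pages, i.e. kPageSizeBits == 13): the low kAllocationWatermarkOffsetShift bits hold page flags, and the watermark offset lives in the next kAllocationWatermarkOffsetBits bits.

#include <cassert>
#include <cstdint>

int main() {
  const int kShift = 3;                               // watermark offset shift
  const uint32_t kMask = ((1u << 14) - 1) << kShift;  // 13 + 1 offset bits
  const uint32_t kFlagsMask = (1u << kShift) - 1;
  uint32_t flags = kFlagsMask;                        // all flag bits set
  uint32_t watermark_offset = 0x1A40;
  flags = (flags & kFlagsMask) | (watermark_offset << kShift);
  assert(((flags & kMask) >> kShift) == watermark_offset);
  assert((flags & kFlagsMask) == kFlagsMask);         // flag bits untouched
  return 0;
}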
+ // This field contains the meaning of the WATERMARK_INVALIDATED flag.
+ // Instead of clearing this flag from all pages we just flip
+ // its meaning at the beginning of a scavenge.
+ static intptr_t watermark_invalidated_mark_;
//---------------------------------------------------------------------------
// Page header description.
@@ -279,26 +326,24 @@ class Page {
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
- // For normal pages this word is used to store various page flags.
- int flags;
+  // For normal pages this word is used to store the page flags and the
+  // offset of the allocation top.
+ intptr_t flags_;
- // The following fields may overlap with remembered set, they can only
- // be used in the mark-compact collector when remembered set is not
- // used.
+ // This field contains dirty marks for regions covering the page. Only dirty
+ // regions might contain intergenerational references.
+  // Only 32 dirty marks are supported, so for large object pages several
+  // regions might be mapped to a single dirty mark.
+ uint32_t dirty_regions_;
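A sketch of the address-to-mark mapping implied by the constants above (kRegionSizeLog2 = 8 gives 256-byte regions, and the earlier STATIC_CHECK pins 32 regions per normal page; the page-size constant below is an assumption):

#include <cstdint>

static const int kPageSizeBits = 13;  // assumed 8K pages: 8192 / 256 == 32
static const uintptr_t kPageAlignmentMask =
    (static_cast<uintptr_t>(1) << kPageSizeBits) - 1;
static const int kRegionSizeLog2 = 8;

// Bit index, within dirty_regions_, of the region covering addr.
static int RegionNumber(uintptr_t addr) {
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

static void MarkRegionDirty(uint32_t* dirty_regions, uintptr_t addr) {
  *dirty_regions |= 1u << RegionNumber(addr);
}

static bool IsRegionDirty(uint32_t dirty_regions, uintptr_t addr) {
  return (dirty_regions & (1u << RegionNumber(addr))) != 0;
}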
// The index of the page in its owner space.
int mc_page_index;
- // The allocation pointer after relocating objects to this page.
- Address mc_relocation_top;
-
- // The forwarding address of the first live object in this page.
+ // During mark-compact collections this field contains the forwarding address
+ // of the first live object in this page.
+ // During scavenge collections this field is used to store the allocation
+ // watermark if it is altered during scavenge.
Address mc_first_forwarded;
-
-#ifdef DEBUG
- private:
- static RSetState rset_state_; // state of the remembered set
-#endif
};
@@ -921,8 +966,7 @@ class PagedSpace : public Space {
// Checks whether page is currently in use by this space.
bool IsUsed(Page* page);
- // Clears remembered sets of pages in this space.
- void ClearRSet();
+ void MarkAllPagesClean();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -936,6 +980,11 @@ class PagedSpace : public Space {
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) = 0;
+ void FlushTopPageWatermark() {
+ AllocationTopPage()->SetCachedAllocationWatermark(top());
+ AllocationTopPage()->InvalidateWatermark(true);
+ }
+
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
@@ -990,7 +1039,8 @@ class PagedSpace : public Space {
// Writes relocation info to the top page.
void MCWriteRelocationInfoToPage() {
- TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
+ TopPageOf(mc_forwarding_info_)->
+ SetAllocationWatermark(mc_forwarding_info_.top);
}
// Computes the offset of a given address in this space to the beginning
@@ -1108,8 +1158,6 @@ class PagedSpace : public Space {
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
-
- void DoPrintRSet(const char* space_name);
#endif
private:
@@ -1702,6 +1750,9 @@ class FixedSizeFreeList BASE_EMBEDDED {
// The head of the free list.
Address head_;
+ // The tail of the free list.
+ Address tail_;
+
// The identity of the owning space, for building allocation Failure
// objects.
AllocationSpace owner_;
@@ -1762,8 +1813,6 @@ class OldSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
- // Dump the remembered sets in the space to stdout.
- void PrintRSet();
#endif
protected:
@@ -1828,9 +1877,6 @@ class FixedSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
-
- // Dump the remembered sets in the space to stdout.
- void PrintRSet();
#endif
protected:
@@ -1899,11 +1945,11 @@ class MapSpace : public FixedSpace {
PageIterator it(this, PageIterator::ALL_PAGES);
while (pages_left-- > 0) {
ASSERT(it.has_next());
- it.next()->ClearRSet();
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
ASSERT(it.has_next());
Page* top_page = it.next();
- top_page->ClearRSet();
+ top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
ASSERT(top_page->is_valid());
int offset = live_maps % kMapsPerPage * Map::kSize;
@@ -1994,9 +2040,8 @@ class LargeObjectChunk {
public:
// Allocates a new LargeObjectChunk that contains a large object page
// (Page::kPageSize aligned) that has at least size_in_bytes (for a large
- // object and possibly extra remembered set words) bytes after the object
- // area start of that page. The allocated chunk size is set in the output
- // parameter chunk_size.
+ // object) bytes after the object area start of that page.
+ // The allocated chunk size is set in the output parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
Executability executable);
@@ -2019,16 +2064,12 @@ class LargeObjectChunk {
// Returns the object in this chunk.
inline HeapObject* GetObject();
- // Given a requested size (including any extra remembered set words),
- // returns the physical size of a chunk to be allocated.
+ // Given a requested size returns the physical size of a chunk to be
+ // allocated.
static int ChunkSizeFor(int size_in_bytes);
- // Given a chunk size, returns the object size it can accommodate (not
- // including any extra remembered set words). Used by
- // LargeObjectSpace::Available. Note that this can overestimate the size
- // of object that will fit in a chunk---if the object requires extra
- // remembered set words (eg, for large fixed arrays), the actual object
- // size for the chunk will be smaller than reported by this function.
+ // Given a chunk size, returns the object size it can accommodate. Used by
+ // LargeObjectSpace::Available.
static int ObjectSizeFor(int chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
@@ -2064,8 +2105,7 @@ class LargeObjectSpace : public Space {
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);
- // Available bytes for objects in this space, not including any extra
- // remembered set words.
+ // Available bytes for objects in this space.
int Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
@@ -2083,11 +2123,8 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
- // Clears remembered sets.
- void ClearRSet();
-
- // Iterates objects whose remembered set bits are set.
- void IterateRSet(ObjectSlotCallback func);
+ // Iterates objects covered by dirty regions.
+ void IterateDirtyRegions(ObjectSlotCallback func);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2114,8 +2151,6 @@ class LargeObjectSpace : public Space {
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
- // Dump the remembered sets in the space to stdout.
- void PrintRSet();
#endif
// Checks whether an address is in the object area in this space. It
// iterates all objects in the space. May be slow.
@@ -2134,10 +2169,6 @@ class LargeObjectSpace : public Space {
int object_size,
Executability executable);
- // Returns the number of extra bytes (rounded up to the nearest full word)
- // required for extra_object_bytes of extra pointers (in bytes).
- static inline int ExtraRSetBytesFor(int extra_object_bytes);
-
friend class LargeObjectIterator;
public:
diff --git a/src/string.js b/src/string.js
index 59a501f9..cc6504fe 100644
--- a/src/string.js
+++ b/src/string.js
@@ -62,26 +62,21 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- var char_code = %_FastCharCodeAt(this, pos);
- if (!%_IsSmi(char_code)) {
- var subject = TO_STRING_INLINE(this);
- var index = TO_INTEGER(pos);
- if (index >= subject.length || index < 0) return "";
- char_code = %StringCharCodeAt(subject, index);
+ var result = %_StringCharAt(this, pos);
+ if (%_IsSmi(result)) {
+ result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
}
- return %_CharFromCode(char_code);
+ return result;
}
// ECMA-262 section 15.5.4.5
function StringCharCodeAt(pos) {
- var fast_answer = %_FastCharCodeAt(this, pos);
- if (%_IsSmi(fast_answer)) {
- return fast_answer;
+ var result = %_StringCharCodeAt(this, pos);
+ if (!%_IsSmi(result)) {
+ result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
}
- var subject = TO_STRING_INLINE(this);
- var index = TO_INTEGER(pos);
- return %StringCharCodeAt(subject, index);
+ return result;
}
@@ -214,11 +209,7 @@ function StringMatch(regexp) {
function SubString(string, start, end) {
// Use the one character string cache.
if (start + 1 == end) {
- var char_code = %_FastCharCodeAt(string, start);
- if (!%_IsSmi(char_code)) {
- char_code = %StringCharCodeAt(string, start);
- }
- return %_CharFromCode(char_code);
+ return %_StringCharAt(string, start);
}
return %_SubString(string, start, end);
}
@@ -322,10 +313,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
var expansion = '$';
var position = next + 1;
if (position < length) {
- var peek = %_FastCharCodeAt(string, position);
- if (!%_IsSmi(peek)) {
- peek = %StringCharCodeAt(string, position);
- }
+ var peek = %_StringCharCodeAt(string, position);
if (peek == 36) { // $$
++position;
builder.add('$');
@@ -343,10 +331,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
++position;
var n = peek - 48;
if (position < length) {
- peek = %_FastCharCodeAt(string, position);
- if (!%_IsSmi(peek)) {
- peek = %StringCharCodeAt(string, position);
- }
+ peek = %_StringCharCodeAt(string, position);
// $nn, 01 <= nn <= 99
if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
var nn = n * 10 + (peek - 48);
@@ -824,7 +809,7 @@ function StringFromCharCode(code) {
var n = %_ArgumentsLength();
if (n == 1) {
if (!%_IsSmi(code)) code = ToNumber(code);
- return %_CharFromCode(code & 0xffff);
+ return %_StringCharFromCode(code & 0xffff);
}
// NOTE: This is not super-efficient, but it is necessary because we
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 6ebe495f..397988ae 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -441,9 +441,12 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
return code;
}
+#define CALL_LOGGER_TAG(kind, type) \
+ (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
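For example, the same tag name expands to either the plain or the keyed logger tag depending on the code kind:

// CALL_LOGGER_TAG(Code::CALL_IC, CALL_IC_TAG)       -> Logger::CALL_IC_TAG
// CALL_LOGGER_TAG(Code::KEYED_CALL_IC, CALL_IC_TAG) -> Logger::KEYED_CALL_IC_TAG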
Object* StubCache::ComputeCallConstant(int argc,
InLoopFlag in_loop,
+ Code::Kind kind,
String* name,
Object* object,
JSObject* holder,
@@ -462,7 +465,7 @@ Object* StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
in_loop,
argc);
@@ -474,11 +477,12 @@ Object* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(argc, in_loop);
+ CallStubCompiler compiler(argc, in_loop, kind);
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -488,6 +492,7 @@ Object* StubCache::ComputeCallConstant(int argc,
Object* StubCache::ComputeCallField(int argc,
InLoopFlag in_loop,
+ Code::Kind kind,
String* name,
Object* object,
JSObject* holder,
@@ -502,20 +507,21 @@ Object* StubCache::ComputeCallField(int argc,
object = holder;
}
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, in_loop);
+ CallStubCompiler compiler(argc, in_loop, kind);
code = compiler.CompileCallField(JSObject::cast(object),
holder,
index,
name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -524,6 +530,7 @@ Object* StubCache::ComputeCallField(int argc,
Object* StubCache::ComputeCallInterceptor(int argc,
+ Code::Kind kind,
String* name,
Object* object,
JSObject* holder) {
@@ -539,19 +546,20 @@ Object* StubCache::ComputeCallInterceptor(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
NOT_IN_LOOP,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, NOT_IN_LOOP);
+ CallStubCompiler compiler(argc, NOT_IN_LOOP, kind);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
holder,
name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -561,9 +569,10 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
+ Code::Kind kind,
String* name,
JSObject* receiver) {
- Object* code = ComputeCallNormal(argc, in_loop);
+ Object* code = ComputeCallNormal(argc, in_loop, kind);
if (code->IsFailure()) return code;
return Set(name, receiver->map(), Code::cast(code));
}
@@ -571,13 +580,17 @@ Object* StubCache::ComputeCallNormal(int argc,
Object* StubCache::ComputeCallGlobal(int argc,
InLoopFlag in_loop,
+ Code::Kind kind,
String* name,
JSObject* receiver,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::CALL_IC, NORMAL, in_loop, argc);
+ Code::ComputeMonomorphicFlags(kind,
+ NORMAL,
+ in_loop,
+ argc);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
@@ -585,11 +598,12 @@ Object* StubCache::ComputeCallGlobal(int argc,
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
- CallStubCompiler compiler(argc, in_loop);
+ CallStubCompiler compiler(argc, in_loop, kind);
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -637,9 +651,11 @@ static Object* FillCache(Object* code) {
}
-Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
+Code* StubCache::FindCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
+ Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
Object* result = ProbeCache(flags);
ASSERT(!result->IsUndefined());
// This might be called during the marking phase of the collector
@@ -648,9 +664,11 @@ Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
}
-Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
+Object* StubCache::ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
+ Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -658,9 +676,11 @@ Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
}
-Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
+Object* StubCache::ComputeCallPreMonomorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, in_loop, PREMONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, in_loop, PREMONOMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -668,9 +688,11 @@ Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
}
-Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
+Object* StubCache::ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, in_loop, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, in_loop, MONOMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -678,9 +700,11 @@ Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
}
-Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
+Object* StubCache::ComputeCallMegamorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, in_loop, MEGAMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, in_loop, MEGAMORPHIC, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -688,9 +712,11 @@ Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
}
-Object* StubCache::ComputeCallMiss(int argc) {
- Code::Flags flags =
- Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, MEGAMORPHIC, NORMAL, argc);
+Object* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
+ // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
+ // and monomorphic stubs are not mixed up in the stub cache.
+ Code::Flags flags = Code::ComputeFlags(
+ kind, NOT_IN_LOOP, MONOMORPHIC_PROTOTYPE_FAILURE, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -699,9 +725,9 @@ Object* StubCache::ComputeCallMiss(int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* StubCache::ComputeCallDebugBreak(int argc) {
+Object* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
+ Code::ComputeFlags(kind, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
@@ -709,9 +735,9 @@ Object* StubCache::ComputeCallDebugBreak(int argc) {
}
-Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
+Object* StubCache::ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC,
+ Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_PREPARE_STEP_IN,
NORMAL,
@@ -758,8 +784,8 @@ void StubCache::Clear() {
// Support function for computing call IC miss stubs.
-Handle<Code> ComputeCallMiss(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeCallMiss(argc), Code);
+Handle<Code> ComputeCallMiss(int argc, Code::Kind kind) {
+ CALL_HEAP_FUNCTION(StubCache::ComputeCallMiss(argc, kind), Code);
}
@@ -966,13 +992,18 @@ Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
- CallIC::GenerateInitialize(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ if (kind == Code::CALL_IC) {
+ CallIC::GenerateInitialize(masm(), argc);
+ } else {
+ KeyedCallIC::GenerateInitialize(masm(), argc);
+ }
Object* result = GetCodeWithFlags(flags, "CompileCallInitialize");
if (!result->IsFailure()) {
Counters::call_initialize_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
code, code->arguments_count()));
}
return result;
@@ -984,13 +1015,18 @@ Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
// The code of the PreMonomorphic stub is the same as the code
// of the Initialize stub. They differ only in the code object flags.
- CallIC::GenerateInitialize(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ if (kind == Code::CALL_IC) {
+ CallIC::GenerateInitialize(masm(), argc);
+ } else {
+ KeyedCallIC::GenerateInitialize(masm(), argc);
+ }
Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
if (!result->IsFailure()) {
Counters::call_premonomorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
code, code->arguments_count()));
}
return result;
@@ -1000,13 +1036,18 @@ Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
- CallIC::GenerateNormal(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ if (kind == Code::CALL_IC) {
+ CallIC::GenerateNormal(masm(), argc);
+ } else {
+ KeyedCallIC::GenerateNormal(masm(), argc);
+ }
Object* result = GetCodeWithFlags(flags, "CompileCallNormal");
if (!result->IsFailure()) {
Counters::call_normal_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
code, code->arguments_count()));
}
return result;
@@ -1016,13 +1057,19 @@ Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
- CallIC::GenerateMegamorphic(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ if (kind == Code::CALL_IC) {
+ CallIC::GenerateMegamorphic(masm(), argc);
+ } else {
+ KeyedCallIC::GenerateMegamorphic(masm(), argc);
+ }
+
Object* result = GetCodeWithFlags(flags, "CompileCallMegamorphic");
if (!result->IsFailure()) {
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
code, code->arguments_count()));
}
return result;
@@ -1032,13 +1079,18 @@ Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
Object* StubCompiler::CompileCallMiss(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
- CallIC::GenerateMiss(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ if (kind == Code::CALL_IC) {
+ CallIC::GenerateMiss(masm(), argc);
+ } else {
+ KeyedCallIC::GenerateMiss(masm(), argc);
+ }
Object* result = GetCodeWithFlags(flags, "CompileCallMiss");
if (!result->IsFailure()) {
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_MISS_TAG,
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
code, code->arguments_count()));
}
return result;
@@ -1053,7 +1105,8 @@ Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count()));
}
return result;
@@ -1065,18 +1118,26 @@ Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
// Use the same code for the step-in preparations as we do for
// the miss case.
int argc = Code::ExtractArgumentsCountFromFlags(flags);
- CallIC::GenerateMiss(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ if (kind == Code::CALL_IC) {
+ CallIC::GenerateMiss(masm(), argc);
+ } else {
+ KeyedCallIC::GenerateMiss(masm(), argc);
+ }
Object* result = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(
+ CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
+ code,
+ code->arguments_count()));
}
return result;
}
#endif
+#undef CALL_LOGGER_TAG
Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
// Check for allocation failures during stub compilation.
@@ -1167,7 +1228,7 @@ Object* CallStubCompiler::CompileCustomCall(int generator_id,
Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
in_loop_,
argc);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 45aaf75c..fcfffcfc 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -142,6 +142,7 @@ class StubCache : public AllStatic {
static Object* ComputeCallField(int argc,
InLoopFlag in_loop,
+ Code::Kind,
String* name,
Object* object,
JSObject* holder,
@@ -149,6 +150,7 @@ class StubCache : public AllStatic {
static Object* ComputeCallConstant(int argc,
InLoopFlag in_loop,
+ Code::Kind,
String* name,
Object* object,
JSObject* holder,
@@ -156,16 +158,19 @@ class StubCache : public AllStatic {
static Object* ComputeCallNormal(int argc,
InLoopFlag in_loop,
+ Code::Kind,
String* name,
JSObject* receiver);
static Object* ComputeCallInterceptor(int argc,
+ Code::Kind,
String* name,
Object* object,
JSObject* holder);
static Object* ComputeCallGlobal(int argc,
InLoopFlag in_loop,
+ Code::Kind,
String* name,
JSObject* receiver,
GlobalObject* holder,
@@ -174,18 +179,33 @@ class StubCache : public AllStatic {
// ---
- static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
- static Object* ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop);
- static Object* ComputeCallNormal(int argc, InLoopFlag in_loop);
- static Object* ComputeCallMegamorphic(int argc, InLoopFlag in_loop);
- static Object* ComputeCallMiss(int argc);
+ static Object* ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
+
+ static Object* ComputeCallPreMonomorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
+
+ static Object* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
+
+ static Object* ComputeCallMegamorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
+
+ static Object* ComputeCallMiss(int argc, Code::Kind kind);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindCallInitialize(int argc, InLoopFlag in_loop);
+ static Code* FindCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* ComputeCallDebugBreak(int argc);
- static Object* ComputeCallDebugPrepareStepIn(int argc);
+ static Object* ComputeCallDebugBreak(int argc, Code::Kind kind);
+
+ static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
#endif
static Object* ComputeLazyCompile(int argc);
@@ -197,9 +217,6 @@ class StubCache : public AllStatic {
// Clear the lookup table (@ mark compact collection).
static void Clear();
- // Functions for generating stubs at startup.
- static void GenerateMiss(MacroAssembler* masm);
-
// Generate code for probing the stub cache table.
// If extra != no_reg it might be used as an extra scratch register.
static void GenerateProbe(MacroAssembler* masm,
@@ -318,7 +335,7 @@ Object* KeyedLoadPropertyWithInterceptor(Arguments args);
// Support function for computing call IC miss stubs.
-Handle<Code> ComputeCallMiss(int argc);
+Handle<Code> ComputeCallMiss(int argc, Code::Kind kind);
// The stub compiler compiles stubs for the stub cache.
@@ -349,6 +366,15 @@ class StubCompiler BASE_EMBEDDED {
int index,
Register prototype);
+ // Generates prototype loading code that uses the objects from the
+ // context we were in when this function was called. This ties the
+ // generated code to a particular context and so must not be used in
+ // cases where the generated code is not allowed to have references
+ // to objects from a context.
+ static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype);
+
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index);
@@ -568,9 +594,11 @@ class KeyedStoreStubCompiler: public StubCompiler {
// a builtin function on its instance prototype (the one the generator
// is set for), and a name of a generator itself (used to build ids
// and generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(array, push, ArrayPush) \
- V(array, pop, ArrayPop)
+#define CUSTOM_CALL_IC_GENERATORS(V) \
+ V(array, push, ArrayPush) \
+ V(array, pop, ArrayPop) \
+ V(string, charCodeAt, StringCharCodeAt) \
+ V(string, charAt, StringCharAt)
class CallStubCompiler: public StubCompiler {
@@ -583,8 +611,8 @@ class CallStubCompiler: public StubCompiler {
kNumCallGenerators
};
- CallStubCompiler(int argc, InLoopFlag in_loop)
- : arguments_(argc), in_loop_(in_loop) { }
+ CallStubCompiler(int argc, InLoopFlag in_loop, Code::Kind kind)
+ : arguments_(argc), in_loop_(in_loop), kind_(kind) { }
Object* CompileCallField(JSObject* object,
JSObject* holder,
@@ -624,6 +652,7 @@ class CallStubCompiler: public StubCompiler {
private:
const ParameterCount arguments_;
const InLoopFlag in_loop_;
+ const Code::Kind kind_;
const ParameterCount& arguments() { return arguments_; }
@@ -632,6 +661,10 @@ class CallStubCompiler: public StubCompiler {
// Convenience function. Calls GetCode above passing
// CONSTANT_FUNCTION type and the name of the given function.
Object* GetCode(JSFunction* function);
+
+ void GenerateNameCheck(String* name, Label* miss);
+
+ void GenerateMissBranch();
};
diff --git a/src/type-info.h b/src/type-info.h
index 568437a9..9d20bc19 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -47,7 +47,7 @@ namespace internal {
class TypeInfo {
public:
- TypeInfo() { }
+ TypeInfo() : type_(kUnknownType) { }
static inline TypeInfo Unknown();
// We know it's a primitive type.
diff --git a/src/unbound-queue-inl.h b/src/unbound-queue-inl.h
index ff5d8338..fffb1dbc 100644
--- a/src/unbound-queue-inl.h
+++ b/src/unbound-queue-inl.h
@@ -82,6 +82,14 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
}
+
+template<typename Record>
+Record* UnboundQueue<Record>::Peek() {
+ ASSERT(divider_ != last_);
+ Node* next = reinterpret_cast<Node*>(divider_)->next;
+ return &next->value;
+}
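A hedged usage sketch for the new Peek(): a consumer can examine the head record in place and only dequeue when it decides to. The record type and the scheduling rule below are invented for illustration:

#include <cstdint>

struct Sample { int64_t timestamp; };  // hypothetical record type

static void DrainDueSamples(UnboundQueue<Sample>* queue, int64_t now) {
  // Peek() must not be called on an empty queue (see the ASSERT above).
  while (!queue->IsEmpty() && queue->Peek()->timestamp <= now) {
    Sample s;
    queue->Dequeue(&s);  // removes the record Peek() just exposed
    // ... process s ...
  }
}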
+
} } // namespace v8::internal
#endif // V8_UNBOUND_QUEUE_INL_H_
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
index 7bc314bc..443d5ce6 100644
--- a/src/unbound-queue.h
+++ b/src/unbound-queue.h
@@ -47,6 +47,7 @@ class UnboundQueue BASE_EMBEDDED {
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return divider_ == last_; }
+ INLINE(Record* Peek());
private:
INLINE(void DeleteFirst());
diff --git a/src/utils.h b/src/utils.h
index 7c818671..ed6d9a4f 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -37,11 +37,13 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
+#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
// Returns true iff x is a power of 2 (or zero). Cannot be used with the
// maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
- return (x & (x - 1)) == 0;
+ return IS_POWER_OF_TWO(x);
}
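The rewrite keeps the documented caveats, for instance:

// IsPowerOf2(16)      -> true
// IsPowerOf2(18)      -> false
// IsPowerOf2(0)       -> true   (the "or zero" caveat above)
// IsPowerOf2(INT_MIN) -> unspecified: the x - 1 overflows, as documented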
@@ -525,12 +527,54 @@ class StringBuilder {
};
+// Custom memcpy implementation for platforms where the standard version
+// may not be good enough.
+// TODO(lrn): Check whether some IA32 platforms should be excluded.
+#if defined(V8_TARGET_ARCH_IA32)
+
+// TODO(lrn): Extend to other platforms as needed.
+
+typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
+
+// Implemented in codegen-<arch>.cc.
+MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+static inline void MemCopy(void* dest, const void* src, size_t size) {
+ static MemCopyFunction memcopy = CreateMemCopyFunction();
+ (*memcopy)(dest, src, size);
+#ifdef DEBUG
+ CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+
+
+// Limit below which the extra overhead of the MemCopy function is likely
+// to outweigh the benefits of faster copying.
+// TODO(lrn): Try to find a more precise value.
+static const int kMinComplexMemCopy = 256;
+
+#else // V8_TARGET_ARCH_IA32
+
+static inline void MemCopy(void* dest, const void* src, size_t size) {
+ memcpy(dest, src, size);
+}
+
+static const int kMinComplexMemCopy = 256;
+
+#endif // V8_TARGET_ARCH_IA32
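A hedged sketch of how a caller is expected to use the pair of definitions above: the function-local static pays for CreateMemCopyFunction() once on IA32, each later call is one indirect call, and the areas must be disjoint as the comment requires. The helper below is invented:

static void CopyUC16(uint16_t* dest, const uint16_t* src, int chars) {
  size_t bytes = static_cast<size_t>(chars) * sizeof(*dest);
  if (bytes >= static_cast<size_t>(kMinComplexMemCopy)) {
    MemCopy(dest, src, bytes);  // generated stub on IA32, memcpy elsewhere
  } else {
    for (int i = 0; i < chars; i++) dest[i] = src[i];  // short copies inline
  }
}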
+
+
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
+ if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
+ MemCopy(dest, src, chars * sizeof(*dest));
+ return;
+ }
// Number of characters in a uintptr_t.
static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
while (dest <= limit - kStepSize) {
diff --git a/src/v8-counters.h b/src/v8-counters.h
index bd671a13..10b81024 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -123,8 +123,17 @@ namespace internal {
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
+ SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
+ /* How is the generic keyed-call stub used? */ \
+ SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
+ SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
+ SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
+ SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
+ SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType) \
+ SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
+ SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
/* Count how much the monomorphic keyed-load stubs are hit. */ \
SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
@@ -156,6 +165,9 @@ namespace internal {
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
+ SC(memcopy_aligned, V8.MemCopyAligned) \
+ SC(memcopy_unaligned, V8.MemCopyUnaligned) \
+ SC(memcopy_noxmm, V8.MemCopyNoXMM) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(reloc_info_count, V8.RelocInfoCount) \
diff --git a/src/v8.cc b/src/v8.cc
index 7219d630..65ce2e1b 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -149,10 +149,10 @@ void V8::TearDown() {
Top::TearDown();
- Heap::TearDown();
-
CpuProfiler::TearDown();
+ Heap::TearDown();
+
Logger::TearDown();
is_running_ = false;
diff --git a/src/v8natives.js b/src/v8natives.js
index ed392e2e..1d47eb75 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -492,23 +492,23 @@ PropertyDescriptor.prototype.hasSetter = function() {
function GetOwnProperty(obj, p) {
var desc = new PropertyDescriptor();
- // An array with:
- // obj is a data property [false, value, Writeable, Enumerable, Configurable]
- // obj is an accessor [true, Get, Set, Enumerable, Configurable]
+ // GetOwnProperty returns an array indexed by the constants
+ // defined in macros.py.
+ // If p is not a property on obj, undefined is returned.
var props = %GetOwnProperty(ToObject(obj), ToString(p));
if (IS_UNDEFINED(props)) return void 0;
// This is an accessor
- if (props[0]) {
- desc.setGet(props[1]);
- desc.setSet(props[2]);
+ if (props[IS_ACCESSOR_INDEX]) {
+ desc.setGet(props[GETTER_INDEX]);
+ desc.setSet(props[SETTER_INDEX]);
} else {
- desc.setValue(props[1]);
- desc.setWritable(props[2]);
+ desc.setValue(props[VALUE_INDEX]);
+ desc.setWritable(props[WRITABLE_INDEX]);
}
- desc.setEnumerable(props[3]);
- desc.setConfigurable(props[4]);
+ desc.setEnumerable(props[ENUMERABLE_INDEX]);
+ desc.setConfigurable(props[CONFIGURABLE_INDEX]);
return desc;
}
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 02292f6d..1e5e82e6 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -331,7 +331,7 @@ void ThreadManager::Iterate(ObjectVisitor* v) {
}
-void ThreadManager::IterateThreads(ThreadVisitor* v) {
+void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
for (ThreadState* state = ThreadState::FirstInUse();
state != NULL;
state = state->Next()) {
diff --git a/src/v8threads.h b/src/v8threads.h
index d70aa3c8..ca42354c 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -104,7 +104,7 @@ class ThreadManager : public AllStatic {
static bool IsArchived();
static void Iterate(ObjectVisitor* v);
- static void IterateThreads(ThreadVisitor* v);
+ static void IterateArchivedThreads(ThreadVisitor* v);
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
diff --git a/src/version.cc b/src/version.cc
index adeee595..d210dabc 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,9 +34,9 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 12
+#define BUILD_NUMBER 18
#define PATCH_LEVEL 0
-#define CANDIDATE_VERSION false
+#define CANDIDATE_VERSION true
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h
index 17b1c504..d08b5d21 100644
--- a/src/virtual-frame-light-inl.h
+++ b/src/virtual-frame-light-inl.h
@@ -42,7 +42,8 @@ namespace internal {
VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
: element_count_(0),
top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0) { }
+ register_allocation_map_(0),
+ tos_known_smi_map_(0) { }
// On entry to a function, the virtual frame already contains the receiver,
@@ -50,20 +51,23 @@ VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
VirtualFrame::VirtualFrame()
: element_count_(parameter_count() + 2),
top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0) { }
+ register_allocation_map_(0),
+ tos_known_smi_map_(0) { }
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
: element_count_(original->element_count()),
top_of_stack_state_(original->top_of_stack_state_),
- register_allocation_map_(original->register_allocation_map_) { }
+ register_allocation_map_(original->register_allocation_map_),
+ tos_known_smi_map_(0) { }
-bool VirtualFrame::Equals(VirtualFrame* other) {
+bool VirtualFrame::Equals(const VirtualFrame* other) {
ASSERT(element_count() == other->element_count());
if (top_of_stack_state_ != other->top_of_stack_state_) return false;
if (register_allocation_map_ != other->register_allocation_map_) return false;
+ if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
return true;
}
@@ -99,7 +103,9 @@ VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
}
-CodeGenerator* VirtualFrame::cgen() { return CodeGeneratorScope::Current(); }
+CodeGenerator* VirtualFrame::cgen() const {
+ return CodeGeneratorScope::Current();
+}
MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
@@ -112,15 +118,17 @@ void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
}
-int VirtualFrame::parameter_count() {
+int VirtualFrame::parameter_count() const {
return cgen()->scope()->num_parameters();
}
-int VirtualFrame::local_count() { return cgen()->scope()->num_stack_slots(); }
+int VirtualFrame::local_count() const {
+ return cgen()->scope()->num_stack_slots();
+}
-int VirtualFrame::frame_pointer() { return parameter_count() + 3; }
+int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
int VirtualFrame::context_index() { return frame_pointer() - 1; }
@@ -129,7 +137,7 @@ int VirtualFrame::context_index() { return frame_pointer() - 1; }
int VirtualFrame::function_index() { return frame_pointer() - 2; }
-int VirtualFrame::local0_index() { return frame_pointer() + 2; }
+int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
int VirtualFrame::fp_relative(int index) {
@@ -139,12 +147,12 @@ int VirtualFrame::fp_relative(int index) {
}
-int VirtualFrame::expression_base_index() {
+int VirtualFrame::expression_base_index() const {
return local0_index() + local_count();
}
-int VirtualFrame::height() {
+int VirtualFrame::height() const {
return element_count() - expression_base_index();
}
diff --git a/src/virtual-frame-light.cc b/src/virtual-frame-light.cc
index 9c019cf7..bbaaaf5f 100644
--- a/src/virtual-frame-light.cc
+++ b/src/virtual-frame-light.cc
@@ -36,7 +36,7 @@ namespace internal {
void VirtualFrame::Adjust(int count) {
ASSERT(count >= 0);
- element_count_ += count;
+ RaiseHeight(count, 0);
}
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 4c69510c..70bcdb16 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -210,6 +210,10 @@ void RelocInfo::apply(intptr_t delta) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta); // relocate entry
+ } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+ // Special handling of debug break slot when a break point is set (call
+ // instruction has been inserted).
+ Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta); // relocate entry
}
}
@@ -298,6 +302,11 @@ bool RelocInfo::IsPatchedReturnSequence() {
}
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ return !Assembler::IsNop(pc());
+}
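+// (Illustrative note: a debug break slot is presumably emitted as a run of
+// nop (0x90) bytes; setting a break point patches a call over them, so a
+// single-byte IsNop check is enough to tell the two states apart.)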
+
+
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
return Memory::Address_at(
@@ -341,8 +350,10 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
- RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) {
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9f264964..d77c09fd 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -2800,6 +2800,13 @@ void Assembler::RecordJSReturn() {
}
+void Assembler::RecordDebugBreakSlot() {
+ WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
void Assembler::RecordComment(const char* msg) {
if (FLAG_debug_code) {
EnsureSpace ensure_space(this);
@@ -2822,13 +2829,16 @@ void Assembler::RecordStatementPosition(int pos) {
}
-void Assembler::WriteRecordedPositions() {
+bool Assembler::WriteRecordedPositions() {
+ bool written = false;
+
// Write the statement position if it is different from what was written last
// time.
if (current_statement_position_ != written_statement_position_) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
written_statement_position_ = current_statement_position_;
+ written = true;
}
// Write the position if it is different from what was written last time and
@@ -2838,7 +2848,11 @@ void Assembler::WriteRecordedPositions() {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::POSITION, current_position_);
written_position_ = current_position_;
+ written = true;
}
+
+ // Return whether something was written.
+ return written;
}
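A hedged sketch of what the new boolean enables; the caller below is hypothetical, not part of this patch:

static void SetStatementPositionSketch(Assembler* assm, int pos) {
  assm->RecordStatementPosition(pos);
  if (assm->WriteRecordedPositions()) {
    // Only pad with a patchable slot when the position bookkeeping
    // actually wrote relocation info.
    assm->RecordDebugBreakSlot();
  }
}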
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 3db4d084..c7e737c6 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -307,7 +307,7 @@ class Operand BASE_EMBEDDED {
private:
byte rex_;
- byte buf_[10];
+ byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
@@ -455,6 +455,11 @@ class Assembler : public Malloced {
// return address. TODO: Use return sequence length instead.
// Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
static const int kPatchReturnSequenceAddressOffset = 13 - 4;
+ // Distance between start of patched debug break slot and where the
+ // 32-bit displacement of a near call would be, relative to the pushed
+ // return address. TODO: Use return sequence length instead.
+ // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
+ static const int kPatchDebugBreakSlotAddressOffset = 13 - 4;
// TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2;
@@ -463,6 +468,10 @@ class Assembler : public Malloced {
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
+ // The debug break slot must be able to contain a call instruction.
+ static const int kDebugBreakSlotLength = kCallInstructionLength;
+
+
// ---------------------------------------------------------------------------
// Code generation
//
@@ -1135,13 +1144,16 @@ class Assembler : public Malloced {
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
void RecordComment(const char* msg);
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
- void WriteRecordedPositions();
+ bool WriteRecordedPositions();
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
int current_statement_position() const { return current_statement_position_; }
@@ -1159,6 +1171,8 @@ class Assembler : public Malloced {
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
+ static bool IsNop(Address addr) { return *addr == 0x90; }
+
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 8099febb..ff655c76 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -308,7 +308,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// (tail-call) to the code in register edx without checking arguments.
__ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldOperand(rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset));
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ cmpq(rax, rbx);
@@ -417,9 +418,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movq(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rcx, Operand(rbp, kArgumentsOffset)); // load arguments
- __ push(rcx);
- __ push(rax);
+ __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -429,8 +428,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// we have generated an inline version of the keyed load. In this
// case, we know that we are not generating a test instruction next.
- // Remove IC arguments from the stack and push the nth argument.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ // Push the nth argument.
__ push(rax);
// Update the index on the stack and in register rax.
@@ -525,15 +523,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
- // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
- __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+ __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
Factory::fixed_array_map());
- __ movq(FieldOperand(scratch1, Array::kLengthOffset),
- Immediate(initial_capacity));
+ __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
+ Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@@ -587,7 +585,6 @@ static void AllocateJSArray(MacroAssembler* masm,
JSFunction::kPrototypeOrInitialMapOffset));
// Check whether an empty sized array is requested.
- __ SmiToInteger64(array_size, array_size);
__ testq(array_size, array_size);
__ j(not_zero, &not_empty);
@@ -605,10 +602,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(&not_empty);
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ SmiIndex index =
+ masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- array_size,
+ index.scale,
+ index.reg,
result,
elements_array_end,
scratch,
@@ -620,43 +618,41 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
- // array_size: size of array
+ // array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ Move(elements_array, Factory::empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
- __ Integer32ToSmi(scratch, array_size);
- __ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
- // array_size: size of array
+ // array_size: size of array (smi)
__ lea(elements_array, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
- // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
- // array_size: size of array
- ASSERT(kSmiTag == 0);
+ // array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
- __ testq(array_size, array_size);
+ __ SmiTest(array_size);
__ j(not_zero, &not_empty_2);
// Length of the FixedArray is the number of pre-allocated elements even
// though the actual JSArray has length 0.
- __ movq(FieldOperand(elements_array, Array::kLengthOffset),
- Immediate(kPreallocatedArrayElements));
+ __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
+ Smi::FromInt(kPreallocatedArrayElements));
__ jmp(&fill_array);
__ bind(&not_empty_2);
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
- __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+ __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@@ -1039,8 +1035,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
- __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 767c33fe..f9692ce4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -43,12 +43,12 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
+// Platform-specific FrameRegisterState functions.
-void DeferredCode::SaveRegisters() {
+void FrameRegisterState::Save(MacroAssembler* masm) const {
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int action = registers_[i];
if (action == kPush) {
@@ -60,7 +60,7 @@ void DeferredCode::SaveRegisters() {
}
-void DeferredCode::RestoreRegisters() {
+void FrameRegisterState::Restore(MacroAssembler* masm) const {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
int action = registers_[i];
@@ -74,6 +74,45 @@ void DeferredCode::RestoreRegisters() {
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ frame_state_.Save(masm_);
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ frame_state_.Restore(masm_);
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->Save(masm);
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ frame_state_->Restore(masm);
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
+}
+
+
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -621,9 +660,25 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
void DeferredReferenceGetKeyedValue::Generate() {
- __ push(receiver_); // First IC argument.
- __ push(key_); // Second IC argument.
-
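+  // Move receiver into rdx and key into rax (the register arguments expected
+  // by the keyed load IC), handling every aliasing case between the two
+  // source registers.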
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rax)) {
+ __ movq(rax, key_);
+ } // else do nothing.
+ } else if (receiver_.is(rax)) {
+ if (key_.is(rdx)) {
+ __ xchg(rax, rdx);
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rax, key_);
+ }
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rax, key_);
+ __ movq(rdx, receiver_);
+ }
// Calculate the delta from the IC call instruction to the map check
// movq instruction in the inlined version. This delta is stored in
// a test(rax, delta) instruction after the call so that we can find
@@ -647,8 +702,6 @@ void DeferredReferenceGetKeyedValue::Generate() {
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(key_);
- __ pop(receiver_);
}
@@ -755,6 +808,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
+ frame()->Dup();
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
@@ -852,10 +906,11 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, rax);
- __ cmpq(rax, Immediate(kArgumentsLimit));
+ __ SmiToInteger32(rax,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movl(rcx, rax);
+ __ cmpl(rax, Immediate(kArgumentsLimit));
__ j(above, &build_args);
// Loop through the arguments pushing them onto the execution
@@ -1890,8 +1945,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
- __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
+ __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
entry.Jump();
@@ -1902,8 +1956,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
- __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
+ __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
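
Both ForInStatement hunks above drop the explicit Integer32ToSmi because FixedArray::kLengthOffset now holds a smi-tagged length, so a plain movq already loads a tagged value. For reference, a minimal sketch of the x64 smi encoding these helpers assume (a 32-bit payload in the upper half of the word; the actual constants live in V8's headers):

    #include <cstdint>

    const int kSmiShift = 32;  // x64: the payload sits in the high 32 bits.

    inline int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;
    }

    inline int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);
    }
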
@@ -2830,26 +2883,66 @@ void CodeGenerator::VisitCall(Call* node) {
// Allocate a frame slot for the receiver.
frame_->Push(Factory::undefined_value());
+
+ // Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ // Result to hold the result of the function resolution and the
+ // final result of the eval call.
+ Result result;
+
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables, we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ JumpTarget done;
+ if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->slot()->type() == Slot::LOOKUP);
+ JumpTarget slow;
+ // Prepare the stack for the call to
+ // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
+ // function, the first argument to the eval call and the
+ // receiver.
+ Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ frame_->Push(&fun);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+ frame_->PushParameterAt(-1);
+
+ // Resolve the call.
+ result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+
+ done.Jump(&result);
+ slow.Bind();
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval by
+ // pushing the loaded function, the first argument to the eval
+ // call and the receiver.
frame_->PushElementAt(arg_count + 1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
-
- // Push the receiver.
frame_->PushParameterAt(-1);
// Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+ // If we generated fast-case code, bind the jump target where the fast
+ // and slow cases merge.
+ if (done.is_linked()) done.Bind(&result);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
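
Schematically, the fast/slow split introduced above is the following control flow (hypothetical helper names; the real work is done by LoadFromGlobalSlotCheckExtensions and the two runtime functions named in the hunk):

    #include <optional>

    // Stubs standing in for generated code and the runtime calls.
    std::optional<int> TryLoadGlobalEvalNoExtensions() { return std::nullopt; }
    int ResolveEvalWithContextLookup() { return 0; }

    int ResolveEval(bool shadowed_only_by_eval_introduced_vars) {
      if (shadowed_only_by_eval_introduced_vars) {
        // Fast path: kResolvePossiblyDirectEvalNoLookup.
        if (std::optional<int> fun = TryLoadGlobalEvalNoExtensions()) {
          return *fun;  // done.Jump(&result): paths merge at done.Bind().
        }
      }
      // slow.Bind(): full lookup via kResolvePossiblyDirectEval.
      return ResolveEvalWithContextLookup();
    }
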
@@ -3970,23 +4063,67 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateFastCharCodeAt");
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result_, Heap::kNanValueRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
-
- // We will mutate the index register and possibly the object register.
- // The case where they are somehow the same register is handled
- // because we only mutate them in the case where the receiver is a
- // heap object and the index is not.
object.ToRegister();
index.ToRegister();
+ // We might mutate the object register.
frame_->Spill(object.reg());
- frame_->Spill(index.reg());
// We need two extra registers.
Result result = allocator()->Allocate();
@@ -3994,33 +4131,40 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
- // There is no virtual frame effect from here up to the final result
- // push.
- Label slow_case;
- Label exit;
- StringHelper::GenerateFastCharCodeAt(masm_,
- object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg(),
- &slow_case,
- &slow_case,
- &slow_case,
- &slow_case);
- __ jmp(&exit);
-
- __ bind(&slow_case);
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(result.reg(), Heap::kUndefinedValueRootIndex);
-
- __ bind(&exit);
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
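
What the fast/deferred pair computes, modeled in plain C++ (illustration only; in the generated code the undefined and NaN sentinels loaded in the deferred path are what trigger conversion and the out-of-range result):

    #include <cmath>
    #include <string>

    // Flat ASCII fast case; non-smi indices and non-flat strings take the
    // deferred slow path in the real code.
    double CharCodeAt(const std::string& s, double index) {
      double i = std::trunc(index);  // Slow path converts number -> integer.
      if (!(i >= 0 && i < static_cast<double>(s.size()))) {
        return std::nan("");         // index_out_of_range_: spec says NaN.
      }
      return static_cast<unsigned char>(s[static_cast<size_t>(i)]);
    }
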
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateCharFromCode");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -4029,19 +4173,97 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
code.ToRegister();
ASSERT(code.is_valid());
- // StringHelper::GenerateCharFromCode may do a runtime call.
- frame_->SpillAll();
-
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
- StringHelper::GenerateCharFromCode(masm_,
- code.reg(),
- result.reg(),
- scratch.reg(),
- CALL_FUNCTION);
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code.reg(), result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result_, Smi::FromInt(0));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need three extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch1 = allocator()->Allocate();
+ ASSERT(scratch1.is_valid());
+ Result scratch2 = allocator()->Allocate();
+ ASSERT(scratch2.is_valid());
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object.reg(),
+ index.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
@@ -4467,7 +4689,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ Move(FieldOperand(rcx, HeapObject::kMapOffset),
Factory::fixed_array_map());
// Set length.
- __ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
+ __ Integer32ToSmi(rdx, rbx);
+ __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
// Fill contents of fixed-array with the-hole.
__ Move(rdx, Factory::the_hole_value());
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
@@ -4507,7 +4730,7 @@ class DeferredSearchCache: public DeferredCode {
virtual void Generate();
private:
- Register dst_; // on invocation index of finger (as Smi), on exit
+ Register dst_; // Holds the finger index (as int32) on invocation;
// on exit, holds the value being looked up.
Register cache_; // instance of JSFunctionResultCache.
Register key_; // key being looked up.
@@ -4531,11 +4754,10 @@ void DeferredSearchCache::Generate() {
Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
- __ SmiToInteger32(dst_, dst_);
// Check the cache from finger to start of the cache.
__ bind(&first_loop);
- __ subq(dst_, kEntrySizeImm);
- __ cmpq(dst_, kEntriesIndexImm);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, kEntriesIndexImm);
__ j(less, &search_further);
__ cmpq(ArrayElement(cache_, dst_), key_);
@@ -4549,14 +4771,15 @@ void DeferredSearchCache::Generate() {
__ bind(&search_further);
// Check the cache from end of cache up to finger.
- __ movq(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
- __ movq(scratch_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
- __ SmiToInteger32(dst_, dst_);
- __ SmiToInteger32(scratch_, scratch_);
+ __ SmiToInteger32(dst_,
+ FieldOperand(cache_,
+ JSFunctionResultCache::kCacheSizeOffset));
+ __ SmiToInteger32(scratch_,
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
__ bind(&second_loop);
- __ subq(dst_, kEntrySizeImm);
- __ cmpq(dst_, scratch_);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, scratch_);
__ j(less_equal, &cache_miss);
__ cmpq(ArrayElement(cache_, dst_), key_);
@@ -4586,29 +4809,28 @@ void DeferredSearchCache::Generate() {
// cache miss this optimization would hardly matter.
// Check if we could add new entry to cache.
- __ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(r9, r9);
- __ cmpq(rbx, r9);
+ __ SmiCompare(rbx, r9);
__ j(greater, &add_new_entry);
// Check if we could evict entry after finger.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(rbx, rbx);
__ addq(rdx, kEntrySizeImm);
Label forward;
__ cmpq(rbx, rdx);
__ j(greater, &forward);
// Need to wrap over the cache.
- __ movq(rdx, kEntriesIndexImm);
+ __ movl(rdx, kEntriesIndexImm);
__ bind(&forward);
__ Integer32ToSmi(r9, rdx);
__ jmp(&update_cache);
__ bind(&add_new_entry);
- // r9 holds cache size as int.
- __ movq(rdx, r9);
- __ Integer32ToSmi(r9, r9);
+ // r9 holds cache size as smi.
+ __ SmiToInteger32(rdx, r9);
__ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
__ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
@@ -4680,16 +4902,13 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
// tmp.reg() now holds the finger offset as an int32.
- __ movq(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, tmp.reg(), kPointerSizeLog2);
+ __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
__ cmpq(key.reg(), FieldOperand(cache.reg(),
- index.reg, index.scale,
+ tmp.reg(), times_pointer_size,
FixedArray::kHeaderSize));
- // Do NOT alter index.reg or tmp.reg() before cmpq below.
deferred->Branch(not_equal);
__ movq(tmp.reg(), FieldOperand(cache.reg(),
- index.reg, index.scale,
+ tmp.reg(), times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
deferred->BindExit();
@@ -5627,8 +5846,6 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// property case was inlined. Ensure that there is not a test rax
// instruction here.
masm_->nop();
- // Discard the global object. The result is in answer.
- frame_->Drop();
return answer;
}
@@ -5689,7 +5906,6 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
frame_->Push(&arguments);
frame_->Push(key_literal->handle());
*result = EmitKeyedLoad();
- frame_->Drop(2); // Drop key and receiver.
done->Jump(result);
}
}
@@ -6576,7 +6792,9 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
void DeferredReferenceGetNamedValue::Generate() {
- __ push(receiver_);
+ if (!receiver_.is(rax)) {
+ __ movq(rax, receiver_);
+ }
__ Move(rcx, name_);
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -6593,7 +6811,6 @@ void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(receiver_);
}
@@ -6677,7 +6894,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
smi_value,
overwrite_mode);
}
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiAddConstant(operand->reg(),
operand->reg(),
smi_value,
@@ -6698,7 +6916,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
// A smi currently fits in a 32-bit Immediate.
__ SmiSubConstant(operand->reg(),
operand->reg(),
@@ -6727,7 +6946,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiShiftArithmeticRightConstant(operand->reg(),
operand->reg(),
shift_value);
@@ -6754,7 +6974,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiShiftLogicalRightConstant(answer.reg(),
operand->reg(),
shift_value,
@@ -6786,12 +7007,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
smi_value,
operand->reg(),
overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- Condition is_smi = masm_->CheckSmi(operand->reg());
- deferred->Branch(NegateCondition(is_smi));
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ Move(answer.reg(), smi_value);
__ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
@@ -6812,7 +7029,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
deferred->BindExit();
answer = *operand;
} else {
@@ -6825,7 +7043,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiShiftLeftConstant(answer.reg(),
operand->reg(),
shift_value);
@@ -6851,7 +7070,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
if (op == Token::BIT_AND) {
__ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
} else if (op == Token::BIT_XOR) {
@@ -6916,6 +7136,37 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
}
+void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred) {
+ if (!type.IsSmi()) {
+ __ JumpIfNotSmi(reg, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(reg);
+ }
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (!left_info.IsSmi() && !right_info.IsSmi()) {
+ __ JumpIfNotBothSmi(left, right, deferred->entry_label());
+ } else if (!left_info.IsSmi()) {
+ __ JumpIfNotSmi(left, deferred->entry_label());
+ } else if (!right_info.IsSmi()) {
+ __ JumpIfNotSmi(right, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+}
+
+
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
@@ -6925,9 +7176,6 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Copy the type info because left and right may be overwritten.
TypeInfo left_type_info = left->type_info();
TypeInfo right_type_info = right->type_info();
- USE(left_type_info);
- USE(right_type_info);
- // TODO(X64): Use type information in calculations.
Token::Value op = expr->op();
Result answer;
// Special handling of div and mod because they use fixed registers.
@@ -7004,7 +7252,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->reg(),
right->reg(),
overwrite_mode);
- __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
if (op == Token::DIV) {
__ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
@@ -7086,7 +7335,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
}
}
} else {
- __ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
+ left_type_info, right_type_info, deferred);
}
__ bind(&do_op);
@@ -7134,7 +7384,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->reg(),
right->reg(),
overwrite_mode);
- __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
switch (op) {
case Token::ADD:
@@ -7254,9 +7505,8 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit();
- frame()->Push(&receiver);
}
- ASSERT(frame()->height() == original_height);
+ ASSERT(frame()->height() == original_height - 1);
return result;
}
@@ -7279,16 +7529,14 @@ Result CodeGenerator::EmitKeyedLoad() {
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
-
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
- // Use a fresh temporary for the index
- Result index = allocator()->Allocate();
- ASSERT(index.is_valid());
-
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is returned in elements, which is not shared.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
@@ -7301,9 +7549,9 @@ Result CodeGenerator::EmitKeyedLoad() {
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use
- // root array to load null_value, since it must be patched with
- // the expected receiver map.
+ // coverage code can interfere with the patching. Do not use a load
+ // from the root array to load null_value, since the load must be patched
+ // with the expected receiver map, which is not in the root array.
masm_->movq(kScratchRegister, Factory::null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
@@ -7321,15 +7569,11 @@ Result CodeGenerator::EmitKeyedLoad() {
Factory::fixed_array_map());
deferred->Branch(not_equal);
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ SmiToInteger32(index.reg(), key.reg());
- __ cmpl(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ // Check that key is within bounds.
+ __ SmiCompare(key.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
- // The index register holds the un-smi-tagged key. It has been
- // zero-extended to 64-bits, so it can be used directly as index in the
- // operand below.
+
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
@@ -7337,21 +7581,19 @@ Result CodeGenerator::EmitKeyedLoad() {
// heuristic about which register to reuse. For example, if
// one is rax, then we can reuse that one because the value
// coming from the deferred code will be in rax.
+ SmiIndex index =
+ masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
__ movq(elements.reg(),
- Operand(elements.reg(),
- index.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ FieldOperand(elements.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
result = elements;
- elements.Unuse();
- index.Unuse();
__ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
- frame_->Push(&receiver);
- frame_->Push(&key);
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
@@ -7362,7 +7604,7 @@ Result CodeGenerator::EmitKeyedLoad() {
// the push that follows might be peep-hole optimized away.
__ nop();
}
- ASSERT(frame()->height() == original_height);
+ ASSERT(frame()->height() == original_height - 2);
return result;
}
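
The new bounds check compares tagged values directly. That is sound because the x64 smi encoding (payload shifted left) is strictly monotonic, and the unsigned above_equal branch also rejects negative keys, whose tagged form compares as a huge unsigned number. A quick sanity check, assuming the 32-bit-shift encoding:

    #include <cassert>
    #include <cstdint>

    inline int64_t Tag(int32_t v) { return static_cast<int64_t>(v) << 32; }

    int main() {
      assert(Tag(3) < Tag(7));                // Order is preserved.
      assert(static_cast<uint64_t>(Tag(-1)) >=
             static_cast<uint64_t>(Tag(7)));  // above_equal catches it.
      return 0;
    }
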
@@ -7406,7 +7648,6 @@ void Reference::GetValue() {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) set_unloaded();
break;
}
@@ -7414,29 +7655,33 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
+ if (persist_after_get_) {
+ cgen_->frame()->Dup();
+ }
Result result = cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->Push(&result);
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
break;
}
case KEYED: {
// A load of a bare identifier (load from global) cannot be keyed.
ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
-
+ if (persist_after_get_) {
+ cgen_->frame()->PushElementAt(1);
+ cgen_->frame()->PushElementAt(1);
+ }
Result value = cgen_->EmitKeyedLoad();
cgen_->frame()->Push(&value);
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ set_unloaded();
+ }
}
@@ -7566,7 +7811,7 @@ void Reference::SetValue(InitState init_state) {
// Check whether it is possible to omit the write barrier. If the
// elements array is in new space or the value written is a smi we can
- // safely update the elements array without updating the remembered set.
+ // safely update the elements array without a write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@@ -7591,10 +7836,10 @@ void Reference::SetValue(InitState init_state) {
// Store the value.
SmiIndex index =
masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(Operand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(FieldOperand(tmp.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
@@ -7674,7 +7919,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ xor_(rbx, rbx); // Set to NULL.
@@ -7944,14 +8189,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ movl(rcx, rdx);
__ movl(rax, rdx);
__ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
+ __ shrl(rdx, Immediate(8));
+ __ shrl(rcx, Immediate(16));
+ __ shrl(rax, Immediate(24));
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+
// ST[0] == double value.
// rbx = bits of double value.
// rcx = TranscendentalCache::hash(double value).
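
In C, the hash computed above looks like the following (kCacheSize = 512 is an assumption for illustration; word stands for the 32-bit fold of the double's bit pattern produced just before this hunk). The sarl-to-shrl fix matters: an arithmetic shift would replicate the sign bit into every extracted byte, collapsing negative inputs onto a few cache lines.

    #include <cstdint>

    const int kCacheSize = 512;  // A power of two, per the ASSERT above.

    inline int TranscendentalHash(uint32_t word) {
      uint32_t hash = word ^ (word >> 8) ^ (word >> 16) ^ (word >> 24);
      return static_cast<int>(hash & (kCacheSize - 1));
    }
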
@@ -8349,7 +8595,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rax, rax);
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
@@ -8485,14 +8732,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 3: Start of string data
Label setup_two_byte, setup_rest;
__ testb(rdi, rdi);
- __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
__ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
__ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
__ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
@@ -8512,12 +8758,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
__ j(equal, &success);
Label failure;
- __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
__ j(equal, &failure);
- __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
// If not exception, it can only be retry. Handle that in the runtime system.
__ j(not_equal, &runtime);
// Result must now be exception. If there is no pending exception already a
@@ -8627,9 +8873,10 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
- __ subl(mask, Immediate(1)); // Make mask.
+ __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide the smi-tagged length by two.
+ __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+ __ subq(mask, Immediate(1)); // Make mask.
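
A worked example of the mask arithmetic, under the layout the comment above describes (number/string pairs stored flat, capacity a power of two): a FixedArray of length 128 holds 64 entries, so mask = 128/2 - 1 = 63. The doubling to reach an entry's first slot is my reading of the two-elements-per-entry comment.

    #include <cstdint>

    // Slot of the number half of the probed entry; the string half is at +1.
    inline int NumberCacheEntrySlot(uint32_t hash, int fixed_array_length) {
      int mask = fixed_array_length / 2 - 1;
      return static_cast<int>(hash & mask) * 2;
    }
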
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -9149,7 +9396,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ SmiToInteger32(rcx, rcx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -9157,7 +9403,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
// Copy the fixed array slots.
Label loop;
@@ -10671,143 +10918,191 @@ const char* CompareStub::GetName() {
}
-void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi, trigger the non-string case.
- __ JumpIfSmi(object, receiver_not_string);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string, trigger the non-string case.
- __ testb(result, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string);
+ __ testb(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
// If the index is non-smi, trigger the non-smi case.
- __ JumpIfNotSmi(index, index_not_smi);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
- // Check for index out of range.
- __ SmiCompare(index, FieldOperand(object, String::kLengthOffset));
- __ j(above_equal, index_out_of_range);
+ // Put smi-tagged index into scratch register.
+ __ movq(scratch_, index_);
+ __ bind(&got_smi_index_);
- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ // Check for index out of range.
+ __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT_EQ(0, kSeqStringTag);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, &not_a_flat_string);
+ ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
- // Put untagged index into scratch register.
- __ SmiToInteger32(scratch, index);
+ // Handle non-flat strings.
+ __ testb(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
// Check for 1-byte or 2-byte string.
- ASSERT_EQ(0, kTwoByteStringTag);
- __ testb(result, Immediate(kStringEncodingMask));
+ __ bind(&flat_string);
+ ASSERT(kAsciiStringTag != 0);
+ __ testb(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
- __ movzxwl(result, FieldOperand(object,
- scratch,
- times_2,
- SeqTwoByteString::kHeaderSize));
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxwl(result_, FieldOperand(object_,
+ scratch_, times_2,
+ SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
- // Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, Immediate(kStringRepresentationMask));
- __ cmpb(result, Immediate(kConsStringTag));
- __ j(not_equal, slow_case);
-
- // ConsString.
- // Check that the right hand side is the empty string (ie if this is really a
- // flat string in a cons string). If that is not the case we would rather go
- // to the runtime system now, to flatten the string.
- __ movq(result, FieldOperand(object, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kEmptyStringRootIndex);
- __ j(not_equal, slow_case);
- // Get the first of the two strings and load its instance type.
- __ movq(object, FieldOperand(object, ConsString::kFirstOffset));
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
-
// ASCII string.
- __ bind(&ascii_string);
// Load the byte into the result register.
- __ movzxbl(result, FieldOperand(object,
- scratch,
- times_1,
- SeqAsciiString::kHeaderSize));
+ __ bind(&ascii_string);
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxbl(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- __ Integer32ToSmi(result, result);
+ __ Integer32ToSmi(result_, result_);
+ __ bind(&exit_);
}
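
The cons-string shortcut in the fast path above, restated over a toy layout (not V8's object model): a cons whose second half is the empty string is a flat string in disguise, so one level of unwrapping avoids a runtime flattening call, while anything deeper goes to &call_runtime_.

    #include <string>

    struct ToyString {
      const ToyString* first = nullptr;   // Non-null means "cons string".
      const ToyString* second = nullptr;
      std::string flat;                   // Payload when first == nullptr.
    };

    // Returns the flat payload if reachable without flattening, else nullptr.
    const std::string* TryGetFlat(const ToyString* s) {
      if (s->first == nullptr) return &s->flat;  // Already flat.
      bool second_is_empty_flat =
          s->second->first == nullptr && s->second->flat.empty();
      if (second_is_empty_flat && s->first->first == nullptr) {
        return &s->first->flat;                  // Cons of (flat, "").
      }
      return nullptr;                            // Flatten in the runtime.
    }
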
-void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- Register scratch,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
- Label slow_case;
- Label exit;
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(rax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ movq(scratch_, rax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code, &slow_case);
- __ SmiToInteger32(scratch, code);
- __ cmpl(scratch, Immediate(String::kMaxAsciiCharCode));
- __ j(above, &slow_case);
-
- __ Move(result, Factory::single_character_string_cache());
- __ movq(result, FieldOperand(result,
- scratch,
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
- __ jmp(&exit);
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
+ __ j(above, &slow_case_);
- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(rax)) {
- __ movq(result, rax);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(rax));
- __ pop(rax); // Save return address.
- __ push(code);
- __ push(rax); // Restore return address.
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
+ __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case_);
+ __ bind(&exit_);
+}
- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(rax));
- __ ret(0);
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
}
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -10928,7 +11223,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&allocated);
// Fill the fields of the cons string.
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+ __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
@@ -10978,8 +11273,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Locate first character of result.
__ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument
- __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
@@ -10988,8 +11282,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of first argument
StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
// Locate first character of second argument.
- __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
__ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
@@ -11017,8 +11310,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Locate first character of result.
__ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument.
- __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
@@ -11027,8 +11319,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of first argument
StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
// Locate first character of second argument.
- __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
__ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
@@ -11057,15 +11348,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (ascii) {
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(1));
- __ addq(dest, Immediate(1));
+ __ incq(src);
+ __ incq(dest);
} else {
__ movzxwl(kScratchRegister, Operand(src, 0));
__ movw(Operand(dest, 0), kScratchRegister);
__ addq(src, Immediate(2));
__ addq(dest, Immediate(2));
}
- __ subl(count, Immediate(1));
+ __ decl(count);
__ j(not_zero, &loop);
}
@@ -11078,38 +11369,39 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy characters using rep movs of quadwords. Copy remaining characters
// after running rep movs.
+ // Count is positive int32, dest and src are character pointers.
ASSERT(dest.is(rdi)); // rep movs destination
ASSERT(src.is(rsi)); // rep movs source
ASSERT(count.is(rcx)); // rep movs count
// Nothing to do for zero characters.
Label done;
- __ testq(count, count);
+ __ testl(count, count);
__ j(zero, &done);
// Make count the number of bytes to copy.
if (!ascii) {
ASSERT_EQ(2, sizeof(uc16)); // NOLINT
- __ addq(count, count);
+ __ addl(count, count);
}
// Don't enter the rep movs if there are fewer than 8 bytes to copy.
Label last_bytes;
- __ testq(count, Immediate(~7));
+ __ testl(count, Immediate(~7));
__ j(zero, &last_bytes);
// Copy from rsi to rdi using the rep movs instruction.
- __ movq(kScratchRegister, count);
- __ sar(count, Immediate(3)); // Number of doublewords to copy.
+ __ movl(kScratchRegister, count);
+ __ shr(count, Immediate(3)); // Number of quadwords to copy.
__ repmovsq();
// Find number of bytes left.
- __ movq(count, kScratchRegister);
+ __ movl(count, kScratchRegister);
__ and_(count, Immediate(7));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
- __ testq(count, count);
+ __ testl(count, count);
__ j(zero, &done);
// Copy remaining characters.
@@ -11117,9 +11409,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(1));
- __ addq(dest, Immediate(1));
- __ subq(count, Immediate(1));
+ __ incq(src);
+ __ incq(dest);
+ __ decl(count);
__ j(not_zero, &loop);
__ bind(&done);
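
As scalar C++, the copy strategy above is: move the bulk in 8-byte chunks (the repmovsq), then finish the remaining 0-7 bytes one at a time. count here is a byte count, i.e. after the doubling for two-byte strings; buffers are assumed non-overlapping, as a forward rep movs requires.

    #include <cstdint>
    #include <cstring>

    void CopyBytes(uint8_t* dest, const uint8_t* src, uint32_t count) {
      if (count == 0) return;                   // testl(count, count)
      uint32_t chunks = count >> 3;             // shr(count, 3)
      for (uint32_t i = 0; i < chunks; i++) {   // One iteration per movsq.
        std::memcpy(dest + i * 8, src + i * 8, 8);
      }
      for (uint32_t i = chunks * 8; i < count; i++) {
        dest[i] = src[i];                       // The &last_bytes byte loop.
      }
    }
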
@@ -11139,13 +11431,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits, since such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
- __ movq(scratch, c1);
- __ subq(scratch, Immediate(static_cast<int>('0')));
- __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ leal(scratch, Operand(c1, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(above, &not_array_index);
- __ movq(scratch, c2);
- __ subq(scratch, Immediate(static_cast<int>('0')));
- __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ leal(scratch, Operand(c2, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_found);
__ bind(&not_array_index);
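
The leal/cmpl pairs above implement the classic one-comparison range test: after subtracting '0', a single unsigned compare covers both c < '0' and c > '9'.

    #include <cassert>

    inline bool IsAsciiDigit(unsigned c) {
      return c - '0' <= static_cast<unsigned>('9' - '0');
    }

    int main() {
      assert(IsAsciiDigit('0') && IsAsciiDigit('9'));
      assert(!IsAsciiDigit('/') && !IsAsciiDigit(':'));  // Neighbors of 0, 9.
      return 0;
    }
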
@@ -11169,8 +11459,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Calculate capacity mask from the symbol table capacity.
Register mask = scratch2;
- __ movq(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ SmiToInteger32(mask, mask);
+ __ SmiToInteger32(mask,
+ FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ decl(mask);
Register undefined = scratch4;
@@ -11200,10 +11490,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register candidate = scratch; // Scratch register contains candidate.
ASSERT_EQ(1, SymbolTable::kEntrySize);
__ movq(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
// If entry is undefined, no string with this hash can be found.
__ cmpq(candidate, undefined);
@@ -11280,9 +11570,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch) {
// hash += hash << 3;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(3));
- __ addl(hash, scratch);
+ __ leal(hash, Operand(hash, hash, times_8, 0));
// hash ^= hash >> 11;
__ movl(scratch, hash);
__ sarl(scratch, Immediate(11));
@@ -11294,7 +11582,6 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ testl(hash, hash);
__ j(not_zero, &hash_not_zero);
__ movl(hash, Immediate(27));
__ bind(&hash_not_zero);
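
The whole finalization in C, for reference (the hash += hash << 15 step sits between the two hunks shown; its presence is my reading of the surrounding function). The leal with scale times_8 computes hash + hash*8 in one instruction, i.e. hash += hash << 3, and the removed testl was presumably redundant because the preceding add already sets the zero flag.

    #include <cstdint>

    uint32_t GetHash(uint32_t hash) {
      hash += hash << 3;         // leal(hash, Operand(hash, hash, times_8, 0))
      hash ^= hash >> 11;
      hash += hash << 15;        // Between the hunks shown above.
      if (hash == 0) hash = 27;  // Zero is reserved to mean "not computed".
      return hash;
    }
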
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 9d465839..cd03d2ac 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -314,7 +314,9 @@ class CodeGenerator: public AstVisitor {
static bool ShouldGenerateLog(Expression* type);
#endif
- static void RecordPositions(MacroAssembler* masm, int pos);
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
@@ -475,6 +477,22 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+ // Emits a code sequence that jumps to deferred code if the input
+ // is not a smi. Cannot be in MacroAssembler because it takes
+ // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred);
+
+ // Emits a code sequence that jumps to deferred code if the inputs
+ // are not both smis. Cannot be in MacroAssembler because it takes
+ // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred);
+
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
@@ -542,6 +560,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
@@ -571,10 +591,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for String.fromCharCode(n).
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -846,38 +869,6 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
- // Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for four reasons (in the
- // listed order):
- // * Receiver is not a string (receiver_not_string label).
- // * Index is not a smi (index_not_smi label).
- // * Index is out of range (index_out_of_range).
- // * Some other reason (slow_case label). In this case it's
- // guaranteed that the above conditions are not violated,
- // e.g. it's safe to assume the receiver is a string and the
- // index is a non-negative smi < length.
- // When successful, object, index, and scratch are clobbered.
- // Otherwise, scratch and result are clobbered.
- static void GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case);
-
- // Generates code for creating a one-char string from the given char
- // code. May do a runtime call, so any register can be clobbered
- // and, if the given invoke flag specifies a call, an internal frame
- // is required. In tail call mode the result must be rax register.
- static void GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- Register scratch,
- InvokeFlag flag);
-
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 89b98f14..96592548 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -124,9 +124,10 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- rax : key
+ // -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), false);
}
@@ -144,9 +145,10 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
}
@@ -179,10 +181,31 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nops to make space for a call instruction.
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
+ __ nop();
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted, no registers can
+ // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, true);
+}
+
+
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on x64");
}
+
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on x64");
}
@@ -215,6 +238,28 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return !Assembler::IsNop(rinfo()->pc());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCodeWithCall(
+ Debug::debug_break_slot()->entry(),
+ Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
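
A byte-buffer model of the slot machinery added above (kSlotLength = 13 is an assumed stand-in for Assembler::kDebugBreakSlotLength; the real patching goes through RelocInfo):

    #include <cstdint>
    #include <cstring>

    const int kSlotLength = 13;
    const uint8_t kNop = 0x90;

    void GenerateSlot(uint8_t* pc) {              // Emit patchable padding.
      std::memset(pc, kNop, kSlotLength);
    }

    bool IsDebugBreakAtSlot(const uint8_t* pc) {  // Mirrors !Assembler::IsNop.
      return pc[0] != kNop;
    }

    void SetDebugBreakAtSlot(uint8_t* pc, const uint8_t* call_sequence) {
      std::memcpy(pc, call_sequence, kSlotLength);  // Overwrite with a call.
    }

    void ClearDebugBreakAtSlot(uint8_t* pc, const uint8_t* original) {
      std::memcpy(pc, original, kSlotLength);       // Restore original bytes.
    }
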
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 5bd09c21..d99ea84a 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -187,12 +187,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence(function()->end_position());
+ EmitReturnSequence();
}
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -207,7 +207,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- CodeGenerator::RecordPositions(masm_, position);
+ CodeGenerator::RecordPositions(masm_, function()->end_position());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
@@ -1010,7 +1010,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
__ push(rax); // Enumeration cache length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
@@ -1020,7 +1019,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(Smi::FromInt(0)); // Map (0) - force slow check.
__ push(rax);
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
@@ -1129,15 +1127,15 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
- __ push(CodeGenerator::GlobalObject());
__ Move(rcx, var->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A test rax instruction following the call is used by the IC to
// indicate that the inobject property case was inlined. Ensure there
// is no test rax instruction here.
__ nop();
- DropAndApply(1, context, rax);
+ Apply(context, rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Comment cmnt(masm_, "Lookup slot");
@@ -1178,7 +1176,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Load the object.
MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ push(object_loc);
+ __ movq(rdx, object_loc);
// Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
@@ -1186,7 +1184,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- __ Push(key_literal->handle());
+ __ Move(rax, key_literal->handle());
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1194,8 +1192,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Notice: We must not have a "test rax, ..." instruction after the
// call. It is treated specially by the LoadIC code.
__ nop();
- // Drop key and object left on the stack by IC, and push the result.
- DropAndApply(2, context, rax);
+ Apply(context, rax);
}
}
@@ -1695,18 +1692,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- // Evaluate receiver.
- VisitForValue(expr->obj(), kStack);
-
if (key->IsPropertyName()) {
+ VisitForValue(expr->obj(), kAccumulator);
EmitNamedPropertyLoad(expr);
- // Drop receiver left on the stack by IC.
- DropAndApply(1, context_, rax);
+ Apply(context_, rax);
} else {
- VisitForValue(expr->key(), kStack);
+ VisitForValue(expr->obj(), kStack);
+ VisitForValue(expr->key(), kAccumulator);
+ __ pop(rdx);
EmitKeyedPropertyLoad(expr);
- // Drop key and receiver left on the stack by IC.
- DropAndApply(2, context_, rax);
+ Apply(context_, rax);
}
}
@@ -1828,7 +1823,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to a keyed property, use keyed load IC followed by function
// call.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rdx, Operand(rsp, 0));
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1836,8 +1832,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
 // By emitting a nop we make sure that we do not have a "test rax,..."
 // instruction after the call; it is treated specially by the LoadIC code.
__ nop();
- // Drop key left on the stack by IC.
- __ Drop(1);
// Pop receiver.
__ pop(rbx);
// Push result (function).
@@ -1906,76 +1900,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
- EmitCharFromCode(expr->arguments());
- } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
- EmitFastCharCodeAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else {
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2414,46 +2338,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
- Label slow_case, done;
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(rax, &slow_case);
- __ SmiToInteger32(rcx, rax);
- __ cmpl(rcx, Immediate(String::kMaxAsciiCharCode));
- __ j(above, &slow_case);
+ Label done;
+ StringCharFromCodeGenerator generator(rax, rbx);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
- __ Move(rbx, Factory::single_character_string_cache());
- __ movq(rbx, FieldOperand(rbx,
- rcx,
- times_pointer_size,
- FixedArray::kHeaderSize));
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
- __ movq(rax, rbx);
+ __ bind(&done);
+ Apply(context_, rbx);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = rbx;
+ Register index = rax;
+ Register scratch = rcx;
+ Register result = rdx;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
__ jmp(&done);
- __ bind(&slow_case);
- __ push(rax);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, rax);
+ Apply(context_, result);
}
-void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
- // TODO(fsc): Port the complete implementation from the classic back-end.
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- Apply(context_, rax);
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = rbx;
+ Register index = rax;
+ Register scratch1 = rcx;
+ Register scratch2 = rdx;
+ Register result = rax;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result, Smi::FromInt(0));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
}
+
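Both string helpers above encode the same spec edge cases: an out-of-range index yields NaN for charCodeAt and the empty string for charAt, and a non-smi index is routed through a conversion slow path. A small C++ sketch of just those observable semantics (not the generated code):

#include <cassert>
#include <cmath>
#include <string>

// Sketch of String.prototype.charCodeAt for ASCII input: NaN when the
// index is out of range (the kNanValueRootIndex path above).
double CharCodeAt(const std::string& s, int index) {
  if (index < 0 || index >= static_cast<int>(s.size())) return NAN;
  return static_cast<unsigned char>(s[index]);
}

// Sketch of String.prototype.charAt: the empty string when out of range
// (the kEmptyStringRootIndex path above).
std::string CharAt(const std::string& s, int index) {
  if (index < 0 || index >= static_cast<int>(s.size())) return "";
  return std::string(1, s[index]);
}

int main() {
  assert(std::isnan(CharCodeAt("abc", 5)));
  assert(CharAt("abc", 5) == "");
  assert(CharCodeAt("abc", 1) == 'b');
  return 0;
}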
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
@@ -2743,13 +2741,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
!proxy->var()->is_this() &&
proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
- __ push(CodeGenerator::GlobalObject());
__ Move(rcx, proxy->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
- __ movq(Operand(rsp, 0), rax);
+ __ push(rax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
proxy->var()->slot()->type() == Slot::LOOKUP) {
@@ -2859,11 +2857,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (expr->is_postfix() && context_ != Expression::kEffect) {
__ Push(Smi::FromInt(0));
}
- VisitForValue(prop->obj(), kStack);
if (assign_type == NAMED_PROPERTY) {
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
- VisitForValue(prop->key(), kStack);
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+    __ movq(rdx, Operand(rsp, 0));  // Leave receiver on the stack.
+ __ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 8766ebb1..89c21cba 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -56,18 +56,20 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register r1,
Register r2,
Register name,
+ Register r4,
DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
//
- // r1 - initially the receiver
- // - used for the index into the property dictionary
+ // r1 - initially the receiver.
+ // - unchanged on any jump to miss_label.
// - holds the result on exit.
//
// r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanged.
+ // r4 - used to hold the index into the property dictionary.
Label done;
@@ -104,8 +106,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ movq(r2, FieldOperand(r0, kCapacityOffset));
- __ SmiToInteger32(r2, r2);
+ __ SmiToInteger32(r2, FieldOperand(r0, kCapacityOffset));
__ decl(r2);
// Generate an unrolled loop that performs a few probes before
@@ -117,19 +118,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
+ __ movl(r4, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r4, Immediate(String::kHashShift));
if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ addl(r4, Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r2);
+ __ and_(r4, r2);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ lea(r4, Operand(r4, r4, times_2, 0)); // r4 = r4 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(r0, r1, times_pointer_size,
+ __ cmpq(name, Operand(r0, r4, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
__ j(equal, &done);
@@ -141,14 +142,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check that the value is a normal property.
__ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+ __ Test(Operand(r0, r4, times_pointer_size, kDetailsOffset - kHeapObjectTag),
Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(r1,
- Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
+ Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
}
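The unrolled loop above is quadratic probing: attempt i looks at entry (hash + GetProbeOffset(i)) & mask, and the entry index is then scaled by kEntrySize (three pointers: key, value, details). A sketch of the offsets the stub computes; GetProbeOffset is modeled here as the usual triangular offset (i + i*i)/2, an assumption rather than a quote of the V8 definition.

#include <cstdint>
#include <cstdio>

uint32_t GetProbeOffset(uint32_t i) { return (i + i * i) / 2; }  // Assumed.

const uint32_t kEntrySize = 3;  // StringDictionary::kEntrySize.

// Element index probed on attempt i for a given name hash; mask is the
// capacity minus one, exactly as computed with SmiToInteger32/decl above.
uint32_t ProbeElementIndex(uint32_t hash, uint32_t i, uint32_t mask) {
  uint32_t masked = (hash + GetProbeOffset(i)) & mask;
  return masked * kEntrySize;  // The lea(r4, Operand(r4, r4, times_2, 0)).
}

int main() {
  const uint32_t mask = 16 - 1;
  for (uint32_t i = 0; i < 4; i++) {  // The kProbes unrolled attempts.
    std::printf("probe %u -> element %u\n", (unsigned)i,
                (unsigned)ProbeElementIndex(0x1234, i, mask));
  }
  return 0;
}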
@@ -165,11 +166,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
+ // Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
- // Holds the result on exit if the load succeeded.
//
// r1 - used to hold the capacity mask of the dictionary
//
@@ -202,8 +203,8 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ xorl(r0, r1);
// Compute capacity mask.
- __ movq(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ SmiToInteger32(r1, r1);
+ __ SmiToInteger32(r1,
+ FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
@@ -245,7 +246,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ __ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -312,14 +313,14 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(Operand(rsp, 1 * kPointerSize)); // name
+ __ push(rdx); // receiver
+ __ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
@@ -330,14 +331,14 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(Operand(rsp, 1 * kPointerSize)); // name
+ __ push(rdx); // receiver
+ __ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
@@ -347,132 +348,136 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
- Label slow, check_string, index_int, index_string;
- Label check_pixel_array, probe_dictionary;
- Label check_number_dictionary;
-
- // Load name and receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ Label slow, check_string, index_smi, index_string;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Check that the object isn't a smi.
- __ JumpIfSmi(rcx, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects work as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(below, &slow);
// Check bit field.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
- // Save key in rbx in case we want it for the number dictionary
- // case.
- __ movq(rbx, rax);
- __ SmiToInteger32(rax, rax);
- // Get the elements array of the object.
- __ bind(&index_int);
- __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
+ __ bind(&index_smi);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This point is also jumped to from
+  // below, where a numeric string key is converted to a smi.
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
- __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
- __ movq(rax, Operand(rcx, rax, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+ __ movq(rbx, FieldOperand(rcx,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
+ __ movq(rax, rbx);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
- // Check whether the elements is a pixel array.
- // rax: untagged index
- // rcx: elements array
__ bind(&check_pixel_array);
+ // Check whether the elements object is a pixel array.
+ // rdx: receiver
+ // rax: key
+ // rcx: elements array
+  __ SmiToInteger32(rbx, rax);  // Used in both directions of the next branch.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
- __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
- __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
+ __ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movzxbq(rax, Operand(rax, rbx, times_1, 0));
__ Integer32ToSmi(rax, rax);
__ ret(0);
__ bind(&check_number_dictionary);
 // Check whether the elements object is a number dictionary.
- // rax: untagged index
- // rbx: key
+ // rdx: receiver
+ // rax: key
+ // rbx: key as untagged int32
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
+ GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi);
__ ret(0);
- // Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
+ // Slow case: Jump to runtime.
+ // rdx: receiver
+ // rax: key
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
GenerateRuntimeGetProperty(masm);
+
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ // rdx: receiver
+ // rax: key
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
- __ testl(rbx, Immediate(String::kIsArrayIndexMask));
+ __ testl(rbx, Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, &index_string); // The value in rbx is used at jump target.
// Is the string a symbol?
- __ j(not_zero, &index_string); // The value in rbx is used at jump target.
ASSERT(kSymbolTag != 0);
- __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+ __ testb(FieldOperand(rcx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
// If the receiver is a fast-case object, check the keyed lookup
 // cache. Otherwise probe the dictionary leaving result in rdx.
- __ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
+ __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movl(rdx, rbx);
- __ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rax, Immediate(String::kHashShift));
- __ xor_(rdx, rax);
- __ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movl(rcx, rbx);
+ __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+ __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
+ __ shr(rdi, Immediate(String::kHashShift));
+ __ xor_(rcx, rdi);
+ __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
- __ movq(rdi, rdx);
+ __ movq(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ movq(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
- __ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
- __ cmpq(Operand(rsp, kPointerSize), rdi);
+ __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ j(not_equal, &slow);
// Get field offset which is a 32-bit integer and check that it is
@@ -480,29 +485,32 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ movq(kScratchRegister, cache_field_offsets);
- __ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
- __ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ cmpq(rax, rdx);
+ __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subq(rdi, rcx);
__ j(above_equal, &slow);
// Load in-object property.
- __ subq(rax, rdx);
- __ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rax, rdx);
- __ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addq(rcx, rdi);
+ __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
+ // rdx: receiver
+ // rax: key
GenerateDictionaryLoad(masm,
&slow,
rbx,
- rcx,
rdx,
+ rcx,
rax,
+ rdi,
DICTIONARY_CHECK_DONE);
- __ movq(rax, rcx);
+ __ movq(rax, rdx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
// If the hash field contains an array index pick it out. The assert checks
@@ -512,78 +520,51 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- __ movl(rax, rbx);
- __ and_(rax, Immediate(String::kArrayIndexHashMask));
- __ shrl(rax, Immediate(String::kHashShift));
- __ jmp(&index_int);
+ // We want the smi-tagged index in rax. Even if we subsequently go to
+ // the slow case, converting the key to a smi is always valid.
+ // rdx: receiver
+ // rax: key (a string)
+ // rbx: key's hash field, including its array index value.
+ __ and_(rbx, Immediate(String::kArrayIndexValueMask));
+ __ shr(rbx, Immediate(String::kHashShift));
+  // Here we actually clobber the key (rax), which would be used if we call
+  // into the runtime later. However, as the new key is the numeric value of
+  // the string key, there is no difference in using either key.
+ __ Integer32ToSmi(rax, rbx);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
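The index_string tail above never re-hashes the string: when the hash field advertises a cached array index, the numeric key is carved straight out of that word and re-tagged as a smi. A sketch of the bit extraction; the masks and shift below are illustrative assumptions, not the real String constants.

#include <cassert>
#include <cstdint>

const uint32_t kHashShift = 2;                      // Assumed flag-bit count.
const uint32_t kArrayIndexValueMask = 0x3FFFFFFCu;  // Assumed value bits.
const uint32_t kContainsCachedArrayIndexMask = 1u;  // Assumed flag mask.

// The stub tests this with testl and jumps to index_string on zero,
// i.e. a zero flag means the cached index is present.
bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kContainsCachedArrayIndexMask) == 0;
}

// and_(rbx, Immediate(kArrayIndexValueMask)); shr(rbx, kHashShift).
uint32_t ArrayIndexFromHashField(uint32_t hash_field) {
  return (hash_field & kArrayIndexValueMask) >> kHashShift;
}

int main() {
  uint32_t hash_field = 7u << kHashShift;  // A string like "7", index cached.
  assert(HasCachedArrayIndex(hash_field));
  assert(ArrayIndexFromHashField(hash_field) == 7);
  return 0;
}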
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- Label index_not_smi;
Label index_out_of_range;
- Label slow_char_code;
- Label got_char_code;
Register receiver = rdx;
Register index = rax;
- Register code = rbx;
- Register scratch = rcx;
-
- __ movq(index, Operand(rsp, 1 * kPointerSize));
- __ movq(receiver, Operand(rsp, 2 * kPointerSize));
-
- StringHelper::GenerateFastCharCodeAt(masm,
- receiver,
- index,
- scratch,
- code,
- &miss, // When not a string.
- &index_not_smi,
- &index_out_of_range,
- &slow_char_code);
- // If we didn't bail out, code register contains smi tagged char
- // code.
- __ bind(&got_char_code);
- StringHelper::GenerateCharFromCode(masm, code, rax, scratch, JUMP_FUNCTION);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from char from code tail call");
-#endif
-
- // Check if key is a heap number.
- __ bind(&index_not_smi);
- __ CompareRoot(FieldOperand(index, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &miss);
-
- // Push receiver and key on the stack (now that we know they are a
- // string and a number), and call runtime.
- __ bind(&slow_char_code);
- __ EnterInternalFrame();
- __ push(receiver);
- __ push(index);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- ASSERT(!code.is(rax));
- __ movq(code, rax);
- __ LeaveInternalFrame();
+ Register scratch1 = rbx;
+ Register scratch2 = rcx;
+ Register result = rax;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
- // Check if the runtime call returned NaN char code. If yes, return
- // undefined. Otherwise, we can continue.
- if (FLAG_debug_code) {
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(code, &got_char_code);
- __ CompareRoot(FieldOperand(code, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ Assert(equal, "StringCharCodeAt must return smi or heap number");
- }
- __ CompareRoot(code, Heap::kNanValueRootIndex);
- __ j(not_equal, &got_char_code);
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -597,80 +578,80 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label slow, failed_allocation;
- // Load name and receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rcx, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Check that the object is a JS object.
- __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
 // map checks. The map is already in rcx.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: index (as a smi)
- // rcx: JSObject
- __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ // rdx: JSObject
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
- __ SmiToInteger32(rax, rax);
- __ cmpl(rax, FieldOperand(rcx, ExternalArray::kLengthOffset));
+ __ SmiToInteger32(rcx, rax);
+ __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
- // rax: untagged index
- // rcx: elements array
- __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
- // rcx: base pointer of external storage
+ // rax: index (as a smi)
+ // rdx: receiver (JSObject)
+ // rcx: untagged index
+ // rbx: elements array
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
- __ movsxbq(rax, Operand(rcx, rax, times_1, 0));
+ __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
+ __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalShortArray:
- __ movsxwq(rax, Operand(rcx, rax, times_2, 0));
+ __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalUnsignedShortArray:
- __ movzxwq(rax, Operand(rcx, rax, times_2, 0));
+ __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalIntArray:
- __ movsxlq(rax, Operand(rcx, rax, times_4, 0));
+ __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalUnsignedIntArray:
- __ movl(rax, Operand(rcx, rax, times_4, 0));
+ __ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
- __ fld_s(Operand(rcx, rax, times_4, 0));
+ __ fld_s(Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
+ // rax: index
+ // rdx: receiver
// For integer array types:
- // rax: value
+ // rcx: value
// For floating-point array type:
// FP(0): value
@@ -681,42 +662,45 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
- __ JumpIfNotValidSmiValue(rax, &box_int);
+ __ JumpIfNotValidSmiValue(rcx, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
- __ JumpIfUIntNotValidSmiValue(rax, &box_int);
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
}
- __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
- __ push(rax);
+ __ push(rcx);
if (array_type == kExternalIntArray) {
__ fild_s(Operand(rsp, 0));
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
- // Need to zero-extend the value.
+ // The value is zero-extended on the stack, because all pushes are
+ // 64-bit and we loaded the value from memory with movl.
__ fild_d(Operand(rsp, 0));
}
- __ pop(rax);
+ __ pop(rcx);
// FP(0): value
- __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+ __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value.
+ __ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+ __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value.
+ __ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else {
- __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rax, rcx);
__ ret(0);
}
@@ -727,7 +711,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ fincstp();
// Fall through to slow case.
- // Slow case: Load name and receiver from stack and jump to runtime.
+ // Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
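Only the int and uint loads above can avoid allocation: the value is re-tagged as a smi when it fits, otherwise it is boxed into a fresh HeapNumber through the x87 conversion, and float loads always box. A sketch of that policy, assuming the 32-bit smi payload of the x64 representation:

#include <cassert>
#include <cstdint>

// With a 32-bit payload every int32_t fits in a smi; this is what
// JumpIfNotValidSmiValue establishes before Integer32ToSmi.
bool Int32FitsInSmi(int32_t) { return true; }

// An unsigned value fits only if it is also a valid int32 payload; this is
// the JumpIfUIntNotValidSmiValue check guarding the box_int path.
bool Uint32FitsInSmi(uint32_t value) { return value <= 0x7FFFFFFFu; }

// true: return a smi; false: AllocateHeapNumber + fstp_d.
bool ReturnsSmi(bool is_float, bool is_unsigned, uint32_t raw) {
  if (is_float) return false;  // kExternalFloatArray always boxes.
  if (is_unsigned) return Uint32FitsInSmi(raw);
  return Int32FitsInSmi(static_cast<int32_t>(raw));
}

int main() {
  assert(ReturnsSmi(false, false, 0xFFFFFFFFu));  // int32 -1: smi.
  assert(!ReturnsSmi(false, true, 0xFFFFFFFFu));  // uint32 4294967295: boxed.
  assert(!ReturnsSmi(true, false, 0));            // float: always boxed.
  return 0;
}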
@@ -736,37 +720,33 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
// -----------------------------------
Label slow;
- // Load key and receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
// Check that the receiver isn't a smi.
- __ JumpIfSmi(rcx, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
- __ movb(rdx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ andb(rdx, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(rdx, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
+ __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
+ __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
- __ pop(rdx);
- __ push(rcx); // receiver
+ __ pop(rcx);
+ __ push(rdx); // receiver
__ push(rax); // key
- __ push(rdx); // return address
+ __ push(rcx); // return address
// Perform tail call to the entry.
__ TailCallExternalReference(ExternalReference(
@@ -852,9 +832,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
- // Untag the key (for checking against untagged length in the fixed array).
- __ SmiToInteger32(rdi, rcx);
- __ cmpl(rdi, FieldOperand(rbx, Array::kLengthOffset));
+ __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// rax: value
// rbx: FixedArray
// rcx: index (as a smi)
@@ -903,11 +881,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rcx: index (as a smi)
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
- __ SmiToInteger64(rdi, rcx);
- __ cmpl(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
- // Increment and restore smi-tag.
- __ Integer64PlusConstantToSmi(rdi, rdi, 1);
+  // Increment the index to get the new length.
+ __ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
@@ -936,16 +913,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
- __ movq(Operand(rbx, index.reg, index.scale,
- FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
rax);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
- __ movq(Operand(rbx, index2.reg, index2.scale,
- FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx);
@@ -1265,7 +1240,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Search dictionary - put result in register rdx.
- GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, rdi, CHECK_DICTIONARY);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@@ -1355,6 +1330,21 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ UNREACHABLE();
+}
+
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ UNREACHABLE();
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ UNREACHABLE();
+}
+
+
// The offset from the inlined patch site to the start of the
// inlined load instruction.
const int LoadIC::kOffsetToLoadInstruction = 20;
@@ -1370,13 +1360,13 @@ void LoadIC::ClearInlinedVersion(Address address) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rax); // receiver
__ push(rcx); // name
__ push(rbx); // return address
@@ -1388,14 +1378,12 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
-
StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1404,14 +1392,12 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
-
StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1420,13 +1406,11 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
- __ movq(rax, Operand(rsp, kPointerSize));
-
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
@@ -1440,14 +1424,12 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss, probe, global;
- __ movq(rax, Operand(rsp, kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(rax, &miss);
@@ -1469,7 +1451,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in rax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
+ rcx, rdi, CHECK_DICTIONARY);
__ ret(0);
// Global object access: Check access rights.
@@ -1477,23 +1460,20 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(rax, rdx, &miss);
__ jmp(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
GenerateMiss(masm);
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
-
StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b7a6aaf9..3823cadb 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -90,58 +90,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
- Label fast;
-
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
- ASSERT(is_int32(~Page::kPageAlignmentMask));
- and_(object,
- Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
- Register page_start = object;
-
- // Compute the bit addr in the remembered set/index of the pointer in the
- // page. Reuse 'addr' as pointer_offset.
- subq(addr, page_start);
- shr(addr, Immediate(kPointerSizeLog2));
- Register pointer_offset = addr;
-
- // If the bit offset lies beyond the normal remembered set range, it is in
- // the extra remembered set area of a large object.
- cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
- j(below, &fast);
-
- // We have a large object containing pointers. It must be a FixedArray.
-
- // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
- // extra remembered set after the large object.
-
- // Load the array length into 'scratch'.
- movl(scratch,
- Operand(page_start,
- Page::kObjectStartOffset + FixedArray::kLengthOffset));
- Register array_length = scratch;
-
- // Extra remembered set starts right after the large object (a FixedArray), at
- // page_start + kObjectStartOffset + objectSize
- // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
- // Add the delta between the end of the normal RSet and the start of the
- // extra RSet to 'page_start', so that addressing the bit using
- // 'pointer_offset' hits the extra RSet words.
- lea(page_start,
- Operand(page_start, array_length, times_pointer_size,
- Page::kObjectStartOffset + FixedArray::kHeaderSize
- - Page::kRSetEndOffset));
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bind(&fast);
- bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
-}
-
-
-// Set the remembered set bit for [object+offset].
+ and_(object, Immediate(~Page::kPageAlignmentMask));
+
+  // Compute the number of the region covering addr. See the
+  // Page::GetRegionNumberForAddress method for more details.
+ and_(addr, Immediate(Page::kPageAlignmentMask));
+ shrl(addr, Immediate(Page::kRegionSizeLog2));
+
+ // Set dirty mark for region.
+ bts(Operand(object, Page::kDirtyFlagOffset), addr);
+}
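The rewritten helper replaces remembered-set bit arithmetic with a per-page region bitmap: the offset of the slot within its page selects a region, and a single bts marks that region dirty in the page header. A compact sketch of the address arithmetic; the page and region sizes are illustrative assumptions.

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 13;  // 8K pages (assumed).
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const int kRegionSizeLog2 = 8;        // 256-byte regions (assumed).

uintptr_t PageStart(uintptr_t addr) {
  return addr & ~kPageAlignmentMask;  // and_(object, ~kPageAlignmentMask).
}

int RegionNumber(uintptr_t addr) {
  // and_(addr, kPageAlignmentMask); shrl(addr, kRegionSizeLog2).
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

int main() {
  uint32_t dirty_marks = 0;  // Stands in for the word at Page::kDirtyFlagOffset.
  uintptr_t slot = PageStart(0x10000) + 0x340;
  dirty_marks |= 1u << RegionNumber(slot);  // The bts instruction.
  assert(RegionNumber(slot) == 3);          // 0x340 >> 8.
  assert(dirty_marks == (1u << 3));
  return 0;
}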
+
+
+// For the page containing |object|, mark the region covering [object+offset]
+// dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the smi_index register contains the array index into
// the elements array represented as a smi. Otherwise it can be used as a
@@ -156,9 +119,8 @@ void MacroAssembler::RecordWrite(Register object,
// registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
- // First, check if a remembered set write is even needed. The tests below
- // catch stores of Smis and stores into young gen (which does not have space
- // for the remembered set bits).
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
@@ -191,8 +153,8 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
bind(&okay);
}
- // Test that the object address is not in the new space. We cannot
- // set remembered set bits in the new space.
+ // Test that the object address is not in the new space. We cannot
+ // update page dirty marks for new space pages.
InNewSpace(object, scratch, equal, &done);
// The offset is relative to a tagged or untagged HeapObject pointer,
@@ -201,48 +163,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
- // We use optimized write barrier code if the word being written to is not in
- // a large object page, or is in the first "page" of a large object page.
- // We make sure that an offset is inside the right limits whether it is
- // tagged or untagged.
- if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
- // Compute the bit offset in the remembered set, leave it in 'scratch'.
- lea(scratch, Operand(object, offset));
- ASSERT(is_int32(Page::kPageAlignmentMask));
- and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
- shr(scratch, Immediate(kPointerSizeLog2));
-
- // Compute the page address from the heap object pointer, leave it in
- // 'object' (immediate value is sign extended).
- and_(object, Immediate(~Page::kPageAlignmentMask));
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bts(Operand(object, Page::kRSetOffset), scratch);
+ Register dst = smi_index;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
} else {
- Register dst = smi_index;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric.
- SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
- lea(dst, FieldOperand(object,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- }
- // If we are already generating a shared stub, not inlining the
- // record write code isn't going to save us any memory.
- if (generating_stub()) {
- RecordWriteHelper(object, dst, scratch);
- } else {
- RecordWriteStub stub(object, dst, scratch);
- CallStub(&stub);
- }
+ // array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric.
+ SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+ lea(dst, FieldOperand(object,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
}
+ RecordWriteHelper(object, dst, scratch);
bind(&done);
@@ -573,6 +506,11 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
}
+void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
@@ -614,7 +552,7 @@ void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- cmpl(Operand(dst, kIntSize), Immediate(src->value()));
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
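The switch from kIntSize to kSmiShift / kBitsPerByte spells out why these 32-bit operands work at all: an x64 smi keeps its payload in the upper half of the word (value << 32, with zero tag bits below), so on a little-endian machine the integer sits at byte offset 4 of the field. A worked check of that layout:

#include <cassert>
#include <cstdint>
#include <cstring>

const int kSmiShift = 32;
const int kBitsPerByte = 8;

int main() {
  int32_t value = -42;
  // Tagged smi: payload in the upper 32 bits, zero tag bits below.
  uint64_t smi = static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;

  // A 32-bit load at byte offset kSmiShift / kBitsPerByte == 4 recovers the
  // payload; this is the operand the cmpl/addl above address.
  int32_t loaded;
  std::memcpy(&loaded,
              reinterpret_cast<const char*>(&smi) + kSmiShift / kBitsPerByte,
              sizeof(loaded));
  assert(loaded == value);
  return 0;
}

(This only holds little-endian, which is the case on x64.)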
@@ -638,6 +576,18 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
}
+void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
+ Register src,
+ int power) {
+ ASSERT((0 <= power) && (power < 32));
+ if (dst.is(src)) {
+ shr(dst, Immediate(power + kSmiShift));
+ } else {
+ UNIMPLEMENTED(); // Not used.
+ }
+}
+
+
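The new helper is a single shift because of an identity of the representation: for a non-negative payload v the tagged smi is v << 32, so a logical right shift by power + kSmiShift produces v >> power, already untagged. A quick check:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiShift = 32;
  const uint64_t v = 1000;  // Positive smi payload.
  const uint64_t smi = v << kSmiShift;
  for (int power = 0; power < 8; power++) {
    // shr(dst, Immediate(power + kSmiShift)) in the macro assembler.
    assert((smi >> (power + kSmiShift)) == (v >> power));
  }
  return 0;
}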
Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
@@ -916,7 +866,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kIntSize), Immediate(constant->value()));
+ addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
}
}
@@ -2594,7 +2544,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movl(FieldOperand(result, String::kHashFieldOffset),
+ movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -2632,7 +2582,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movl(FieldOperand(result, String::kHashFieldOffset),
+ movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -2691,20 +2641,27 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
+
int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
- // On Windows stack slots are reserved by the caller for all arguments
- // including the ones passed in registers. On Linux 6 arguments are passed in
- // registers and the caller does not reserve stack slots for them.
+  // On Windows 64, stack slots are reserved by the caller for all arguments,
+  // including the ones passed in registers, and space is always allocated for
+  // the four register arguments even if the function takes fewer than four
+  // arguments.
+  // On the AMD64 ABI (Linux/Mac) the first six arguments are passed in
+  // registers and the caller does not reserve stack slots for them.
ASSERT(num_arguments >= 0);
#ifdef _WIN64
- static const int kArgumentsWithoutStackSlot = 0;
+ static const int kMinimumStackSlots = 4;
+ if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
+ return num_arguments;
#else
- static const int kArgumentsWithoutStackSlot = 6;
+ static const int kRegisterPassedArguments = 6;
+ if (num_arguments < kRegisterPassedArguments) return 0;
+ return num_arguments - kRegisterPassedArguments;
#endif
- return num_arguments > kArgumentsWithoutStackSlot ?
- num_arguments - kArgumentsWithoutStackSlot : 0;
}
+
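The rewritten function makes the two conventions explicit: Win64 always reserves caller-side home slots for at least four arguments, while the AMD64 System V ABI passes the first six in registers with no slots reserved for them. A sketch with both branches side by side and some example values:

#include <cassert>

// Mirrors ArgumentStackSlotsForCFunctionCall, parameterized over the ABI.
int StackSlots(int num_arguments, bool win64) {
  if (win64) {
    const int kMinimumStackSlots = 4;  // Home slots, always reserved.
    return num_arguments < kMinimumStackSlots ? kMinimumStackSlots
                                              : num_arguments;
  }
  const int kRegisterPassedArguments = 6;  // rdi, rsi, rdx, rcx, r8, r9.
  return num_arguments < kRegisterPassedArguments
             ? 0
             : num_arguments - kRegisterPassedArguments;
}

int main() {
  assert(StackSlots(2, true) == 4);   // Win64: still four home slots.
  assert(StackSlots(6, true) == 6);
  assert(StackSlots(6, false) == 0);  // AMD64: all six in registers.
  assert(StackSlots(8, false) == 2);
  return 0;
}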
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index b4f3240e..0acce054 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -78,8 +78,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
- // Set the remebered set bit for an address which points into an
- // object. RecordWriteHelper only works if the object is not in new
+  // For the page containing |object|, mark the region covering |addr| dirty.
+  // RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@@ -93,7 +93,7 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* branch);
- // Set the remembered set bit for [object+offset].
+  // For the page containing |object|, mark the region covering [object+offset]
+  // dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@@ -103,7 +103,7 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);
- // Set the remembered set bit for [object+offset].
+  // For the page containing |object|, mark the region covering [object+offset]
+  // dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@@ -210,6 +210,7 @@ class MacroAssembler: public Assembler {
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
+ void SmiToInteger32(Register dst, const Operand& src);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
@@ -220,6 +221,13 @@ class MacroAssembler: public Assembler {
Register src,
int power);
+ // Divide a positive smi's integer value by a power of two.
+  // Provides the result as a 32-bit integer value.
+ void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
+ Register src,
+ int power);
+
+
// Simple comparison of smis.
void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 8b095cbb..cc544705 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -114,6 +114,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype) {
+ // Get the global function with the given index.
+ JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
@@ -375,206 +386,6 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
-class LoadInterceptorCompiler BASE_EMBEDDED {
- public:
- explicit LoadInterceptorCompiler(Register name) : name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- AccessorInfo* callback = NULL;
- bool optimize = false;
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- if (lookup->type() == FIELD) {
- optimize = true;
- } else if (lookup->type() == CALLBACKS) {
- Object* callback_object = lookup->GetCallbackObject();
- if (callback_object->IsAccessorInfo()) {
- callback = AccessorInfo::cast(callback_object);
- optimize = callback->getter() != NULL;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
- miss_label);
- return;
- }
-
- // Note: starting a frame here makes GC aware of pointers pushed below.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS) {
- __ push(receiver);
- }
- __ push(holder);
- __ push(name_);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_);
- __ pop(holder);
- if (lookup->type() == CALLBACKS) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Check that the maps from interceptor's holder to field's holder
- // haven't changed...
- holder = stub_compiler->CheckPrototypes(interceptor_holder,
- holder,
- lookup->holder(),
- scratch1,
- scratch2,
- name,
- miss_label);
- // ... and retrieve a field from field's holder.
- stub_compiler->GenerateFastPropertyLoad(masm,
- rax,
- holder,
- lookup->holder(),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Prepare for tail call. Push receiver to stack after return address.
- Label cleanup;
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(scratch2);
-
- // Check that the maps from interceptor's holder to callback's holder
- // haven't changed.
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- &cleanup);
-
- // Continue tail call preparation: push remaining parameters after
- // return address.
- __ pop(scratch2); // return address
- __ push(holder);
- __ Move(holder, Handle<AccessorInfo>(callback));
- __ push(holder);
- __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
- __ push(name_);
- __ push(scratch2); // restore return address
-
- // Tail call to runtime.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(ref, 5, 1);
-
- // Clean up code: we pushed receiver after return address and
- // need to remove it from there.
- __ bind(&cleanup);
- __ pop(scratch1); // return address
- __ pop(scratch2); // receiver
- __ push(scratch1);
- }
- }
-
-
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* interceptor_holder,
- Label* miss_label) {
- __ pop(scratch); // save old return address
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ push(scratch); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallExternalReference(ref, 5, 1);
- }
-
- private:
- Register name_;
-};
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -761,9 +572,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
- depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -776,10 +587,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(),
- scratch1, scratch2, name,
- depth2, miss);
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+ } else {
+ // CheckPrototypes has the side effect of fetching a 'holder' for the
+ // API (an object which is 'instanceof' for the signature). It is safe
+ // to omit it here: if present, it was already fetched by the previous
+ // CheckPrototypes call.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
// Invoke function.
if (can_do_fast_api_call) {
@@ -888,6 +706,12 @@ static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
#define __ ACCESS_MASM((masm()))
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -969,9 +793,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
@@ -989,9 +812,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- rax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
@@ -1011,9 +833,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- rax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
@@ -1038,8 +859,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss_in_smi_check);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1090,8 +910,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@@ -1148,7 +967,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+ Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1156,8 +975,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ SmiAddConstant(rax, rax, Smi::FromInt(argc));
// Get the element's length into rcx.
- __ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rcx, rcx);
+ __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ SmiCompare(rax, rcx);
@@ -1176,12 +994,12 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(Operand(rdx, 0), rcx);
// Check if value is a smi.
- __ JumpIfNotSmi(rcx, &with_rset_update);
+ __ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
- __ bind(&with_rset_update);
+ __ bind(&with_write_barrier);
__ InNewSpace(rbx, rcx, equal, &exit);
@@ -1229,11 +1047,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
- __ addl(FieldOperand(rbx, FixedArray::kLengthOffset),
- Immediate(kAllocationDelta));
+ __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
+ Smi::FromInt(kAllocationDelta));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- // Elements are in new space, so no remembered set updates are necessary.
+ // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
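The renames in the hunks above (with_rset_update to with_write_barrier, and the reworded new-space comment) all reflect one generational invariant: only stores into old-space objects need dirty-region marking, since the scavenger visits all of new space on every minor collection anyway. A simplified sketch of that decision; the Page/Heap helper names approximate this revision's API and are not guaranteed exact:

// Illustrative only: the test behind the write barrier fast path.
static inline void RecordWriteSketch(HeapObject* object, Address slot) {
  // Stores into new-space objects need no bookkeeping.
  if (Heap::InNewSpace(object)) return;
  // Old-space store: mark the region covering |slot| dirty so the
  // collector later rescans it for old-to-new pointers.
  Page::FromAddress(object->address())->MarkRegionDirty(slot);
}

This is also why the InNewSpace check above jumps straight to the exit: when the elements array is in new space, the store needs no barrier at all.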
@@ -1246,8 +1064,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1331,14 +1148,32 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
+Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
+
+Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
@@ -1396,8 +1231,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(argc);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -1480,8 +1314,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
@@ -1493,13 +1326,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss, &failure);
@@ -1518,13 +1350,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Object* value,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1538,15 +1369,12 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Load receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
-
// Check that the receiver is not a smi.
__ JumpIfSmi(rax, &miss);
@@ -1584,13 +1412,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
int index,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1604,16 +1431,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
- __ movq(rax, Operand(rsp, kPointerSize));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(receiver,
@@ -1640,15 +1466,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the receiver from the stack.
- __ movq(rax, Operand(rsp, kPointerSize));
-
// If the object is the holder then we know that it's a global
// object, which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
@@ -1660,19 +1483,20 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
// Get the value from the cell.
- __ Move(rax, Handle<JSGlobalPropertyCell>(cell));
- __ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset));
+ __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
} else if (FLAG_debug_code) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
__ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
@@ -1689,14 +1513,12 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_callback, 1);
// Check that the name has not changed.
@@ -1704,7 +1526,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ j(not_equal, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+ bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1719,21 +1541,19 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
+ GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_array_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1748,21 +1568,19 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
JSObject* holder,
Object* value) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
+ GenerateLoadConstant(receiver, holder, rdx, rbx, rcx,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -1775,21 +1593,19 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
+ GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1803,14 +1619,12 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
// Check that the name has not changed.
@@ -1822,9 +1636,9 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
GenerateLoadInterceptor(receiver,
holder,
&lookup,
- rcx,
- rax,
rdx,
+ rax,
+ rcx,
rbx,
name,
&miss);
@@ -1839,21 +1653,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadStringLength(masm(), rcx, rdx, rbx, &miss);
+ GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2031,21 +1843,19 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* holder,
int index) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_field, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
+ GenerateLoadField(receiver, holder, rdx, rbx, rcx, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -2117,9 +1927,8 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
-
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@@ -2127,18 +1936,128 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- LoadInterceptorCompiler compiler(name_reg);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- lookup,
- receiver,
- scratch1,
- scratch2,
- miss);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them; other cases may be added
+ // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+ // Invoke the interceptor. Note: map checks from the receiver to the
+ // interceptor's holder have been compiled before (see the caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if the interceptor provided a value for the property. If
+ // so, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ // Check that the maps from the interceptor's holder to the lookup's
+ // holder haven't changed, and load the lookup's holder into holder_reg.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+ // We found FIELD property in prototype chain of interceptor's holder.
+ // Retrieve a field from field's holder.
+ GenerateFastPropertyLoad(masm(), rax, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ // We found CALLBACKS property in prototype chain of interceptor's
+ // holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ pop(scratch2); // return address
+ __ push(receiver);
+ __ push(holder_reg);
+ __ Move(holder_reg, Handle<AccessorInfo>(callback));
+ __ push(holder_reg);
+ __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ push(name_reg);
+ __ push(scratch2); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ __ pop(scratch2); // save old return address
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
}
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index db316bba..a0acd6a2 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1072,14 +1072,14 @@ void VirtualFrame::MoveResultsToRegisters(Result* a,
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. The IC expects
- // name in rcx and receiver on the stack. It does not drop the
- // receiver.
+ // Name and receiver are on the top of the frame. Both are dropped.
+ // The IC expects name in rcx and receiver in rax.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
- name.ToRegister(rcx);
- name.Unuse();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ MoveResultsToRegisters(&name, &receiver, rcx, rax);
+
return RawCallCodeObject(ic, mode);
}
@@ -1088,7 +1088,10 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. The IC expects them on
- // the stack. It does not drop them.
+ // Key and receiver are on top of the frame. Both are dropped.
+ // The IC expects the key in rax and the receiver in rdx.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
+ Result name = Pop();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ MoveResultsToRegisters(&name, &receiver, rax, rdx);
return RawCallCodeObject(ic, mode);
}
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 1c9751bb..affe18ff 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -590,7 +590,7 @@ class VirtualFrame : public ZoneObject {
inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
- friend class DeferredCode;
+ friend class FrameRegisterState;
friend class JumpTarget;
};
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 46eaccd5..91231253 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -27,6 +27,8 @@
#include <limits.h>
+#define USE_NEW_QUERY_CALLBACKS
+
#include "v8.h"
#include "api.h"
@@ -610,6 +612,72 @@ THREADED_TEST(ScavengeExternalAsciiString) {
}
+class TestAsciiResourceWithDisposeControl: public TestAsciiResource {
+ public:
+ static int dispose_calls;
+
+ TestAsciiResourceWithDisposeControl(const char* data, bool dispose)
+ : TestAsciiResource(data),
+ dispose_(dispose) { }
+
+ void Dispose() {
+ ++dispose_calls;
+ if (dispose_) delete this;
+ }
+ private:
+ bool dispose_;
+};
+
+
+int TestAsciiResourceWithDisposeControl::dispose_calls = 0;
+
+
+TEST(ExternalStringWithDisposeHandling) {
+ const char* c_source = "1 + 2 * 3";
+
+ // Use a stack-allocated external string resource.
+ TestAsciiResource::dispose_count = 0;
+ TestAsciiResourceWithDisposeControl::dispose_calls = 0;
+ TestAsciiResourceWithDisposeControl res_stack(i::StrDup(c_source), false);
+ {
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<String> source = String::NewExternal(&res_stack);
+ Local<Script> script = Script::Compile(source);
+ Local<Value> value = script->Run();
+ CHECK(value->IsNumber());
+ CHECK_EQ(7, value->Int32Value());
+ v8::internal::Heap::CollectAllGarbage(false);
+ CHECK_EQ(0, TestAsciiResource::dispose_count);
+ }
+ v8::internal::CompilationCache::Clear();
+ v8::internal::Heap::CollectAllGarbage(false);
+ CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
+ CHECK_EQ(0, TestAsciiResource::dispose_count);
+
+ // Use a heap-allocated external string resource.
+ TestAsciiResource::dispose_count = 0;
+ TestAsciiResourceWithDisposeControl::dispose_calls = 0;
+ TestAsciiResource* res_heap =
+ new TestAsciiResourceWithDisposeControl(i::StrDup(c_source), true);
+ {
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<String> source = String::NewExternal(res_heap);
+ Local<Script> script = Script::Compile(source);
+ Local<Value> value = script->Run();
+ CHECK(value->IsNumber());
+ CHECK_EQ(7, value->Int32Value());
+ v8::internal::Heap::CollectAllGarbage(false);
+ CHECK_EQ(0, TestAsciiResource::dispose_count);
+ }
+ v8::internal::CompilationCache::Clear();
+ v8::internal::Heap::CollectAllGarbage(false);
+ CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
+ CHECK_EQ(1, TestAsciiResource::dispose_count);
+}
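Both scenarios above lean on the external-string resource contract: V8 invokes Dispose() exactly once when the external string becomes garbage, and the default Dispose() deletes the resource, which is why the stack-allocated variant must be constructed with dispose == false. A minimal conforming resource might look like this (sketch; the class name is illustrative):

#include <string.h>

class MinimalAsciiResource : public v8::String::ExternalAsciiStringResource {
 public:
  explicit MinimalAsciiResource(const char* data)
      : data_(data), length_(strlen(data)) {}
  // Inherits the default Dispose(), i.e. 'delete this', so instances
  // must be heap allocated.
  const char* data() const { return data_; }
  size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};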
+
+
THREADED_TEST(StringConcat) {
{
v8::HandleScope scope;
@@ -1120,11 +1188,11 @@ v8::Handle<v8::Boolean> CheckThisIndexedPropertyQuery(
}
-v8::Handle<v8::Boolean> CheckThisNamedPropertyQuery(Local<String> property,
+v8::Handle<v8::Integer> CheckThisNamedPropertyQuery(Local<String> property,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Boolean>();
+ return v8::Handle<v8::Integer>();
}
@@ -1221,13 +1289,13 @@ static v8::Handle<Value> PrePropertyHandlerGet(Local<String> key,
}
-static v8::Handle<v8::Boolean> PrePropertyHandlerHas(Local<String> key,
- const AccessorInfo&) {
+static v8::Handle<v8::Integer> PrePropertyHandlerQuery(Local<String> key,
+ const AccessorInfo&) {
if (v8_str("pre")->Equals(key)) {
- return v8::True();
+ return v8::Integer::New(v8::None);
}
- return v8::Handle<v8::Boolean>(); // do not intercept the call
+ return v8::Handle<v8::Integer>(); // do not intercept the call
}
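PrePropertyHandlerQuery shows the shape of the query callbacks enabled by USE_NEW_QUERY_CALLBACKS: instead of a Boolean has/has-not answer, the interceptor returns the property's attributes as an Integer, and an empty handle still means "not intercepted". A hedged example that also reports non-default attributes (the callback name and property key are made up for illustration):

static v8::Handle<v8::Integer> ExampleQuery(Local<String> key,
                                            const AccessorInfo&) {
  if (v8_str("locked")->Equals(key)) {
    // Intercept and report the property as read-only and non-deletable.
    return v8::Integer::New(v8::ReadOnly | v8::DontDelete);
  }
  return v8::Handle<v8::Integer>();  // Empty handle: do not intercept.
}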
@@ -1236,7 +1304,7 @@ THREADED_TEST(PrePropertyHandler) {
v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
desc->InstanceTemplate()->SetNamedPropertyHandler(PrePropertyHandlerGet,
0,
- PrePropertyHandlerHas);
+ PrePropertyHandlerQuery);
LocalContext env(NULL, desc->InstanceTemplate());
Script::Compile(v8_str(
"var pre = 'Object: pre'; var on = 'Object: on';"))->Run();
@@ -6245,12 +6313,25 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
templ->SetAccessor(v8_str("y"), Return239);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
+
+ // Check the case when the receiver and the interceptor's holder
+ // are the same object.
v8::Handle<Value> value = CompileRun(
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = o.y;"
"}");
CHECK_EQ(239, value->Int32Value());
+
+ // Check the case when interceptor's holder is in proto chain
+ // of receiver.
+ value = CompileRun(
+ "r = { __proto__: o };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = r.y;"
+ "}");
+ CHECK_EQ(239, value->Int32Value());
}
@@ -6265,6 +6346,8 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+ // Check the case when the receiver and the interceptor's holder
+ // are the same object.
v8::Handle<Value> value = CompileRun(
"o.__proto__ = p;"
"var result = 0;"
@@ -6272,6 +6355,16 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
" result = o.x + o.y;"
"}");
CHECK_EQ(239 + 42, value->Int32Value());
+
+ // Check the case when interceptor's holder is in proto chain
+ // of receiver.
+ value = CompileRun(
+ "r = { __proto__: o };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = r.x + r.y;"
+ "}");
+ CHECK_EQ(239 + 42, value->Int32Value());
}
@@ -7051,6 +7144,163 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
}
+v8::Handle<Value> keyed_call_ic_function;
+
+static v8::Handle<Value> InterceptorKeyedCallICGetter(
+ Local<String> name, const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("x")->Equals(name)) {
+ return keyed_call_ic_function;
+ }
+ return v8::Handle<Value>();
+}
+
+
+// Test the case when we stored a cacheable lookup into
+// a stub, but the function name changed (to another cacheable function).
+THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(NoBlockGetterX);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "proto = new Object();"
+ "proto.y = function(x) { return x + 1; };"
+ "proto.z = function(x) { return x - 1; };"
+ "o.__proto__ = proto;"
+ "var result = 0;"
+ "var method = 'y';"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { method = 'z'; };"
+ " result += o[method](41);"
+ "}");
+ CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Test the case when we stored a cacheable lookup into
+// a stub, but the function name changed (and the new function is present
+// both before and after the interceptor in the prototype chain).
+THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(InterceptorKeyedCallICGetter);
+ LocalContext context;
+ context->Global()->Set(v8_str("proto1"), templ->NewInstance());
+ keyed_call_ic_function =
+ v8_compile("function f(x) { return x - 1; }; f")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "o = new Object();"
+ "proto2 = new Object();"
+ "o.y = function(x) { return x + 1; };"
+ "proto2.y = function(x) { return x + 2; };"
+ "o.__proto__ = proto1;"
+ "proto1.__proto__ = proto2;"
+ "var result = 0;"
+ "var method = 'x';"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { method = 'y'; };"
+ " result += o[method](41);"
+ "}");
+ CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Same as InterceptorKeyedCallICKeyChange1, only the cacheable functions sit
+// on the global object.
+THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(NoBlockGetterX);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "function dec(x) { return x - 1; };"
+ "dec(1);"
+ "o.__proto__ = this;"
+ "this.__proto__.x = inc;"
+ "this.__proto__.y = dec;"
+ "var result = 0;"
+ "var method = 'x';"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { method = 'y'; };"
+ " result += o[method](41);"
+ "}");
+ CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Test the case when the actual function to call sits on the global object.
+THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "function len(x) { return x.length; };"
+ "o.__proto__ = this;"
+ "var m = 'parseFloat';"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) {"
+ " m = 'len';"
+ " saved_result = result;"
+ " };"
+ " result = o[m]('239');"
+ "}");
+ CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+}
+
+// Test the map transition before the interceptor.
+THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+ LocalContext context;
+ context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "var o = new Object();"
+ "o.__proto__ = proto;"
+ "o.method = function(x) { return x + 1; };"
+ "var m = 'method';"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { o.method = function(x) { return x - 1; }; };"
+ " result += o[m](41);"
+ "}");
+ CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Test the map transition after the interceptor.
+THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "var proto = new Object();"
+ "o.__proto__ = proto;"
+ "proto.method = function(x) { return x + 1; };"
+ "var m = 'method';"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { proto.method = function(x) { return x - 1; }; };"
+ " result += o[m](41);"
+ "}");
+ CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
static int interceptor_call_count = 0;
static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name,
@@ -7203,6 +7453,18 @@ THREADED_TEST(NullIndexedInterceptor) {
}
+THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ templ->InstanceTemplate()->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"),
+ templ->GetFunction()->NewInstance());
+ ExpectTrue("obj.x === 42");
+ ExpectTrue("!obj.propertyIsEnumerable('x')");
+}
+
+
static v8::Handle<Value> ParentGetter(Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
@@ -8236,6 +8498,30 @@ TEST(PreCompileDeserializationError) {
}
+// Verifies that the Handle<String> and const char* versions of the API produce
+// the same results (at least for one trivial case).
+TEST(PreCompileAPIVariationsAreSame) {
+ v8::V8::Initialize();
+ v8::HandleScope scope;
+
+ const char* cstring = "function foo(a) { return a+1; }";
+ v8::ScriptData* sd_from_cstring =
+ v8::ScriptData::PreCompile(cstring, i::StrLength(cstring));
+
+ TestAsciiResource* resource = new TestAsciiResource(cstring);
+ v8::ScriptData* sd_from_istring = v8::ScriptData::PreCompile(
+ v8::String::NewExternal(resource));
+
+ CHECK_EQ(sd_from_cstring->Length(), sd_from_istring->Length());
+ CHECK_EQ(0, memcmp(sd_from_cstring->Data(),
+ sd_from_istring->Data(),
+ sd_from_cstring->Length()));
+
+ delete sd_from_cstring;
+ delete sd_from_istring;
+}
+
+
// This tests that we do not allow dictionary load/call inline caches
// to use functions that have not yet been compiled. The potential
// problem of loading a function that has not yet been compiled can
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 7f3404cf..3058c6f8 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -280,4 +280,40 @@ TEST(4) {
}
}
+
+TEST(5) {
+ // Test the ARMv7 bitfield instructions.
+ InitializeVM();
+ v8::HandleScope scope;
+
+ Assembler assm(NULL, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ // On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
+ __ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555
+ __ sbfx(r0, r0, 0, 5); // 0b11..111111110101 = -11
+ __ bfc(r0, 1, 3); // 0b11..111111110001 = -15
+ __ mov(r1, Operand(7));
+ __ bfi(r0, r1, 3, 3); // 0b11..111111111001 = -7
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(-7, res);
+ }
+}
+
#undef __
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index f587fc8a..0e6f09d2 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -16,6 +16,7 @@ using i::CpuProfilesCollection;
using i::ProfileGenerator;
using i::ProfileNode;
using i::ProfilerEventsProcessor;
+using i::TokenEnumerator;
TEST(StartStop) {
@@ -115,7 +116,7 @@ TEST(CodeEvents) {
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeDeleteEvent(ToAddress(0x1600));
processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000),
- CodeEntry::kNoSecurityToken);
+ TokenEnumerator::kNoSecurityToken);
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
@@ -178,7 +179,7 @@ TEST(TickEvents) {
processor.Stop();
processor.Join();
CpuProfile* profile =
- profiles.StopProfiling(CodeEntry::kNoSecurityToken, "", 1);
+ profiles.StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
CHECK_NE(NULL, profile);
// Check call trees.
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 4b4c9504..4c3ff5e3 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -27,6 +27,8 @@
#include <stdlib.h>
+#define USE_NEW_QUERY_CALLBACKS
+
#include "v8.h"
#include "api.h"
@@ -395,8 +397,9 @@ Handle<FixedArray> GetDebuggedFunctions() {
static Handle<Code> ComputeCallDebugBreak(int argc) {
- CALL_HEAP_FUNCTION(v8::internal::StubCache::ComputeCallDebugBreak(argc),
- Code);
+ CALL_HEAP_FUNCTION(
+ v8::internal::StubCache::ComputeCallDebugBreak(argc, Code::CALL_IC),
+ Code);
}
@@ -1228,6 +1231,11 @@ TEST(GCDuringBreakPointProcessing) {
SetBreakPoint(foo, 0);
CallWithBreakPoints(env->Global(), foo, 1, 25);
+ // Test debug break slot break point with garbage collection.
+ foo = CompileFunction(&env, "function foo(){var a;}", "foo");
+ SetBreakPoint(foo, 0);
+ CallWithBreakPoints(env->Global(), foo, 1, 25);
+
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
}
@@ -1657,7 +1665,7 @@ TEST(ConditionalScriptBreakPoint) {
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
- ChangeScriptBreakPointConditionFromJS(sbp1, "a % 2 == 0");
+ ChangeScriptBreakPointConditionFromJS(sbp1, "x % 2 == 0");
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
f->Call(env->Global(), 0, NULL);
@@ -2141,17 +2149,19 @@ TEST(DebugEvaluate) {
v8::Local<v8::Function> foo = CompileFunction(&env,
"function foo(x) {"
" var a;"
- " y=0; /* To ensure break location.*/"
+ " y=0;" // To ensure break location 1.
" a=x;"
+ " y=0;" // To ensure break location 2.
"}",
"foo");
- const int foo_break_position = 15;
+ const int foo_break_position_1 = 15;
+ const int foo_break_position_2 = 29;
// Arguments with one parameter "Hello, world!"
v8::Handle<v8::Value> argv_foo[1] = { v8::String::New("Hello, world!") };
// Call foo with breakpoint set before a=x and undefined as parameter.
- int bp = SetBreakPoint(foo, foo_break_position);
+ int bp = SetBreakPoint(foo, foo_break_position_1);
checks = checks_uu;
foo->Call(env->Global(), 0, NULL);
@@ -2161,7 +2171,7 @@ TEST(DebugEvaluate) {
// Call foo with breakpoint set after a=x and parameter "Hello, world!".
ClearBreakPoint(bp);
- SetBreakPoint(foo, foo_break_position + 1);
+ SetBreakPoint(foo, foo_break_position_2);
checks = checks_hh;
foo->Call(env->Global(), 1, argv_foo);
@@ -2423,6 +2433,9 @@ TEST(DebugStepKeyedLoadLoop) {
v8::HandleScope scope;
DebugLocalContext env;
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
// Create a function for testing stepping of keyed load. The statement 'y=1'
// is there to have more than one breakable statement in the loop, TODO(315).
v8::Local<v8::Function> foo = CompileFunction(
@@ -2448,9 +2461,6 @@ TEST(DebugStepKeyedLoadLoop) {
v8::Handle<v8::Value> args[kArgc] = { a };
foo->Call(env->Global(), kArgc, args);
- // Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
-
// Setup break point and step through the function.
SetBreakPoint(foo, 3);
step_action = StepNext;
@@ -2458,7 +2468,7 @@ TEST(DebugStepKeyedLoadLoop) {
foo->Call(env->Global(), kArgc, args);
// With stepping all break locations are hit.
- CHECK_EQ(22, break_point_hit_count);
+ CHECK_EQ(33, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -2470,6 +2480,9 @@ TEST(DebugStepKeyedStoreLoop) {
v8::HandleScope scope;
DebugLocalContext env;
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
// Create a function for testing stepping of keyed store. The statement 'y=1'
// is there to have more than one breakable statement in the loop, TODO(315).
v8::Local<v8::Function> foo = CompileFunction(
@@ -2494,9 +2507,6 @@ TEST(DebugStepKeyedStoreLoop) {
v8::Handle<v8::Value> args[kArgc] = { a };
foo->Call(env->Global(), kArgc, args);
- // Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
-
// Setup break point and step through the function.
SetBreakPoint(foo, 3);
step_action = StepNext;
@@ -2504,7 +2514,7 @@ TEST(DebugStepKeyedStoreLoop) {
foo->Call(env->Global(), kArgc, args);
// With stepping all break locations are hit.
- CHECK_EQ(22, break_point_hit_count);
+ CHECK_EQ(32, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -2516,6 +2526,9 @@ TEST(DebugStepNamedLoadLoop) {
v8::HandleScope scope;
DebugLocalContext env;
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
// Create a function for testing stepping of named load.
v8::Local<v8::Function> foo = CompileFunction(
&env,
@@ -2538,9 +2551,6 @@ TEST(DebugStepNamedLoadLoop) {
// Call function without any break points to ensure inlining is in place.
foo->Call(env->Global(), 0, NULL);
- // Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
-
// Setup break point and step through the function.
SetBreakPoint(foo, 4);
step_action = StepNext;
@@ -2548,7 +2558,7 @@ TEST(DebugStepNamedLoadLoop) {
foo->Call(env->Global(), 0, NULL);
// With stepping all break locations are hit.
- CHECK_EQ(41, break_point_hit_count);
+ CHECK_EQ(53, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -2560,6 +2570,9 @@ TEST(DebugStepLinearMixedICs) {
v8::HandleScope scope;
DebugLocalContext env;
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
// Create a function for testing stepping.
v8::Local<v8::Function> foo = CompileFunction(&env,
"function bar() {};"
@@ -2570,15 +2583,12 @@ TEST(DebugStepLinearMixedICs) {
" a=1;b=2;x=a;y[index]=3;x=y[index];bar();}", "foo");
SetBreakPoint(foo, 0);
- // Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
-
step_action = StepIn;
break_point_hit_count = 0;
foo->Call(env->Global(), 0, NULL);
// With stepping all break locations are hit.
- CHECK_EQ(8, break_point_hit_count);
+ CHECK_EQ(11, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -2598,6 +2608,66 @@ TEST(DebugStepLinearMixedICs) {
}
+TEST(DebugStepDeclarations) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const char* src = "function foo() { "
+ " var a;"
+ " var b = 1;"
+ " var c = foo;"
+ " var d = Math.floor;"
+ " var e = b + d(1.2);"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ SetBreakPoint(foo, 0);
+
+ // Stepping through the declarations.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(6, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugStepLocals) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const char* src = "function foo() { "
+ " var a,b;"
+ " a = 1;"
+ " b = a + 2;"
+ " b = 1 + 2 + 3;"
+ " a = Math.floor(b);"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ SetBreakPoint(foo, 0);
+
+ // Stepping through the declarations.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(6, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
TEST(DebugStepIf) {
v8::HandleScope scope;
DebugLocalContext env;
@@ -2624,14 +2694,14 @@ TEST(DebugStepIf) {
break_point_hit_count = 0;
v8::Handle<v8::Value> argv_true[argc] = { v8::True() };
foo->Call(env->Global(), argc, argv_true);
- CHECK_EQ(3, break_point_hit_count);
+ CHECK_EQ(4, break_point_hit_count);
// Stepping through the false part.
step_action = StepIn;
break_point_hit_count = 0;
v8::Handle<v8::Value> argv_false[argc] = { v8::False() };
foo->Call(env->Global(), argc, argv_false);
- CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ(5, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -2659,6 +2729,7 @@ TEST(DebugStepSwitch) {
" case 3:"
" d = 1;"
" e = 1;"
+ " f = 1;"
" break;"
" }"
"}";
@@ -2670,21 +2741,97 @@ TEST(DebugStepSwitch) {
break_point_hit_count = 0;
v8::Handle<v8::Value> argv_1[argc] = { v8::Number::New(1) };
foo->Call(env->Global(), argc, argv_1);
- CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ(6, break_point_hit_count);
// Another case.
step_action = StepIn;
break_point_hit_count = 0;
v8::Handle<v8::Value> argv_2[argc] = { v8::Number::New(2) };
foo->Call(env->Global(), argc, argv_2);
- CHECK_EQ(3, break_point_hit_count);
+ CHECK_EQ(5, break_point_hit_count);
// Last case.
step_action = StepIn;
break_point_hit_count = 0;
v8::Handle<v8::Value> argv_3[argc] = { v8::Number::New(3) };
foo->Call(env->Global(), argc, argv_3);
- CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ(7, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugStepWhile) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const int argc = 1;
+ const char* src = "function foo(x) { "
+ " var a = 0;"
+ " while (a < x) {"
+ " a++;"
+ " }"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ SetBreakPoint(foo, 8); // "var a = 0;"
+
+ // Looping 10 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ foo->Call(env->Global(), argc, argv_10);
+ CHECK_EQ(23, break_point_hit_count);
+
+ // Looping 100 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ foo->Call(env->Global(), argc, argv_100);
+ CHECK_EQ(203, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugStepDoWhile) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const int argc = 1;
+ const char* src = "function foo(x) { "
+ " var a = 0;"
+ " do {"
+ " a++;"
+ " } while (a < x)"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ SetBreakPoint(foo, 8); // "var a = 0;"
+
+ // Looping 10 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ foo->Call(env->Global(), argc, argv_10);
+ CHECK_EQ(22, break_point_hit_count);
+
+ // Looping 100 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ foo->Call(env->Global(), argc, argv_100);
+ CHECK_EQ(202, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -2730,6 +2877,210 @@ TEST(DebugStepFor) {
}
+TEST(DebugStepForContinue) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const int argc = 1;
+ const char* src = "function foo(x) { "
+ " var a = 0;"
+ " var b = 0;"
+ " var c = 0;"
+ " for (var i = 0; i < x; i++) {"
+ " a++;"
+ " if (a % 2 == 0) continue;"
+ " b++;"
+ " c++;"
+ " }"
+ " return b;"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ v8::Handle<v8::Value> result;
+ SetBreakPoint(foo, 8); // "var a = 0;"
+
+ // Each loop iteration generates 4 or 5 steps depending on whether a is even.
+
+ // Looping 10 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ result = foo->Call(env->Global(), argc, argv_10);
+ CHECK_EQ(5, result->Int32Value());
+ CHECK_EQ(50, break_point_hit_count);
+
+ // Looping 100 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ result = foo->Call(env->Global(), argc, argv_100);
+ CHECK_EQ(50, result->Int32Value());
+ CHECK_EQ(455, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugStepForBreak) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const int argc = 1;
+ const char* src = "function foo(x) { "
+ " var a = 0;"
+ " var b = 0;"
+ " var c = 0;"
+ " for (var i = 0; i < 1000; i++) {"
+ " a++;"
+ " if (a == x) break;"
+ " b++;"
+ " c++;"
+ " }"
+ " return b;"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ v8::Handle<v8::Value> result;
+ SetBreakPoint(foo, 8); // "var a = 0;"
+
+ // Each loop iteration generates 5 steps except for the last (when break
+ // is executed), which generates only 4.
+
+ // Looping 10 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ result = foo->Call(env->Global(), argc, argv_10);
+ CHECK_EQ(9, result->Int32Value());
+ CHECK_EQ(53, break_point_hit_count);
+
+ // Looping 100 times.
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ result = foo->Call(env->Global(), argc, argv_100);
+ CHECK_EQ(99, result->Int32Value());
+ CHECK_EQ(503, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugStepForIn) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ v8::Local<v8::Function> foo;
+ const char* src_1 = "function foo() { "
+ " var a = [1, 2];"
+ " for (x in a) {"
+ " b = 0;"
+ " }"
+ "}";
+ foo = CompileFunction(&env, src_1, "foo");
+ SetBreakPoint(foo, 0); // "var a = ..."
+
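+ // Stepping hits two break locations per element plus two outside the loop:
+ // 6 for the two-element array here, 8 for the three-element array below.
+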
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(6, break_point_hit_count);
+
+ const char* src_2 = "function foo() { "
+ " var a = {a:[1, 2, 3]};"
+ " for (x in a.a) {"
+ " b = 0;"
+ " }"
+ "}";
+ foo = CompileFunction(&env, src_2, "foo");
+ SetBreakPoint(foo, 0); // "var a = ..."
+
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(8, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugStepWith) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const char* src = "function foo(x) { "
+ " var a = {};"
+ " with (a) {}"
+ " with (b) {}"
+ "}";
+ env->Global()->Set(v8::String::New("b"), v8::Object::New());
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ v8::Handle<v8::Value> result;
+ SetBreakPoint(foo, 8); // "var a = {};"
+
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(4, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+TEST(DebugConditional) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Create a function for testing stepping.
+ const char* src = "function foo(x) { "
+ " var a;"
+ " a = x ? 1 : 2;"
+ " return a;"
+ "}";
+ v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+ SetBreakPoint(foo, 0); // "var a;"
+
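+ // Both arms of the conditional produce the same number of break locations,
+ // so both calls below are expected to hit 5 breaks.
+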
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(5, break_point_hit_count);
+
+ step_action = StepIn;
+ break_point_hit_count = 0;
+ const int argc = 1;
+ v8::Handle<v8::Value> argv_true[argc] = { v8::True() };
+ foo->Call(env->Global(), argc, argv_true);
+ CHECK_EQ(5, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
TEST(StepInOutSimple) {
v8::HandleScope scope;
DebugLocalContext env;
@@ -2851,7 +3202,7 @@ TEST(StepInOutBranch) {
// Step through invocation of a.
step_action = StepIn;
break_point_hit_count = 0;
- expected_step_sequence = "abaca";
+ expected_step_sequence = "abbaca";
a->Call(env->Global(), 0, NULL);
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -2920,7 +3271,7 @@ TEST(DebugStepFunctionApply) {
foo->Call(env->Global(), 0, NULL);
// With stepping all break locations are hit.
- CHECK_EQ(6, break_point_hit_count);
+ CHECK_EQ(7, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -2964,14 +3315,14 @@ TEST(DebugStepFunctionCall) {
// Check stepping where the if condition in bar is false.
break_point_hit_count = 0;
foo->Call(env->Global(), 0, NULL);
- CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ(6, break_point_hit_count);
// Check stepping where the if condition in bar is true.
break_point_hit_count = 0;
const int argc = 1;
v8::Handle<v8::Value> argv[argc] = { v8::True() };
foo->Call(env->Global(), argc, argv);
- CHECK_EQ(6, break_point_hit_count);
+ CHECK_EQ(8, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -3264,14 +3615,13 @@ TEST(StepWithException) {
b->Call(env->Global(), 0, NULL);
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
-
// Step through invocation of d + e.
v8::Local<v8::Function> d = CompileFunction(&env, src, "d");
SetBreakPoint(d, 0);
ChangeBreakOnException(false, true);
step_action = StepIn;
break_point_hit_count = 0;
- expected_step_sequence = "dded";
+ expected_step_sequence = "ddedd";
d->Call(env->Global(), 0, NULL);
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3280,7 +3630,7 @@ TEST(StepWithException) {
ChangeBreakOnException(true, true);
step_action = StepIn;
break_point_hit_count = 0;
- expected_step_sequence = "ddeed";
+ expected_step_sequence = "ddeedd";
d->Call(env->Global(), 0, NULL);
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3291,7 +3641,7 @@ TEST(StepWithException) {
ChangeBreakOnException(false, true);
step_action = StepIn;
break_point_hit_count = 0;
- expected_step_sequence = "ffghf";
+ expected_step_sequence = "ffghhff";
f->Call(env->Global(), 0, NULL);
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3300,7 +3650,7 @@ TEST(StepWithException) {
ChangeBreakOnException(true, true);
step_action = StepIn;
break_point_hit_count = 0;
- expected_step_sequence = "ffghhf";
+ expected_step_sequence = "ffghhhff";
f->Call(env->Global(), 0, NULL);
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index f083027d..c4be35ee 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -27,6 +27,8 @@
#include <stdlib.h>
+#define USE_NEW_QUERY_CALLBACKS
+
#include "v8.h"
#include "heap.h"
@@ -63,12 +65,12 @@ class DeclarationContext {
int get_count() const { return get_count_; }
int set_count() const { return set_count_; }
- int has_count() const { return has_count_; }
+ int query_count() const { return query_count_; }
protected:
virtual v8::Handle<Value> Get(Local<String> key);
virtual v8::Handle<Value> Set(Local<String> key, Local<Value> value);
- virtual v8::Handle<Boolean> Has(Local<String> key);
+ virtual v8::Handle<Integer> Query(Local<String> key);
void InitializeIfNeeded();
@@ -85,8 +87,8 @@ class DeclarationContext {
static v8::Handle<Value> HandleSet(Local<String> key,
Local<Value> value,
const AccessorInfo& info);
- static v8::Handle<Boolean> HandleHas(Local<String> key,
- const AccessorInfo& info);
+ static v8::Handle<Integer> HandleQuery(Local<String> key,
+ const AccessorInfo& info);
private:
bool is_initialized_;
@@ -95,14 +97,14 @@ class DeclarationContext {
int get_count_;
int set_count_;
- int has_count_;
+ int query_count_;
static DeclarationContext* GetInstance(const AccessorInfo& info);
};
DeclarationContext::DeclarationContext()
- : is_initialized_(false), get_count_(0), set_count_(0), has_count_(0) {
+ : is_initialized_(false), get_count_(0), set_count_(0), query_count_(0) {
// Do nothing.
}
@@ -114,7 +116,7 @@ void DeclarationContext::InitializeIfNeeded() {
Local<Value> data = External::New(this);
GetHolder(function)->SetNamedPropertyHandler(&HandleGet,
&HandleSet,
- &HandleHas,
+ &HandleQuery,
0, 0,
data);
context_ = Context::New(0, function->InstanceTemplate(), Local<Value>());
@@ -124,7 +126,7 @@ void DeclarationContext::InitializeIfNeeded() {
void DeclarationContext::Check(const char* source,
- int get, int set, int has,
+ int get, int set, int query,
Expectations expectations,
v8::Handle<Value> value) {
InitializeIfNeeded();
@@ -137,7 +139,7 @@ void DeclarationContext::Check(const char* source,
Local<Value> result = Script::Compile(String::New(source))->Run();
CHECK_EQ(get, get_count());
CHECK_EQ(set, set_count());
- CHECK_EQ(has, has_count());
+ CHECK_EQ(query, query_count());
if (expectations == EXPECT_RESULT) {
CHECK(!catcher.HasCaught());
if (!value.IsEmpty()) {
@@ -170,11 +172,11 @@ v8::Handle<Value> DeclarationContext::HandleSet(Local<String> key,
}
-v8::Handle<Boolean> DeclarationContext::HandleHas(Local<String> key,
- const AccessorInfo& info) {
+v8::Handle<Integer> DeclarationContext::HandleQuery(Local<String> key,
+ const AccessorInfo& info) {
DeclarationContext* context = GetInstance(info);
- context->has_count_++;
- return context->Has(key);
+ context->query_count_++;
+ return context->Query(key);
}
@@ -194,8 +196,8 @@ v8::Handle<Value> DeclarationContext::Set(Local<String> key,
}
-v8::Handle<Boolean> DeclarationContext::Has(Local<String> key) {
- return v8::Handle<Boolean>();
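+// The Query callback reports presence through its return value: an empty
+// handle means the property is absent, while a non-empty handle carries the
+// property's attributes (v8::None for a plain, present property).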
+v8::Handle<Integer> DeclarationContext::Query(Local<String> key) {
+ return v8::Handle<Integer>();
}
@@ -249,8 +251,8 @@ TEST(Unknown) {
class PresentPropertyContext: public DeclarationContext {
protected:
- virtual v8::Handle<Boolean> Has(Local<String> key) {
- return True();
+ virtual v8::Handle<Integer> Query(Local<String> key) {
+ return Integer::New(v8::None);
}
};
@@ -304,8 +306,8 @@ TEST(Present) {
class AbsentPropertyContext: public DeclarationContext {
protected:
- virtual v8::Handle<Boolean> Has(Local<String> key) {
- return False();
+ virtual v8::Handle<Integer> Query(Local<String> key) {
+ return v8::Handle<Integer>();
}
};
@@ -316,7 +318,7 @@ TEST(Absent) {
{ AbsentPropertyContext context;
context.Check("var x; x",
1, // access
- 2, // declaration + initialization
+ 1, // declaration
2, // declaration + initialization
EXPECT_RESULT, Undefined());
}
@@ -375,24 +377,24 @@ class AppearingPropertyContext: public DeclarationContext {
AppearingPropertyContext() : state_(DECLARE) { }
protected:
- virtual v8::Handle<Boolean> Has(Local<String> key) {
+ virtual v8::Handle<Integer> Query(Local<String> key) {
switch (state_) {
case DECLARE:
// Force declaration by returning that the
// property is absent.
state_ = INITIALIZE_IF_ASSIGN;
- return False();
+ return Handle<Integer>();
case INITIALIZE_IF_ASSIGN:
// Return that the property is present so we only get the
// setter called when initializing with a value.
state_ = UNKNOWN;
- return True();
+ return Integer::New(v8::None);
default:
CHECK(state_ == UNKNOWN);
break;
}
// Do the lookup in the object.
- return v8::Local<Boolean>();
+ return v8::Handle<Integer>();
}
private:
@@ -458,31 +460,31 @@ class ReappearingPropertyContext: public DeclarationContext {
ReappearingPropertyContext() : state_(DECLARE) { }
protected:
- virtual v8::Handle<Boolean> Has(Local<String> key) {
+ virtual v8::Handle<Integer> Query(Local<String> key) {
switch (state_) {
case DECLARE:
// Force the first declaration by returning that
// the property is absent.
state_ = DONT_DECLARE;
- return False();
+ return Handle<Integer>();
case DONT_DECLARE:
// Ignore the second declaration by returning
// that the property is already there.
state_ = INITIALIZE;
- return True();
+ return Integer::New(v8::None);
case INITIALIZE:
// Force an initialization by returning that
// the property is absent. This will make sure
// that the setter is called and it will not
// lead to redeclaration conflicts (yet).
state_ = UNKNOWN;
- return False();
+ return Handle<Integer>();
default:
CHECK(state_ == UNKNOWN);
break;
}
// Do the lookup in the object.
- return v8::Local<Boolean>();
+ return Handle<Integer>();
}
private:
@@ -506,9 +508,9 @@ TEST(Reappearing) {
class ExistsInPrototypeContext: public DeclarationContext {
protected:
- virtual v8::Handle<Boolean> Has(Local<String> key) {
+ virtual v8::Handle<Integer> Query(Local<String> key) {
// Let it seem that the property exists in the prototype object.
- return True();
+ return Integer::New(v8::None);
}
// Use the prototype as the holder for the interceptors.
@@ -568,9 +570,9 @@ TEST(ExistsInPrototype) {
class AbsentInPrototypeContext: public DeclarationContext {
protected:
- virtual v8::Handle<Boolean> Has(Local<String> key) {
+ virtual v8::Handle<Integer> Query(Local<String> key) {
// Let it seem that the property is absent in the prototype object.
- return False();
+ return Handle<Integer>();
}
// Use the prototype as the holder for the interceptors.
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 9b155255..3189e5e1 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -289,3 +289,49 @@ TEST(Type1) {
VERIFY_RUN();
}
+
+
+TEST(Type3) {
+ SETUP();
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
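+ // ARMv7 bit-field instructions: ubfx/sbfx extract an unsigned/signed bit
+ // field, bfc clears a field, and bfi inserts one at the given lsb and width.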
+ COMPARE(ubfx(r0, r1, 5, 10),
+ "e7e902d1 ubfx r0, r1, #5, #10");
+ COMPARE(ubfx(r1, r0, 5, 10),
+ "e7e912d0 ubfx r1, r0, #5, #10");
+ COMPARE(ubfx(r0, r1, 31, 1),
+ "e7e00fd1 ubfx r0, r1, #31, #1");
+ COMPARE(ubfx(r1, r0, 31, 1),
+ "e7e01fd0 ubfx r1, r0, #31, #1");
+
+ COMPARE(sbfx(r0, r1, 5, 10),
+ "e7a902d1 sbfx r0, r1, #5, #10");
+ COMPARE(sbfx(r1, r0, 5, 10),
+ "e7a912d0 sbfx r1, r0, #5, #10");
+ COMPARE(sbfx(r0, r1, 31, 1),
+ "e7a00fd1 sbfx r0, r1, #31, #1");
+ COMPARE(sbfx(r1, r0, 31, 1),
+ "e7a01fd0 sbfx r1, r0, #31, #1");
+
+ COMPARE(bfc(r0, 5, 10),
+ "e7ce029f bfc r0, #5, #10");
+ COMPARE(bfc(r1, 5, 10),
+ "e7ce129f bfc r1, #5, #10");
+ COMPARE(bfc(r0, 31, 1),
+ "e7df0f9f bfc r0, #31, #1");
+ COMPARE(bfc(r1, 31, 1),
+ "e7df1f9f bfc r1, #31, #1");
+
+ COMPARE(bfi(r0, r1, 5, 10),
+ "e7ce0291 bfi r0, r1, #5, #10");
+ COMPARE(bfi(r1, r0, 5, 10),
+ "e7ce1290 bfi r1, r0, #5, #10");
+ COMPARE(bfi(r0, r1, 31, 1),
+ "e7df0f91 bfi r0, r1, #31, #1");
+ COMPARE(bfi(r1, r0, 31, 1),
+ "e7df1f90 bfi r1, r0, #31, #1");
+ }
+
+ VERIFY_RUN();
+}
+
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index f94cd457..c8e01979 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -244,6 +244,9 @@ TEST(DisasmIa320) {
__ test(edx, Immediate(12345));
__ test(edx, Operand(ebx, ecx, times_8, 10000));
+ __ test(Operand(esi, edi, times_1, -20000000), Immediate(300000000));
+ __ test_b(edx, Operand(ecx, ebx, times_2, 1000));
+ __ test_b(Operand(eax, -20), 0x9A);
__ nop();
__ xor_(edx, 12345);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 31060bf0..195fef49 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -177,7 +177,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
int request = 24;
- CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request)));
+ CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
@@ -666,14 +666,14 @@ TEST(JSArray) {
array->SetElementsLength(*length);
uint32_t int_length = 0;
- CHECK(Array::IndexFromObject(*length, &int_length));
+ CHECK(length->ToArrayIndex(&int_length));
CHECK_EQ(*length, array->length());
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
array->SetElement(int_length, *name);
uint32_t new_int_length = 0;
- CHECK(Array::IndexFromObject(array->length(), &new_int_length));
+ CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
CHECK_EQ(array->GetElement(int_length), *name);
CHECK_EQ(array->GetElement(0), *name);
@@ -830,7 +830,7 @@ TEST(LargeObjectSpaceContains) {
}
CHECK(bytes_to_page > FixedArray::kHeaderSize);
- int* flags_ptr = &Page::FromAddress(next_page)->flags;
+ intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
Address flags_addr = reinterpret_cast<Address>(flags_ptr);
int bytes_to_allocate =
@@ -888,7 +888,7 @@ TEST(Regression39128) {
// The plan: create JSObject which references objects in new space.
// Then clone this object (forcing it to go into old space) and check
- // that only bits pertaining to the object are updated in remembered set.
+ // that region dirty marks are updated correctly.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(Top::global_context()->object_function());
@@ -931,7 +931,7 @@ TEST(Regression39128) {
CHECK(!object->IsFailure());
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
- CHECK_EQ(0, jsobject->elements()->length());
+ CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
CHECK_EQ(0, jsobject->properties()->length());
// Create a reference to object in new space in jsobject.
jsobject->FastPropertyAtPut(-1, array);
@@ -951,17 +951,50 @@ TEST(Regression39128) {
}
CHECK(Heap::old_pointer_space()->Contains(clone->address()));
- // Step 5: verify validity of remembered set.
+ // Step 5: verify validity of region dirty marks.
Address clone_addr = clone->address();
Page* page = Page::FromAddress(clone_addr);
- // Check that remembered set tracks a reference from inobject property 1.
- CHECK(page->IsRSetSet(clone_addr, object_size - kPointerSize));
- // Probe several addresses after the object.
- for (int i = 0; i < 7; i++) {
- int offset = object_size + i * kPointerSize;
- if (clone_addr + offset >= page->ObjectAreaEnd()) {
- break;
- }
- CHECK(!page->IsRSetSet(clone_addr, offset));
- }
+ // Check that region covering inobject property 1 is marked dirty.
+ CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
+}
+
+TEST(TestCodeFlushing) {
+ i::FLAG_allow_natives_syntax = true;
+ // If we do not flush code this test is invalid.
+ if (!FLAG_flush_code) return;
+ InitializeVM();
+ v8::HandleScope scope;
+ const char* source = "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ Handle<String> foo_name = Factory::LookupAsciiSymbol("foo");
+
+ // This compile will add the code to the compilation cache.
+ CompileRun(source);
+
+ // Check function is compiled.
+ Object* func_value = Top::context()->global()->GetProperty(*foo_name);
+ CHECK(func_value->IsJSFunction());
+ Handle<JSFunction> function(JSFunction::cast(func_value));
+ CHECK(function->shared()->is_compiled());
+
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+
+ // foo should still be in the compilation cache and therefore not
+ // have been removed.
+ CHECK(function->shared()->is_compiled());
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+
+ // foo should no longer be in the compilation cache.
+ CHECK(!function->shared()->is_compiled());
+ // Call foo to get it recompiled.
+ CompileRun("foo()");
+ CHECK(function->shared()->is_compiled());
}
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index b438d252..308f764a 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -7,12 +7,14 @@
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
+#include "../include/v8-profiler.h"
namespace i = v8::internal;
using i::CodeEntry;
using i::CodeMap;
using i::CpuProfile;
+using i::CpuProfiler;
using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
@@ -37,7 +39,7 @@ class TokenEnumeratorTester {
TEST(TokenEnumerator) {
TokenEnumerator te;
- CHECK_EQ(CodeEntry::kNoSecurityToken, te.GetTokenId(NULL));
+ CHECK_EQ(TokenEnumerator::kNoSecurityToken, te.GetTokenId(NULL));
v8::HandleScope hs;
v8::Local<v8::String> token1(v8::String::New("1"));
CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
@@ -63,20 +65,20 @@ TEST(TokenEnumerator) {
TEST(ProfileNodeFindOrAddChild) {
ProfileNode node(NULL, NULL);
- CodeEntry entry1(
- i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0,
+ TokenEnumerator::kNoSecurityToken);
ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
- CodeEntry entry2(
- i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0,
+ TokenEnumerator::kNoSecurityToken);
ProfileNode* childNode2 = node.FindOrAddChild(&entry2);
CHECK_NE(NULL, childNode2);
CHECK_NE(childNode1, childNode2);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
- CodeEntry entry3(
- i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0,
+ TokenEnumerator::kNoSecurityToken);
ProfileNode* childNode3 = node.FindOrAddChild(&entry3);
CHECK_NE(NULL, childNode3);
CHECK_NE(childNode1, childNode3);
@@ -117,12 +119,12 @@ class ProfileTreeTestHelper {
} // namespace
TEST(ProfileTreeAddPathFromStart) {
- CodeEntry entry1(
- i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry2(
- i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry3(
- i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0,
+ TokenEnumerator::kNoSecurityToken);
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
@@ -187,12 +189,12 @@ TEST(ProfileTreeAddPathFromStart) {
TEST(ProfileTreeAddPathFromEnd) {
- CodeEntry entry1(
- i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry2(
- i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry3(
- i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0,
+ TokenEnumerator::kNoSecurityToken);
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
@@ -270,8 +272,8 @@ TEST(ProfileTreeCalculateTotalTicks) {
CHECK_EQ(1, empty_tree.root()->total_ticks());
CHECK_EQ(1, empty_tree.root()->self_ticks());
- CodeEntry entry1(
- i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0,
+ TokenEnumerator::kNoSecurityToken);
CodeEntry* e1_path[] = {&entry1};
Vector<CodeEntry*> e1_path_vec(
e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
@@ -292,8 +294,8 @@ TEST(ProfileTreeCalculateTotalTicks) {
CHECK_EQ(1, node1->total_ticks());
CHECK_EQ(1, node1->self_ticks());
- CodeEntry entry2(
- i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0,
+ TokenEnumerator::kNoSecurityToken);
CodeEntry* e1_e2_path[] = {&entry1, &entry2};
Vector<CodeEntry*> e1_e2_path_vec(
e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
@@ -328,8 +330,8 @@ TEST(ProfileTreeCalculateTotalTicks) {
CodeEntry* e2_path[] = {&entry2};
Vector<CodeEntry*> e2_path_vec(
e2_path, sizeof(e2_path) / sizeof(e2_path[0]));
- CodeEntry entry3(
- i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0,
+ TokenEnumerator::kNoSecurityToken);
CodeEntry* e3_path[] = {&entry3};
Vector<CodeEntry*> e3_path_vec(
e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
@@ -392,7 +394,7 @@ TEST(ProfileTreeFilteredClone) {
CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0, token0);
CodeEntry entry4(
i::Logger::FUNCTION_TAG, "", "ddd", "", 0,
- CodeEntry::kInheritsSecurityToken);
+ TokenEnumerator::kInheritsSecurityToken);
{
CodeEntry* e1_e2_path[] = {&entry1, &entry2};
@@ -489,14 +491,14 @@ static inline i::Address ToAddress(int n) {
TEST(CodeMapAddCode) {
CodeMap code_map;
- CodeEntry entry1(
- i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry2(
- i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry3(
- i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry4(
- i::Logger::FUNCTION_TAG, "", "ddd", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry4(i::Logger::FUNCTION_TAG, "", "ddd", "", 0,
+ TokenEnumerator::kNoSecurityToken);
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
@@ -523,10 +525,10 @@ TEST(CodeMapAddCode) {
TEST(CodeMapMoveAndDeleteCode) {
CodeMap code_map;
- CodeEntry entry1(
- i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
- CodeEntry entry2(
- i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0,
+ TokenEnumerator::kNoSecurityToken);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0,
+ TokenEnumerator::kNoSecurityToken);
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
@@ -599,7 +601,7 @@ TEST(RecordTickSample) {
generator.RecordTickSample(sample3);
CpuProfile* profile =
- profiles.StopProfiling(CodeEntry::kNoSecurityToken, "", 1);
+ profiles.StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
CHECK_NE(NULL, profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
@@ -668,4 +670,109 @@ TEST(SampleRateCalculator) {
CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms());
}
+
+// --- P r o f i l e r E x t e n s i o n ---
+
+class ProfilerExtension : public v8::Extension {
+ public:
+ ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> StartProfiling(const v8::Arguments& args);
+ static v8::Handle<v8::Value> StopProfiling(const v8::Arguments& args);
+ private:
+ static const char* kSource;
+};
+
+
+const char* ProfilerExtension::kSource =
+ "native function startProfiling();"
+ "native function stopProfiling();";
+
+v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunction(
+ v8::Handle<v8::String> name) {
+ if (name->Equals(v8::String::New("startProfiling"))) {
+ return v8::FunctionTemplate::New(ProfilerExtension::StartProfiling);
+ } else if (name->Equals(v8::String::New("stopProfiling"))) {
+ return v8::FunctionTemplate::New(ProfilerExtension::StopProfiling);
+ } else {
+ CHECK(false);
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+}
+
+
+v8::Handle<v8::Value> ProfilerExtension::StartProfiling(
+ const v8::Arguments& args) {
+ if (args.Length() > 0)
+ v8::CpuProfiler::StartProfiling(args[0].As<v8::String>());
+ else
+ v8::CpuProfiler::StartProfiling(v8::String::New(""));
+ return v8::Undefined();
+}
+
+
+v8::Handle<v8::Value> ProfilerExtension::StopProfiling(
+ const v8::Arguments& args) {
+ if (args.Length() > 0)
+ v8::CpuProfiler::StopProfiling(args[0].As<v8::String>());
+ else
+ v8::CpuProfiler::StopProfiling(v8::String::New(""));
+ return v8::Undefined();
+}
+
+
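+// Declaring the extension registers it under the name "v8/profiler", so a
+// context created with that extension can call startProfiling() and
+// stopProfiling() from script.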
+static ProfilerExtension kProfilerExtension;
+v8::DeclareExtension kProfilerExtensionDeclaration(&kProfilerExtension);
+static v8::Persistent<v8::Context> env;
+
+static const ProfileNode* PickChild(const ProfileNode* parent,
+ const char* name) {
+ for (int i = 0; i < parent->children()->length(); ++i) {
+ const ProfileNode* child = parent->children()->at(i);
+ if (strcmp(child->entry()->name(), name) == 0) return child;
+ }
+ return NULL;
+}
+
+
+TEST(RecordStackTraceAtStartProfiling) {
+ if (env.IsEmpty()) {
+ v8::HandleScope scope;
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ env = v8::Context::New(&config);
+ }
+ v8::HandleScope scope;
+ env->Enter();
+
+ CHECK_EQ(0, CpuProfiler::GetProfilesCount());
+ CompileRun(
+ "function c() { startProfiling(); }\n"
+ "function b() { c(); }\n"
+ "function a() { b(); }\n"
+ "a();\n"
+ "stopProfiling();");
+ CHECK_EQ(1, CpuProfiler::GetProfilesCount());
+ CpuProfile* profile =
+ CpuProfiler::GetProfile(NULL, 0);
+ const ProfileTree* topDown = profile->top_down();
+ const ProfileNode* current = topDown->root();
+ // The tree should look like this:
+ // (root)
+ // (anonymous function)
+ // a
+ // b
+ // c
+ current = PickChild(current, "(anonymous function)");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ current = PickChild(current, "a");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ current = PickChild(current, "b");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ current = PickChild(current, "c");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ CHECK_EQ(0, current->children()->length());
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 3ee0e4e8..2811ee6c 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -32,40 +32,32 @@
using namespace v8::internal;
-static void VerifyRSet(Address page_start) {
-#ifdef DEBUG
- Page::set_rset_state(Page::IN_USE);
-#endif
-
+static void VerifyRegionMarking(Address page_start) {
Page* p = Page::FromAddress(page_start);
- p->ClearRSet();
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
- CHECK(!Page::IsRSetSet(addr, 0));
+ CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
}
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
- Page::SetRSet(addr, 0);
+ Page::FromAddress(addr)->MarkRegionDirty(addr);
}
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
- CHECK(Page::IsRSetSet(addr, 0));
+ CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
TEST(Page) {
-#ifdef DEBUG
- Page::set_rset_state(Page::NOT_IN_USE);
-#endif
-
byte* mem = NewArray<byte>(2*Page::kPageSize);
CHECK(mem != NULL);
@@ -90,8 +82,8 @@ TEST(Page) {
CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());
- // test remember set
- VerifyRSet(page_start);
+ // test region marking
+ VerifyRegionMarking(page_start);
DeleteArray(mem);
}
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 0e30092d..3a9e4da2 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -433,3 +433,51 @@ TEST(ExternalShortStringAdd) {
CHECK_EQ(0,
v8::Script::Compile(v8::String::New(source))->Run()->Int32Value());
}
+
+
+TEST(CachedHashOverflow) {
+ // We incorrectly allowed strings to be tagged as array indices even if their
+ // values didn't fit in the hash field.
+ // See http://code.google.com/p/v8/issues/detail?id=728
+ ZoneScope zone(DELETE_ON_EXIT);
+
+ InitializeVM();
+ v8::HandleScope handle_scope;
+ // Lines must be executed sequentially. Combining them into one script
+ // makes the bug go away.
+ const char* lines[] = {
+ "var x = [];",
+ "x[4] = 42;",
+ "var s = \"1073741828\";",
+ "x[s];",
+ "x[s] = 37;",
+ "x[4];",
+ "x[s];",
+ NULL
+ };
+
+ Handle<Smi> fortytwo(Smi::FromInt(42));
+ Handle<Smi> thirtyseven(Smi::FromInt(37));
+ Handle<Object> results[] = {
+ Factory::undefined_value(),
+ fortytwo,
+ Factory::undefined_value(),
+ Factory::undefined_value(),
+ thirtyseven,
+ fortytwo,
+ thirtyseven // Bug yielded 42 here.
+ };
+
+ const char* line;
+ for (int i = 0; (line = lines[i]); i++) {
+ printf("%s\n", line);
+ v8::Local<v8::Value> result =
+ v8::Script::Compile(v8::String::New(line))->Run();
+ CHECK_EQ(results[i]->IsUndefined(), result->IsUndefined());
+ CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
+ if (result->IsNumber()) {
+ CHECK_EQ(Smi::cast(results[i]->ToSmi())->value(),
+ result->ToInt32()->Value());
+ }
+ }
+}
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 24b3c908..bcb185d2 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -79,3 +79,55 @@ TEST(SNPrintF) {
buffer.Dispose();
}
}
+
+
+void TestMemCopy(Vector<byte> src,
+ Vector<byte> dst,
+ int source_alignment,
+ int destination_alignment,
+ int length_alignment) {
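+ // Prefill the destination with a 0xFF guard pattern; the bytes immediately
+ // outside the copied range are checked at the end to catch overruns.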
+ memset(dst.start(), 0xFF, dst.length());
+ byte* to = dst.start() + 32 + destination_alignment;
+ byte* from = src.start() + source_alignment;
+ int length = kMinComplexMemCopy + length_alignment;
+ MemCopy(to, from, static_cast<size_t>(length));
+ printf("[%d,%d,%d]\n",
+ source_alignment, destination_alignment, length_alignment);
+ for (int i = 0; i < length; i++) {
+ CHECK_EQ(from[i], to[i]);
+ }
+ CHECK_EQ(0xFF, to[-1]);
+ CHECK_EQ(0xFF, to[length]);
+}
+
+
+TEST(MemCopy) {
+ const int N = kMinComplexMemCopy + 128;
+ Vector<byte> buffer1 = Vector<byte>::New(N);
+ Vector<byte> buffer2 = Vector<byte>::New(N);
+
+ for (int i = 0; i < N; i++) {
+ buffer1[i] = static_cast<byte>(i & 0x7F);
+ }
+
+ // Same alignment.
+ for (int i = 0; i < 32; i++) {
+ TestMemCopy(buffer1, buffer2, i, i, i * 2);
+ }
+
+ // Different alignment.
+ for (int i = 0; i < 32; i++) {
+ for (int j = 1; j < 32; j++) {
+ TestMemCopy(buffer1, buffer2, i, (i + j) & 0x1F, 0);
+ }
+ }
+
+ // Different lengths.
+ for (int i = 0; i < 32; i++) {
+ TestMemCopy(buffer1, buffer2, 3, 7, i);
+ }
+
+ buffer2.Dispose();
+ buffer1.Dispose();
+}
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index 5225c327..9d9dc3cd 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -200,11 +200,6 @@ chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-34: FAIL_OK
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
-# getOwnPropertyDescriptor not implemented on array indices
-chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-b-1: FAIL_OK
-
-
-
# We fail this because Object.keys returns numbers for element indices
# rather than strings.
@@ -260,9 +255,6 @@ chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1: FAIL_OK
# Same as 15.4.4.16-7-7
chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-7: FAIL_OK
-# Uses a array index number as a property
-chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-c-iii-1: FAIL_OK
-
chapter15/15.5: UNIMPLEMENTED
chapter15/15.6: UNIMPLEMENTED
diff --git a/test/mjsunit/const-eval-init.js b/test/mjsunit/const-eval-init.js
index d3636de7..5bcd9175 100644
--- a/test/mjsunit/const-eval-init.js
+++ b/test/mjsunit/const-eval-init.js
@@ -91,7 +91,7 @@ function testInitSlowCaseExtension() {
var source = "";
// Introduce 100 properties on the context extension object to force
// it in slow case.
- for (var i = 0; i < 100; i++) source += ("var a" + i + " = i;");
+ for (var i = 0; i < 100; i++) source += ("var a" + i + " = " + i + ";");
source += "const x = 10; assertEquals(10, x); x = 11; assertEquals(10, x)";
eval(source);
}
diff --git a/test/mjsunit/debug-conditional-breakpoints.js b/test/mjsunit/debug-conditional-breakpoints.js
index 5859451b..bd4cdd15 100644
--- a/test/mjsunit/debug-conditional-breakpoints.js
+++ b/test/mjsunit/debug-conditional-breakpoints.js
@@ -45,7 +45,7 @@ Debug.setListener(listener);
count = 0;
function f() {};
function g() {h(count++)};
-function h(x) {var a=x;};
+function h(x) {var a=x; return a};
// Conditional breakpoint which syntax error.
@@ -136,7 +136,7 @@ Debug.clearBreakPoint(bp);
// Conditional breakpoint which checks a local variable.
break_point_hit_count = 0;
-bp = Debug.setBreakPoint(h, 0, 0, 'a % 2 == 0');
+bp = Debug.setBreakPoint(h, 0, 23, 'a % 2 == 0');
for (var i = 0; i < 10; i++) {
g();
}
@@ -146,8 +146,8 @@ Debug.clearBreakPoint(bp);
// Multiple conditional breakpoint which the same condition.
break_point_hit_count = 0;
-bp1 = Debug.setBreakPoint(h, 0, 0, 'a % 2 == 0');
-bp2 = Debug.setBreakPoint(h, 0, 0, 'a % 2 == 0');
+bp1 = Debug.setBreakPoint(h, 0, 23, 'a % 2 == 0');
+bp2 = Debug.setBreakPoint(h, 0, 23, 'a % 2 == 0');
for (var i = 0; i < 10; i++) {
g();
}
@@ -159,8 +159,8 @@ Debug.clearBreakPoint(bp2);
// Multiple conditional breakpoint which different conditions.
break_point_hit_count = 0;
-bp1 = Debug.setBreakPoint(h, 0, 0, 'a % 2 == 0');
-bp2 = Debug.setBreakPoint(h, 0, 0, '(a + 1) % 2 == 0');
+bp1 = Debug.setBreakPoint(h, 0, 23, 'a % 2 == 0');
+bp2 = Debug.setBreakPoint(h, 0, 23, '(a + 1) % 2 == 0');
for (var i = 0; i < 10; i++) {
g();
}
diff --git a/test/mjsunit/debug-return-value.js b/test/mjsunit/debug-return-value.js
new file mode 100644
index 00000000..a9ac5204
--- /dev/null
+++ b/test/mjsunit/debug-return-value.js
@@ -0,0 +1,163 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+
+function ParsedResponse(json) {
+ this.response_ = eval('(' + json + ')');
+ this.refs_ = [];
+ if (this.response_.refs) {
+ for (var i = 0; i < this.response_.refs.length; i++) {
+ this.refs_[this.response_.refs[i].handle] = this.response_.refs[i];
+ }
+ }
+}
+
+
+ParsedResponse.prototype.response = function() {
+ return this.response_;
+}
+
+
+ParsedResponse.prototype.body = function() {
+ return this.response_.body;
+}
+
+
+ParsedResponse.prototype.running = function() {
+ return this.response_.running;
+}
+
+
+ParsedResponse.prototype.lookup = function(handle) {
+ return this.refs_[handle];
+}
+
+
+listener_complete = false;
+exception = false;
+break_count = 0;
+expected_return_value = 0;
+debugger_source_position = 0;
+
+// Listener which expects to take four steps before reaching the return from
+// the function.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break)
+ {
+ break_count++;
+ if (break_count < 4) {
+ assertFalse(exec_state.frame(0).isAtReturn())
+ switch (break_count) {
+ case 1:
+ // Collect the position of the debugger statement.
+ debugger_source_position = exec_state.frame(0).sourcePosition();
+ break;
+ case 2:
+ // Position now at the if statement.
+ assertEquals(debugger_source_position + 10,
+ exec_state.frame(0).sourcePosition());
+ break;
+ case 3:
+ // Position now at either of the returns.
+ if (expected_return_value == 1) {
+ assertEquals(debugger_source_position + 19,
+ exec_state.frame(0).sourcePosition());
+ } else {
+ assertEquals(debugger_source_position + 38,
+ exec_state.frame(0).sourcePosition());
+ }
+ break;
+ default:
+ fail("Unexpected");
+ }
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ } else {
+ // Position at the end of the function.
+ assertEquals(debugger_source_position + 51,
+ exec_state.frame(0).sourcePosition());
+
+ // Just about to return from the function.
+ assertTrue(exec_state.frame(0).isAtReturn())
+ assertEquals(expected_return_value,
+ exec_state.frame(0).returnValue().value());
+
+ // Check the same using the JSON commands.
+ var dcp = exec_state.debugCommandProcessor(false);
+ var request = '{"seq":0,"type":"request","command":"backtrace"}';
+ var resp = dcp.processDebugJSONRequest(request);
+ response = new ParsedResponse(resp);
+ frames = response.body().frames;
+ assertTrue(frames[0].atReturn);
+ assertEquals(expected_return_value,
+ response.lookup(frames[0].returnValue.ref).value);
+
+ listener_complete = true;
+ }
+ }
+ } catch (e) {
+ exception = e
+ };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Four steps from the debugger statement in this function will position us at
+// the function return.
+// 0 1 2 3 4 5
+// 0123456789012345678901234567890123456789012345678901
+
+function f(x) {debugger; if (x) { return 1; } else { return 2; } };
+
+// Call f expecting different return values.
+break_count = 0;
+expected_return_value = 2;
+listener_complete = false;
+f();
+assertFalse(exception, "exception in listener")
+assertTrue(listener_complete);
+assertEquals(4, break_count);
+
+break_count = 0;
+expected_return_value = 1;
+listener_complete = false;
+f(true);
+assertFalse(exception, "exception in listener")
+assertTrue(listener_complete);
+assertEquals(4, break_count);
+
+break_count = 0;
+expected_return_value = 2;
+listener_complete = false;
+f(false);
+assertFalse(exception, "exception in listener")
+assertTrue(listener_complete);
+assertEquals(4, break_count);
diff --git a/test/mjsunit/debug-step.js b/test/mjsunit/debug-step.js
index a887514a..2233e36c 100644
--- a/test/mjsunit/debug-step.js
+++ b/test/mjsunit/debug-step.js
@@ -55,8 +55,9 @@ Debug.setListener(listener);
// Test debug event for break point.
function f() {
- for (i = 0; i < 1000; i++) { // Line 1.
- x = 1; // Line 2.
+ var i; // Line 1.
+ for (i = 0; i < 1000; i++) { // Line 2.
+ x = 1; // Line 3.
}
};
@@ -74,7 +75,7 @@ assertEquals(499, result);
// multiple steps have been requested.
state = 0;
result = -1;
-bp2 = Debug.setBreakPoint(f, 2);
+bp2 = Debug.setBreakPoint(f, 3);
f();
assertEquals(0, result);
diff --git a/test/mjsunit/delete.js b/test/mjsunit/delete.js
index 6fc15e92..8d4636af 100644
--- a/test/mjsunit/delete.js
+++ b/test/mjsunit/delete.js
@@ -44,16 +44,11 @@ assertEquals(42, x);
assertTrue(delete x);
assertTrue(typeof x === 'undefined', "x is gone");
-/****
- * This test relies on DontDelete attributes. This is not
- * working yet.
-
var y = 87; // should have DontDelete attribute
assertEquals(87, y);
assertFalse(delete y, "don't delete");
assertFalse(typeof y === 'undefined');
assertEquals(87, y);
-*/
var o = { x: 42, y: 87 };
assertTrue(has(o, 'x'));
@@ -161,3 +156,25 @@ assertFalse(has(a, 1), "delete 1");
assertFalse(has(a, Math.pow(2,30)-1), "delete 2^30-1");
assertFalse(has(a, Math.pow(2,31)-1), "delete 2^31-1");
assertEquals(Math.pow(2,31), a.length);
+
+// Check that a LoadIC for a dictionary field works, even
+// when the dictionary probe misses.
+function load_deleted_property_using_IC() {
+ var x = new Object();
+ x.a = 3;
+ x.b = 4;
+ x.c = 5;
+
+ delete x.c;
+ assertEquals(3, load_a(x));
+ assertEquals(3, load_a(x));
+ delete x.a;
+ assertTrue(typeof load_a(x) === 'undefined', "x.a is gone");
+ assertTrue(typeof load_a(x) === 'undefined', "x.a is gone");
+}
+
+function load_a(x) {
+ return x.a;
+}
+
+load_deleted_property_using_IC();
diff --git a/test/mjsunit/eval.js b/test/mjsunit/eval.js
index 95357c73..25cfcb62 100644
--- a/test/mjsunit/eval.js
+++ b/test/mjsunit/eval.js
@@ -50,7 +50,7 @@ global_eval = eval;
assertEquals(void 0, eval(eval("var eval = function f(x) { return 'hest';}")))
eval = global_eval;
-//Test eval with different number of parameters.
+// Test eval with different number of parameters.
global_eval = eval;
eval = function(x, y) { return x + y; };
assertEquals(4, eval(2, 2));
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
index 79172c86..ceb77153 100644
--- a/test/mjsunit/get-own-property-descriptor.js
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -25,15 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-function get(){return x}
-function set(x){this.x=x};
+// This file only tests very simple descriptors that always have
+// configurable, enumerable, and writable set to true.
+// A range of more elaborate tests is performed in
+// object-define-property.js.
-var obj = {x:1};
+function get() { return x; }
+function set(x) { this.x = x; }
+
+var obj = {x: 1};
obj.__defineGetter__("accessor", get);
obj.__defineSetter__("accessor", set);
+var a = new Array();
+a[1] = 42;
+obj[1] = 42;
-
-var descIsData = Object.getOwnPropertyDescriptor(obj,'x');
+var descIsData = Object.getOwnPropertyDescriptor(obj, 'x');
assertTrue(descIsData.enumerable);
assertTrue(descIsData.writable);
assertTrue(descIsData.configurable);
@@ -49,3 +56,50 @@ assertTrue(descIsNotData == undefined);
var descIsNotAccessor = Object.getOwnPropertyDescriptor(obj, 'not-accessor');
assertTrue(descIsNotAccessor == undefined);
+
+var descArray = Object.getOwnPropertyDescriptor(a, '1');
+assertTrue(descArray.enumerable);
+assertTrue(descArray.configurable);
+assertTrue(descArray.writable);
+assertEquals(descArray.value, 42);
+
+var descObjectElement = Object.getOwnPropertyDescriptor(obj, '1');
+assertTrue(descObjectElement.enumerable);
+assertTrue(descObjectElement.configurable);
+assertTrue(descObjectElement.writable);
+assertEquals(descObjectElement.value, 42);
+
+// String objects.
+var a = new String('foobar');
+for (var i = 0; i < a.length; i++) {
+ var descStringObject = Object.getOwnPropertyDescriptor(a, i);
+ assertFalse(descStringObject.enumerable);
+ assertFalse(descStringObject.configurable);
+ assertFalse(descStringObject.writable);
+ assertEquals(descStringObject.value, a.substring(i, i+1));
+}
+
+// Support for additional attributes on string objects.
+a.x = 42;
+a[10] = 'foo';
+var descStringProperty = Object.getOwnPropertyDescriptor(a, 'x');
+assertTrue(descStringProperty.enumerable);
+assertTrue(descStringProperty.configurable);
+assertTrue(descStringProperty.writable);
+assertEquals(descStringProperty.value, 42);
+
+var descStringElement = Object.getOwnPropertyDescriptor(a, '10');
+assertTrue(descStringElement.enumerable);
+assertTrue(descStringElement.configurable);
+assertTrue(descStringElement.writable);
+assertEquals(descStringElement.value, 'foo');
+
+// Test that elements in the prototype chain are not returned.
+var proto = {};
+proto[10] = 42;
+
+var objWithProto = new Array();
+objWithProto.prototype = proto;
+objWithProto[0] = 'bar';
+var descWithProto = Object.getOwnPropertyDescriptor(objWithProto, '10');
+assertEquals(undefined, descWithProto);
diff --git a/test/mjsunit/keyed-call-generic.js b/test/mjsunit/keyed-call-generic.js
new file mode 100644
index 00000000..0b49b3e8
--- /dev/null
+++ b/test/mjsunit/keyed-call-generic.js
@@ -0,0 +1,96 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// 'AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A test for keyed call ICs with a mix of smi and string keys.
+
+function testOne(receiver, key, result) {
+ for (var i = 0; i != 10; i++) {
+ assertEquals(result, receiver[key]());
+ }
+}
+
+function testMany(receiver, keys, results) {
+ for (var i = 0; i != 10; i++) {
+ for (var k = 0; k != keys.length; k++) {
+ assertEquals(results[k], receiver[keys[k]]());
+ }
+ }
+}
+
+var toStringNonSymbol = 'to';
+toStringNonSymbol += 'String';
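+// Concatenating the pieces yields a string that is not interned as a symbol,
+// which is what forces the keyed call inline cache into its generic case.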
+
+function TypeOfThis() { return typeof this; }
+
+Number.prototype.square = function() { return this * this; }
+Number.prototype.power4 = function() { return this.square().square(); }
+
+Number.prototype.type = TypeOfThis;
+String.prototype.type = TypeOfThis;
+Boolean.prototype.type = TypeOfThis;
+
+// Use a non-symbol key to force the inline cache into its generic case.
+testOne(0, toStringNonSymbol, '0');
+
+testOne(1, 'toString', '1');
+testOne('1', 'toString', '1');
+testOne(1.0, 'toString', '1');
+
+testOne(1, 'type', 'object');
+testOne(2.3, 'type', 'object');
+testOne('x', 'type', 'object');
+testOne(true, 'type', 'object');
+testOne(false, 'type', 'object');
+
+testOne(2, 'square', 4);
+testOne(2, 'power4', 16);
+
+function zero() { return 0; }
+function one() { return 1; }
+function two() { return 2; }
+
+var fixed_array = [zero, one, two];
+
+var dict_array = [zero, one, two];
+dict_array[100000] = 1;
+
+var fast_prop = { zero: zero, one: one, two: two };
+
+var normal_prop = { zero: zero, one: one, two: two };
+normal_prop.x = 0;
+delete normal_prop.x;
+
+var first3num = [0, 1, 2];
+var first3str = ['zero', 'one', 'two'];
+
+// Use a non-symbol key to force the inline cache into its generic case.
+testMany('123', [toStringNonSymbol, 'charAt', 'charCodeAt'], ['123', '1', 49]);
+
+testMany(fixed_array, first3num, first3num);
+testMany(dict_array, first3num, first3num);
+testMany(fast_prop, first3str, first3num);
+testMany(normal_prop, first3str, first3num);
diff --git a/test/mjsunit/keyed-call-ic.js b/test/mjsunit/keyed-call-ic.js
new file mode 100644
index 00000000..9d82965f
--- /dev/null
+++ b/test/mjsunit/keyed-call-ic.js
@@ -0,0 +1,205 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A test for keyed call ICs.
+
+var toStringName = 'toString';
+var global = this;
+
+function globalFunction1() {
+ return 'function1';
+}
+
+function globalFunction2() {
+ return 'function2';
+}
+
+assertEquals("[object global]", this[toStringName]());
+assertEquals("[object global]", global[toStringName]());
+
+function testGlobals() {
+ assertEquals("[object global]", this[toStringName]());
+ assertEquals("[object global]", global[toStringName]());
+}
+
+testGlobals();
+
+
+function F() {}
+
+F.prototype.one = function() {return 'one'; }
+F.prototype.two = function() {return 'two'; }
+F.prototype.three = function() {return 'three'; }
+
+var keys =
+ ['one', 'one', 'one', 'one', 'two', 'two', 'one', 'three', 'one', 'two'];
+
+function testKeyTransitions() {
+ var i, key, result, message;
+
+ var f = new F();
+
+ // Custom call generators
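+ // (Array push/pop and String charAt/charCodeAt are backed by specialized
+ // call stubs; alternating the keys exercises transitions between them.)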
+ var array = [];
+ for (i = 0; i != 10; i++) {
+ key = (i < 8) ? 'push' : 'pop';
+ array[key](i);
+ }
+
+ assertEquals(6, array.length);
+ for (i = 0; i != array.length; i++) {
+ assertEquals(i, array[i]);
+ }
+
+ for (i = 0; i != 10; i++) {
+ key = (i < 3) ? 'pop' : 'push';
+ array[key](i);
+ }
+
+ assertEquals(10, array.length);
+ for (i = 0; i != array.length; i++) {
+ assertEquals(i, array[i]);
+ }
+
+ var string = 'ABCDEFGHIJ';
+ for (i = 0; i != 10; i++) {
+ key = ((i < 5) ? 'charAt' : 'charCodeAt');
+ result = string[key](i);
+ message = '\'' + string + '\'[\'' + key + '\'](' + i + ')';
+ if (i < 5) {
+ assertEquals(string.charAt(i), result, message);
+ } else {
+ assertEquals(string.charCodeAt(i), result, message);
+ }
+ }
+
+ for (i = 0; i != 10; i++) {
+ key = ((i < 5) ? 'charCodeAt' : 'charAt');
+ result = string[key](i);
+ message = '\'' + string + '\'[\'' + key + '\'](' + i + ')';
+ if (i < 5) {
+ assertEquals(string.charCodeAt(i), result, message);
+ } else {
+ assertEquals(string.charAt(i), result, message);
+ }
+ }
+
+ // Function is a constant property
+ key = 'one';
+ for (i = 0; i != 10; i++) {
+ assertEquals(key, f[key]());
+ if (i == 5) {
+ key = 'two'; // the name change should cause a miss
+ }
+ }
+
+ // Function is a fast property
+ f.field = function() { return 'field'; }
+ key = 'field';
+ for (i = 0; i != 10; i++) {
+ assertEquals(key, f[key]());
+ if (i == 5) {
+ key = 'two'; // the name change should cause a miss
+ }
+ }
+
+ // Calling on slow case object
+ f.prop = 0;
+ delete f.prop; // force the object to the slow case
+ f.four = function() { return 'four'; }
+ f.five = function() { return 'five'; }
+
+ key = 'four';
+ for (i = 0; i != 10; i++) {
+ assertEquals(key, f[key]());
+ if (i == 5) {
+ key = 'five';
+ }
+ }
+
+ // Calling on global object
+ key = 'globalFunction1';
+ var expect = 'function1';
+ for (i = 0; i != 10; i++) {
+ assertEquals(expect, global[key]());
+ if (i == 5) {
+ key = 'globalFunction2';
+ expect = 'function2';
+ }
+ }
+}
+
+testKeyTransitions();
+
+function testTypeTransitions() {
+ var f = new F();
+ var s = '';
+ var m = 'one';
+ var i;
+
+ s = '';
+ for (i = 0; i != 10; i++) {
+ if (i == 5) { F.prototype.one = function() { return '1'; } }
+ s += f[m]();
+ }
+ assertEquals("oneoneoneoneone11111", s);
+
+ s = '';
+ for (i = 0; i != 10; i++) {
+ if (i == 5) { f.__proto__ = { one: function() { return 'I'; } } }
+ s += f[m]();
+ }
+ assertEquals("11111IIIII", s);
+
+ s = '';
+ for (i = 0; i != 10; i++) {
+ if (i == 5) { f.one = function() { return 'ONE'; } }
+ s += f[m]();
+ }
+ assertEquals("IIIIIONEONEONEONEONE", s);
+
+ m = 'toString';
+
+ s = '';
+ var obj = { toString: function() { return '2'; } };
+ for (i = 0; i != 10; i++) {
+ if (i == 5) { obj = "TWO"; }
+ s += obj[m]();
+ }
+ assertEquals("22222TWOTWOTWOTWOTWO", s);
+
+ s = '';
+ obj = { toString: function() { return 'ONE'; } };
+ m = 'toString';
+ for (i = 0; i != 10; i++) {
+ if (i == 5) { obj = 1; }
+ s += obj[m]();
+ }
+ assertEquals("ONEONEONEONEONE11111", s);
+}
+
+testTypeTransitions();
diff --git a/test/mjsunit/regress/regress-728.js b/test/mjsunit/regress/regress-728.js
new file mode 100644
index 00000000..196b1188
--- /dev/null
+++ b/test/mjsunit/regress/regress-728.js
@@ -0,0 +1,42 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var obj = { 0: "obj0" };
+
+// Array index k is too big to fit into the string hash field.
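+// (16777217 is 2^24 + 1, presumably just past the largest index that the
+// hash field has bits to cache.)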
+var k = 16777217;
+var h = "" + k;
+
+obj[k] = "obj" + k;
+
+// Force computation of the hash for the string representation of the array index.
+for (var i = 0; i < 10; i++) { ({})[h]; }
+
+function get(idx) { return obj[idx]; }
+
+assertEquals("obj0", get(0));
+assertEquals("obj" + h, get(h));
diff --git a/test/mjsunit/regress/regress-732.js b/test/mjsunit/regress/regress-732.js
new file mode 100644
index 00000000..2b02ea63
--- /dev/null
+++ b/test/mjsunit/regress/regress-732.js
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// idx is a valid array index but is too big to be cached in the hash field.
+var idx = 10000000;
+
+// Create a JSObject with NumberDictionary as a backing store for elements.
+var obj = { };
+for (var i = 0; i < 100000; i += 100) { obj[i] = "obj" + i; }
+
+// Set value using numeric index.
+obj[idx] = "obj" + idx;
+
+// Make a string from index.
+var str = "" + idx;
+
+// Force hash computation for the string representation of index.
+for (var i = 0; i < 10; i++) { ({})[str]; }
+
+// Try getting the value back using the string and number representations
+// of the same index.
+assertEquals(obj[str], obj[idx]);
diff --git a/test/mjsunit/samevalue.js b/test/mjsunit/samevalue.js
index 6cb35e6e..2de677e6 100644
--- a/test/mjsunit/samevalue.js
+++ b/test/mjsunit/samevalue.js
@@ -1,102 +1,102 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Flags: --expose-natives_as natives
-// Test the SameValue internal method.
-
-var obj1 = {x: 10, y: 11, z: "test"};
-var obj2 = {x: 10, y: 11, z: "test"};
-
-assertTrue(natives.SameValue(0, 0));
-assertTrue(natives.SameValue(+0, +0));
-assertTrue(natives.SameValue(-0, -0));
-assertTrue(natives.SameValue(1, 1));
-assertTrue(natives.SameValue(2, 2));
-assertTrue(natives.SameValue(-1, -1));
-assertTrue(natives.SameValue(0.5, 0.5));
-assertTrue(natives.SameValue(true, true));
-assertTrue(natives.SameValue(false, false));
-assertTrue(natives.SameValue(NaN, NaN));
-assertTrue(natives.SameValue(null, null));
-assertTrue(natives.SameValue("foo", "foo"));
-assertTrue(natives.SameValue(obj1, obj1));
-// Undefined values.
-assertTrue(natives.SameValue());
-assertTrue(natives.SameValue(undefined, undefined));
-
-assertFalse(natives.SameValue(0,1));
-assertFalse(natives.SameValue("foo", "bar"));
-assertFalse(natives.SameValue(obj1, obj2));
-assertFalse(natives.SameValue(true, false));
-
-assertFalse(natives.SameValue(obj1, true));
-assertFalse(natives.SameValue(obj1, "foo"));
-assertFalse(natives.SameValue(obj1, 1));
-assertFalse(natives.SameValue(obj1, undefined));
-assertFalse(natives.SameValue(obj1, NaN));
-
-assertFalse(natives.SameValue(undefined, true));
-assertFalse(natives.SameValue(undefined, "foo"));
-assertFalse(natives.SameValue(undefined, 1));
-assertFalse(natives.SameValue(undefined, obj1));
-assertFalse(natives.SameValue(undefined, NaN));
-
-assertFalse(natives.SameValue(NaN, true));
-assertFalse(natives.SameValue(NaN, "foo"));
-assertFalse(natives.SameValue(NaN, 1));
-assertFalse(natives.SameValue(NaN, obj1));
-assertFalse(natives.SameValue(NaN, undefined));
-
-assertFalse(natives.SameValue("foo", true));
-assertFalse(natives.SameValue("foo", 1));
-assertFalse(natives.SameValue("foo", obj1));
-assertFalse(natives.SameValue("foo", undefined));
-assertFalse(natives.SameValue("foo", NaN));
-
-assertFalse(natives.SameValue(true, 1));
-assertFalse(natives.SameValue(true, obj1));
-assertFalse(natives.SameValue(true, undefined));
-assertFalse(natives.SameValue(true, NaN));
-assertFalse(natives.SameValue(true, "foo"));
-
-assertFalse(natives.SameValue(1, true));
-assertFalse(natives.SameValue(1, obj1));
-assertFalse(natives.SameValue(1, undefined));
-assertFalse(natives.SameValue(1, NaN));
-assertFalse(natives.SameValue(1, "foo"));
-
-// Special string cases.
-assertFalse(natives.SameValue("1", 1));
-assertFalse(natives.SameValue("true", true));
-assertFalse(natives.SameValue("false", false));
-assertFalse(natives.SameValue("undefined", undefined));
-assertFalse(natives.SameValue("NaN", NaN));
-
-// -0 and +0 are should be different
-assertFalse(natives.SameValue(+0, -0));
-assertFalse(natives.SameValue(-0, +0));
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --expose-natives_as natives
+// Test the SameValue internal method.
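+//
+// SameValue (ES5 9.12) behaves like === except that SameValue(NaN, NaN)
+// is true and SameValue(+0, -0) is false. A user-level sketch of the
+// algorithm (not the native under test):
+//
+//   function sameValue(x, y) {
+//     if (x === y) return x !== 0 || 1 / x === 1 / y;  // split +0/-0
+//     return x !== x && y !== y;                       // both NaN
+//   }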
+
+var obj1 = {x: 10, y: 11, z: "test"};
+var obj2 = {x: 10, y: 11, z: "test"};
+
+assertTrue(natives.SameValue(0, 0));
+assertTrue(natives.SameValue(+0, +0));
+assertTrue(natives.SameValue(-0, -0));
+assertTrue(natives.SameValue(1, 1));
+assertTrue(natives.SameValue(2, 2));
+assertTrue(natives.SameValue(-1, -1));
+assertTrue(natives.SameValue(0.5, 0.5));
+assertTrue(natives.SameValue(true, true));
+assertTrue(natives.SameValue(false, false));
+assertTrue(natives.SameValue(NaN, NaN));
+assertTrue(natives.SameValue(null, null));
+assertTrue(natives.SameValue("foo", "foo"));
+assertTrue(natives.SameValue(obj1, obj1));
+// Undefined values.
+assertTrue(natives.SameValue());
+assertTrue(natives.SameValue(undefined, undefined));
+
+assertFalse(natives.SameValue(0,1));
+assertFalse(natives.SameValue("foo", "bar"));
+assertFalse(natives.SameValue(obj1, obj2));
+assertFalse(natives.SameValue(true, false));
+
+assertFalse(natives.SameValue(obj1, true));
+assertFalse(natives.SameValue(obj1, "foo"));
+assertFalse(natives.SameValue(obj1, 1));
+assertFalse(natives.SameValue(obj1, undefined));
+assertFalse(natives.SameValue(obj1, NaN));
+
+assertFalse(natives.SameValue(undefined, true));
+assertFalse(natives.SameValue(undefined, "foo"));
+assertFalse(natives.SameValue(undefined, 1));
+assertFalse(natives.SameValue(undefined, obj1));
+assertFalse(natives.SameValue(undefined, NaN));
+
+assertFalse(natives.SameValue(NaN, true));
+assertFalse(natives.SameValue(NaN, "foo"));
+assertFalse(natives.SameValue(NaN, 1));
+assertFalse(natives.SameValue(NaN, obj1));
+assertFalse(natives.SameValue(NaN, undefined));
+
+assertFalse(natives.SameValue("foo", true));
+assertFalse(natives.SameValue("foo", 1));
+assertFalse(natives.SameValue("foo", obj1));
+assertFalse(natives.SameValue("foo", undefined));
+assertFalse(natives.SameValue("foo", NaN));
+
+assertFalse(natives.SameValue(true, 1));
+assertFalse(natives.SameValue(true, obj1));
+assertFalse(natives.SameValue(true, undefined));
+assertFalse(natives.SameValue(true, NaN));
+assertFalse(natives.SameValue(true, "foo"));
+
+assertFalse(natives.SameValue(1, true));
+assertFalse(natives.SameValue(1, obj1));
+assertFalse(natives.SameValue(1, undefined));
+assertFalse(natives.SameValue(1, NaN));
+assertFalse(natives.SameValue(1, "foo"));
+
+// Special string cases.
+assertFalse(natives.SameValue("1", 1));
+assertFalse(natives.SameValue("true", true));
+assertFalse(natives.SameValue("false", false));
+assertFalse(natives.SameValue("undefined", undefined));
+assertFalse(natives.SameValue("NaN", NaN));
+
+// -0 and +0 should be different.
+assertFalse(natives.SameValue(+0, -0));
+assertFalse(natives.SameValue(-0, +0));
diff --git a/test/mjsunit/string-charat.js b/test/mjsunit/string-charat.js
index d1989dfd..5ce4e894 100644
--- a/test/mjsunit/string-charat.js
+++ b/test/mjsunit/string-charat.js
@@ -27,29 +27,58 @@
var s = "test";
-assertEquals("t", s.charAt());
-assertEquals("t", s.charAt("string"));
-assertEquals("t", s.charAt(null));
-assertEquals("t", s.charAt(void 0));
-assertEquals("t", s.charAt(false));
-assertEquals("e", s.charAt(true));
-assertEquals("", s.charAt(-1));
-assertEquals("", s.charAt(4));
-assertEquals("t", s.charAt(0));
-assertEquals("t", s.charAt(3));
-assertEquals("t", s.charAt(NaN));
-
-assertEquals(116, s.charCodeAt());
-assertEquals(116, s.charCodeAt("string"));
-assertEquals(116, s.charCodeAt(null));
-assertEquals(116, s.charCodeAt(void 0));
-assertEquals(116, s.charCodeAt(false));
-assertEquals(101, s.charCodeAt(true));
-assertEquals(116, s.charCodeAt(0));
-assertEquals(116, s.charCodeAt(3));
-assertEquals(116, s.charCodeAt(NaN));
-assertTrue(isNaN(s.charCodeAt(-1)));
-assertTrue(isNaN(s.charCodeAt(4)));
+function getTwoByteString() { return "\u1234t"; }
+function getCons() { return "testtesttesttest" + getTwoByteString(); }
+
+var slowIndex1 = { valueOf: function() { return 1; } };
+var slowIndex2 = { toString: function() { return "2"; } };
+var slowIndexOutOfRange = { valueOf: function() { return -1; } };
+
+function basicTest(s, len) {
+ assertEquals("t", s().charAt());
+ assertEquals("t", s().charAt("string"));
+ assertEquals("t", s().charAt(null));
+ assertEquals("t", s().charAt(void 0));
+ assertEquals("t", s().charAt(false));
+ assertEquals("e", s().charAt(true));
+ assertEquals("", s().charAt(-1));
+ assertEquals("", s().charAt(len));
+ assertEquals("", s().charAt(slowIndexOutOfRange));
+ assertEquals("", s().charAt(1/0));
+ assertEquals("", s().charAt(-1/0));
+ assertEquals("t", s().charAt(0));
+ assertEquals("t", s().charAt(-0.0));
+ assertEquals("t", s().charAt(-0.1));
+ assertEquals("t", s().charAt(0.4));
+ assertEquals("e", s().charAt(slowIndex1));
+ assertEquals("s", s().charAt(slowIndex2));
+ assertEquals("t", s().charAt(3));
+ assertEquals("t", s().charAt(3.4));
+ assertEquals("t", s().charAt(NaN));
+
+ assertEquals(116, s().charCodeAt());
+ assertEquals(116, s().charCodeAt("string"));
+ assertEquals(116, s().charCodeAt(null));
+ assertEquals(116, s().charCodeAt(void 0));
+ assertEquals(116, s().charCodeAt(false));
+ assertEquals(101, s().charCodeAt(true));
+ assertEquals(116, s().charCodeAt(0));
+ assertEquals(116, s().charCodeAt(-0.0));
+ assertEquals(116, s().charCodeAt(-0.1));
+ assertEquals(116, s().charCodeAt(0.4));
+ assertEquals(101, s().charCodeAt(slowIndex1));
+ assertEquals(115, s().charCodeAt(slowIndex2));
+ assertEquals(116, s().charCodeAt(3));
+ assertEquals(116, s().charCodeAt(3.4));
+ assertEquals(116, s().charCodeAt(NaN));
+ assertTrue(isNaN(s().charCodeAt(-1)));
+ assertTrue(isNaN(s().charCodeAt(len)));
+ assertTrue(isNaN(s().charCodeAt(slowIndexOutOfRange)));
+ assertTrue(isNaN(s().charCodeAt(1/0)));
+ assertTrue(isNaN(s().charCodeAt(-1/0)));
+}
+basicTest(function() { return s; }, s.length);
+basicTest(getCons, getCons().length);
// Make sure enough of the one-char string cache is filled.
var alpha = ['@'];
@@ -64,3 +93,163 @@ for (var i = 1; i < 128; i++) {
assertEquals(alpha[i], alphaStr.charAt(i));
assertEquals(String.fromCharCode(i), alphaStr.charAt(i));
}
+
+// Test stealing String.prototype.{charAt,charCodeAt}.
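+// The built-ins coerce a non-string receiver with ToString, so
+// o.charAt(1) should behave like String(o).charAt(1); toString below is
+// consulted and valueOf never is, since toString already returns a
+// primitive.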
+var o = {
+ charAt: String.prototype.charAt,
+ charCodeAt: String.prototype.charCodeAt,
+ toString: function() { return "012"; },
+ valueOf: function() { return "should not be called"; }
+};
+
+function stealTest() {
+ assertEquals("0", o.charAt(0));
+ assertEquals("1", o.charAt(1));
+ assertEquals("1", o.charAt(1.4));
+ assertEquals("1", o.charAt(slowIndex1));
+ assertEquals("2", o.charAt(2));
+ assertEquals("2", o.charAt(slowIndex2));
+ assertEquals(48, o.charCodeAt(0));
+ assertEquals(49, o.charCodeAt(1));
+ assertEquals(49, o.charCodeAt(1.4));
+ assertEquals(49, o.charCodeAt(slowIndex1));
+ assertEquals(50, o.charCodeAt(2));
+ assertEquals(50, o.charCodeAt(slowIndex2));
+ assertEquals("", o.charAt(-1));
+ assertEquals("", o.charAt(-1.4));
+ assertEquals("", o.charAt(10));
+ assertEquals("", o.charAt(slowIndexOutOfRange));
+ assertTrue(isNaN(o.charCodeAt(-1)));
+ assertTrue(isNaN(o.charCodeAt(-1.4)));
+ assertTrue(isNaN(o.charCodeAt(10)));
+ assertTrue(isNaN(o.charCodeAt(slowIndexOutOfRange)));
+}
+stealTest();
+
+// Test custom string ICs.
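+// Repeating the tests presumably lets those ICs move out of their
+// initial states and into cached fast paths.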
+for (var i = 0; i < 20; i++) {
+ basicTest(function() { return s; }, s.length);
+ basicTest(getCons, getCons().length);
+ stealTest();
+}
+
+var badToString = function() { return []; };
+
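+// If both toString and valueOf return non-primitives, ToString throws a
+// TypeError; the tests below expect that throw on the 11th iteration.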
+function testBadToString_charAt() {
+ var goodToString = o.toString;
+ var hasCaught = false;
+ var numCalls = 0;
+ var result;
+ try {
+ for (var i = 0; i < 20; i++) {
+ if (i == 10) o.toString = o.valueOf = badToString;
+ result = o.charAt(1);
+ numCalls++;
+ }
+ } catch (e) {
+ hasCaught = true;
+ } finally {
+ o.toString = goodToString;
+ }
+ assertTrue(hasCaught);
+ assertEquals("1", result);
+ assertEquals(10, numCalls);
+}
+testBadToString_charAt();
+
+function testBadToString_charCodeAt() {
+ var goodToString = o.toString;
+ var hasCaught = false;
+ var numCalls = 0;
+ var result;
+ try {
+ for (var i = 0; i < 20; i++) {
+ if (i == 10) o.toString = o.valueOf = badToString;
+ result = o.charCodeAt(1);
+ numCalls++;
+ }
+ } catch (e) {
+ hasCaught = true;
+ } finally {
+ o.toString = goodToString;
+ }
+ assertTrue(hasCaught);
+ assertEquals(49, result);
+ assertEquals(10, numCalls);
+}
+testBadToString_charCodeAt();
+
+var badIndex = {
+ toString: badToString,
+ valueOf: badToString
+};
+
+function testBadIndex_charAt() {
+ var index = 1;
+ var hasCaught = false;
+ var numCalls = 0;
+ var result;
+ try {
+ for (var i = 0; i < 20; i++) {
+ if (i == 10) index = badIndex;
+ result = o.charAt(index);
+ numCalls++;
+ }
+ } catch (e) {
+ hasCaught = true;
+ }
+ assertTrue(hasCaught);
+ assertEquals("1", result);
+ assertEquals(10, numCalls);
+}
+testBadIndex_charAt();
+
+function testBadIndex_charCodeAt() {
+ var index = 1;
+ var hasCaught = false;
+ var numCalls = 0;
+ var result;
+ try {
+ for (var i = 0; i < 20; i++) {
+ if (i == 10) index = badIndex;
+ result = o.charCodeAt(index);
+ numCalls++;
+ }
+ } catch (e) {
+ hasCaught = true;
+ }
+ assertTrue(hasCaught);
+ assertEquals(49, result);
+ assertEquals(10, numCalls);
+}
+testBadIndex_charCodeAt();
+
+function testPrototypeChange_charAt() {
+ var original = String.prototype.charAt;
+ var result, oldResult;
+ for (var i = 0; i < 20; i++) {
+ if (i == 10) {
+ oldResult = result;
+ String.prototype.charAt = function() { return "%"; };
+ }
+ result = s.charAt(1);
+ }
+ assertEquals("%", result);
+ assertEquals("e", oldResult);
+ // 'delete' would remove charAt entirely instead of restoring it,
+ // so put the saved built-in back.
+ String.prototype.charAt = original;
+}
+testPrototypeChange_charAt();
+
+function testPrototypeChange_charCodeAt() {
+ var original = String.prototype.charCodeAt;
+ var result, oldResult;
+ for (var i = 0; i < 20; i++) {
+ if (i == 10) {
+ oldResult = result;
+ String.prototype.charCodeAt = function() { return 42; };
+ }
+ result = s.charCodeAt(1);
+ }
+ assertEquals(42, result);
+ assertEquals(101, oldResult);
+ // 'delete' would remove charCodeAt entirely instead of restoring it,
+ // so put the saved built-in back.
+ String.prototype.charCodeAt = original;
+}
+testPrototypeChange_charCodeAt();
diff --git a/test/mjsunit/string-charcodeat.js b/test/mjsunit/string-charcodeat.js
index 39275577..831f688f 100644
--- a/test/mjsunit/string-charcodeat.js
+++ b/test/mjsunit/string-charcodeat.js
@@ -153,6 +153,10 @@ TestStringType(Slice16End, true);
TestStringType(Flat16, true);
TestStringType(NotAString16, true);
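+
+// Exercise the charCodeAt/charAt custom call stubs with a two-byte cons
+// string and a non-smi index; repetition presumably lets the ICs settle
+// into their fast state.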
+for (var i = 0; i != 10; i++) {
+ assertEquals(101, Cons16().charCodeAt(1.1));
+ assertEquals('e', Cons16().charAt(1.1));
+}
function StupidThing() {
// Doesn't return a string from toString!
diff --git a/test/mjsunit/string-index.js b/test/mjsunit/string-index.js
index 866faa89..1d6476ea 100644
--- a/test/mjsunit/string-index.js
+++ b/test/mjsunit/string-index.js
@@ -207,6 +207,28 @@ for (var i = 0; i < 100; ++i) {
assertEquals(expected, actual);
}
+// Test negative zero case.
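+// (ToString(-0) is "0", so str[-0.0] should load index 0, just like
+// str[0].)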
+var keys = [0, -0.0];
+var str = 'ab', arr = ['a', 'a'];
+for (var i = 0; i < 100; ++i) {
+ var index = Math.floor(i / 50);
+ var key = keys[index];
+ var expected = arr[index];
+ var actual = str[key];
+ assertEquals(expected, actual);
+}
+
+// Test "not-an-array-index" case.
+var keys = [0, 0.5];
+var str = 'ab', arr = ['a', undefined];
+for (var i = 0; i < 100; ++i) {
+ var index = Math.floor(i / 50);
+ var key = keys[index];
+ var expected = arr[index];
+ var actual = str[key];
+ assertEquals(expected, actual);
+}
+
// Test out of range case.
var keys = [0, -1];
var str = 'ab', arr = ['a', undefined];
@@ -234,4 +256,4 @@ for (var i = 0; i < 50; ++i) {
var expected = arr[0];
var actual = str[0];
assertEquals(expected, actual);
-} \ No newline at end of file
+}