authorKristian Monsen <kristianm@google.com>2010-06-28 14:14:28 +0100
committerKristian Monsen <kristianm@google.com>2010-07-02 09:44:56 +0100
commit9dcf7e2f83591d471e88bf7d230651900b8e424b (patch)
tree0a26792d5c298ecf46ab9be2252662fee5628f66
parenta94adf74b8a91ff002b9cade1736e5c4a50d52fb (diff)
downloadandroid_external_v8-9dcf7e2f83591d471e88bf7d230651900b8e424b.tar.gz
android_external_v8-9dcf7e2f83591d471e88bf7d230651900b8e424b.tar.bz2
android_external_v8-9dcf7e2f83591d471e88bf7d230651900b8e424b.zip
Update V8 to r4924 as required by WebKit r61871
Change-Id: Ic819dad0c1c9e035b8ffd306c96656ba87c5e85a
-rw-r--r--  ChangeLog  30
-rw-r--r--  V8_MERGE_REVISION  4
-rw-r--r--  include/v8-profiler.h  141
-rw-r--r--  include/v8.h  70
-rw-r--r--  src/api.cc  307
-rw-r--r--  src/arm/assembler-arm-inl.h  5
-rw-r--r--  src/arm/assembler-arm.cc  122
-rw-r--r--  src/arm/assembler-arm.h  44
-rw-r--r--  src/arm/builtins-arm.cc  20
-rw-r--r--  src/arm/codegen-arm.cc  415
-rw-r--r--  src/arm/codegen-arm.h  10
-rw-r--r--  src/arm/constants-arm.h  3
-rw-r--r--  src/arm/disasm-arm.cc  22
-rw-r--r--  src/arm/fast-codegen-arm.cc  3
-rw-r--r--  src/arm/full-codegen-arm.cc  76
-rw-r--r--  src/arm/ic-arm.cc  465
-rw-r--r--  src/arm/macro-assembler-arm.cc  125
-rw-r--r--  src/arm/macro-assembler-arm.h  23
-rw-r--r--  src/arm/simulator-arm.cc  9
-rw-r--r--  src/arm/stub-cache-arm.cc  28
-rw-r--r--  src/arm/virtual-frame-arm.cc  1
-rw-r--r--  src/arm/virtual-frame-arm.h  5
-rw-r--r--  src/bootstrapper.cc  1
-rw-r--r--  src/checks.h  8
-rw-r--r--  src/debug-debugger.js  6
-rw-r--r--  src/debug.cc  8
-rw-r--r--  src/debug.h  4
-rw-r--r--  src/execution.cc  114
-rw-r--r--  src/execution.h  13
-rw-r--r--  src/flag-definitions.h  4
-rw-r--r--  src/heap-profiler.cc  79
-rw-r--r--  src/heap-profiler.h  32
-rw-r--r--  src/heap.cc  53
-rw-r--r--  src/heap.h  6
-rw-r--r--  src/ia32/assembler-ia32-inl.h  4
-rw-r--r--  src/ia32/assembler-ia32.cc  16
-rw-r--r--  src/ia32/assembler-ia32.h  10
-rw-r--r--  src/ia32/codegen-ia32.cc  104
-rw-r--r--  src/ia32/ic-ia32.cc  134
-rw-r--r--  src/ia32/stub-cache-ia32.cc  9
-rw-r--r--  src/ic.cc  39
-rw-r--r--  src/jsregexp.cc  6
-rw-r--r--  src/objects-debug.cc  2
-rw-r--r--  src/objects-inl.h  19
-rw-r--r--  src/objects.cc  35
-rw-r--r--  src/objects.h  67
-rw-r--r--  src/profile-generator.cc  66
-rw-r--r--  src/profile-generator.h  25
-rw-r--r--  src/runtime.cc  76
-rwxr-xr-x  src/scanner.cc  3
-rw-r--r--  src/scanner.h  113
-rw-r--r--  src/scopeinfo.cc  12
-rw-r--r--  src/scopeinfo.h  3
-rw-r--r--  src/serialize.cc  52
-rw-r--r--  src/stub-cache.cc  34
-rw-r--r--  src/utils.h  35
-rw-r--r--  src/v8.cc  4
-rw-r--r--  src/version.cc  4
-rw-r--r--  src/virtual-frame-light-inl.h  4
-rw-r--r--  src/x64/assembler-x64-inl.h  5
-rw-r--r--  src/x64/assembler-x64.cc  25
-rw-r--r--  src/x64/assembler-x64.h  10
-rw-r--r--  src/x64/codegen-x64.cc  657
-rw-r--r--  src/x64/full-codegen-x64.cc  62
-rw-r--r--  src/x64/ic-x64.cc  535
-rw-r--r--  src/x64/macro-assembler-x64.cc  167
-rw-r--r--  src/x64/macro-assembler-x64.h  11
-rw-r--r--  src/x64/stub-cache-x64.cc  60
-rw-r--r--  src/x64/virtual-frame-x64.cc  37
-rw-r--r--  src/x64/virtual-frame-x64.h  2
-rw-r--r--  test/cctest/test-api.cc  80
-rw-r--r--  test/cctest/test-debug.cc  55
-rw-r--r--  test/cctest/test-decls.cc  2
-rw-r--r--  test/cctest/test-disasm-arm.cc  66
-rw-r--r--  test/cctest/test-disasm-ia32.cc  4
-rw-r--r--  test/cctest/test-func-name-inference.cc  2
-rw-r--r--  test/cctest/test-heap-profiler.cc  234
-rw-r--r--  test/cctest/test-liveedit.cc  4
-rw-r--r--  test/cctest/test-serialize.cc  18
-rw-r--r--  test/mjsunit/apply.js  15
-rw-r--r--  test/mjsunit/debug-setbreakpoint.js  8
-rw-r--r--  test/mjsunit/keyed-call-generic.js  17
-rw-r--r--  test/mjsunit/mjsunit.status  3
-rw-r--r--  test/mjsunit/object-define-property.js  153
-rw-r--r--  test/mjsunit/regress/regress-619.js (renamed from test/mjsunit/bugs/bug-619.js)  5
-rw-r--r--  test/mjsunit/regress/regress-747.js  56
-rw-r--r--  test/mjsunit/samevalue.js  204
-rw-r--r--  test/mjsunit/string-externalize.js  95
88 files changed, 4140 insertions, 1584 deletions
diff --git a/ChangeLog b/ChangeLog
index 941c314a..95c11337 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,33 @@
+2010-06-23: Version 2.2.19
+
+ Fix bug that causes the build to break when profilingsupport=off
+ (issue 738).
+
+ Added expose-externalize-string flag for testing extensions.
+
+ Resolve linker issues with using V8 as a DLL causing a number of
+ problems with unresolved symbols.
+
+ Fix build failure for cctests when ENABLE_DEBUGGER_SUPPORT is not
+ defined.
+
+ Performance improvements on all platforms.
+
+
+2010-06-16: Version 2.2.18
+
+ Added API functions to retrieve information on indexed properties
+ managed by the embedding layer. Fixes bug 737.
+
+ Make ES5 Object.defineProperty support array elements. Fixes bug 619.
+
+ Add heap profiling to the API.
+
+ Remove old named property query from the API.
+
+ Incremental performance improvements.
+
+
2010-06-14: Version 2.2.17
Improved debugger support for stepping out of functions.
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 1a569dd3..428ddacf 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/6.0.436.0/DEPS
-http://v8.googlecode.com/svn/trunk@4851
+http://src.chromium.org/svn/releases/6.0.450.0/DEPS
+http://v8.googlecode.com/svn/trunk@4924
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index bb410722..3e1952c7 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -184,6 +184,147 @@ class V8EXPORT CpuProfiler {
};
+class HeapGraphNode;
+
+
+/**
+ * HeapGraphEdge represents a directed connection between heap
+ * graph nodes: from retainers to retained nodes.
+ */
+class V8EXPORT HeapGraphEdge {
+ public:
+ enum Type {
+ CONTEXT_VARIABLE = 0, // A variable from a function context.
+ ELEMENT = 1, // An element of an array.
+ PROPERTY = 2, // A named object property.
+ INTERNAL = 3 // A link that can't be accessed from JS,
+ // thus, its name isn't a real property name.
+ };
+
+ /** Returns edge type (see HeapGraphEdge::Type). */
+ Type GetType() const;
+
+ /**
+ * Returns edge name. This can be a variable name, an element index, or
+ * a property name.
+ */
+ Handle<Value> GetName() const;
+
+ /** Returns origin node. */
+ const HeapGraphNode* GetFromNode() const;
+
+ /** Returns destination node. */
+ const HeapGraphNode* GetToNode() const;
+};
+
+
+class V8EXPORT HeapGraphPath {
+ public:
+ /** Returns the number of edges in the path. */
+ int GetEdgesCount() const;
+
+ /** Returns an edge from the path. */
+ const HeapGraphEdge* GetEdge(int index) const;
+
+ /** Returns origin node. */
+ const HeapGraphNode* GetFromNode() const;
+
+ /** Returns destination node. */
+ const HeapGraphNode* GetToNode() const;
+};
+
+
+/**
+ * HeapGraphNode represents a node in a heap graph.
+ */
+class V8EXPORT HeapGraphNode {
+ public:
+ enum Type {
+ INTERNAL = 0, // Internal node, a virtual one, for housekeeping.
+ ARRAY = 1, // An array of elements.
+ STRING = 2, // A string.
+ OBJECT = 3, // A JS object (except for arrays and strings).
+ CODE = 4, // Compiled code.
+ CLOSURE = 5 // Function closure.
+ };
+
+ /** Returns node type (see HeapGraphNode::Type). */
+ Type GetType() const;
+
+ /**
+ * Returns node name. Depending on node's type this can be the name
+ * of the constructor (for objects), the name of the function (for
+ * closures), string value, or an empty string (for compiled code).
+ */
+ Handle<String> GetName() const;
+
+ /** Returns node's own size, in bytes. */
+ int GetSelfSize() const;
+
+ /** Returns node's total (self + reachable nodes) size, in bytes. */
+ int GetTotalSize() const;
+
+ /**
+ * Returns node's private size, in bytes. That is, the size of memory
+ * that would be reclaimed if this node were collected.
+ */
+ int GetPrivateSize() const;
+
+ /** Returns the number of child nodes of this node. */
+ int GetChildrenCount() const;
+
+ /** Retrieves a child by index. */
+ const HeapGraphEdge* GetChild(int index) const;
+
+ /** Returns the number of retainer nodes of this node. */
+ int GetRetainersCount() const;
+
+ /** Returns a retainer by index. */
+ const HeapGraphEdge* GetRetainer(int index) const;
+
+ /** Returns the number of simple retaining paths from the root to the node. */
+ int GetRetainingPathsCount() const;
+
+ /** Returns a retaining path by index. */
+ const HeapGraphPath* GetRetainingPath(int index) const;
+};
+
+
+/**
+ * HeapSnapshots record the state of the JS heap at some moment.
+ */
+class V8EXPORT HeapSnapshot {
+ public:
+ /** Returns heap snapshot UID (assigned by the profiler). */
+ unsigned GetUid() const;
+
+ /** Returns heap snapshot title. */
+ Handle<String> GetTitle() const;
+
+ /** Returns the root node of the heap graph. */
+ const HeapGraphNode* GetHead() const;
+};
+
+
+/**
+ * Interface for controlling heap profiling.
+ */
+class V8EXPORT HeapProfiler {
+ public:
+ /** Returns the number of snapshots taken. */
+ static int GetSnapshotsCount();
+
+ /** Returns a snapshot by index. */
+ static const HeapSnapshot* GetSnapshot(int index);
+
+ /** Returns a snapshot by uid. */
+ static const HeapSnapshot* FindSnapshot(unsigned uid);
+
+ /** Takes a heap snapshot and returns it. Title may be an empty string. */
+ static const HeapSnapshot* TakeSnapshot(Handle<String> title);
+};
+
+
} // namespace v8
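For orientation, a minimal sketch of how an embedder might drive this new heap profiling API (assuming profiling support is compiled in; the PrintNode helper and its depth cutoff are illustrative, not part of the API):

#include <v8.h>
#include <v8-profiler.h>
#include <cstdio>

// Recursively print a node and the first couple of levels of children.
static void PrintNode(const v8::HeapGraphNode* node, int depth) {
  v8::String::AsciiValue name(node->GetName());
  std::printf("%*s%s (self size: %d bytes)\n",
              depth * 2, "", *name, node->GetSelfSize());
  if (depth >= 2) return;  // Keep the sketch's output small.
  for (int i = 0; i < node->GetChildrenCount(); i++) {
    PrintNode(node->GetChild(i)->GetToNode(), depth + 1);
  }
}

static void DumpHeap() {
  v8::HandleScope scope;
  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("dump"));
  PrintNode(snapshot->GetHead(), 0);
}

GetRetainersCount() and GetRetainer() can be walked the same way to answer the reverse question of what keeps an object alive.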
diff --git a/include/v8.h b/include/v8.h
index 24b4cbe3..b6256187 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1570,6 +1570,9 @@ class V8EXPORT Object : public Value {
* the backing store is preserved while V8 has a reference.
*/
void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
+ bool HasIndexedPropertiesInPixelData();
+ uint8_t* GetIndexedPropertiesPixelData();
+ int GetIndexedPropertiesPixelDataLength();
/**
* Set the backing store of the indexed properties to be managed by the
@@ -1581,6 +1584,10 @@ class V8EXPORT Object : public Value {
void SetIndexedPropertiesToExternalArrayData(void* data,
ExternalArrayType array_type,
int number_of_elements);
+ bool HasIndexedPropertiesInExternalArrayData();
+ void* GetIndexedPropertiesExternalArrayData();
+ ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
+ int GetIndexedPropertiesExternalArrayDataLength();
static Local<Object> New();
static inline Object* Cast(Value* obj);
@@ -1761,20 +1768,11 @@ typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
/**
* Returns a non-empty handle if the interceptor intercepts the request.
- * The result is either boolean (true if property exists and false
- * otherwise) or an integer encoding property attributes.
+ * The result is an integer encoding property attributes (like v8::None,
+ * v8::DontEnum, etc.)
*/
-#ifdef USE_NEW_QUERY_CALLBACKS
typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
-#else
-typedef Handle<Boolean> (*NamedPropertyQuery)(Local<String> property,
- const AccessorInfo& info);
-#endif
-
-typedef Handle<Value> (*NamedPropertyQueryImpl)(Local<String> property,
- const AccessorInfo& info);
-
/**
@@ -2026,16 +2024,7 @@ class V8EXPORT FunctionTemplate : public Template {
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- NamedPropertyQueryImpl casted =
- reinterpret_cast<NamedPropertyQueryImpl>(query);
- SetNamedInstancePropertyHandlerImpl(getter,
- setter,
- casted,
- remover,
- enumerator,
- data);
- }
+ Handle<Value> data);
void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
IndexedPropertySetter setter,
IndexedPropertyQuery query,
@@ -2047,13 +2036,6 @@ class V8EXPORT FunctionTemplate : public Template {
friend class Context;
friend class ObjectTemplate;
- private:
- void SetNamedInstancePropertyHandlerImpl(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQueryImpl query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data);
};
@@ -2111,7 +2093,8 @@ class V8EXPORT ObjectTemplate : public Template {
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check if an object has a property.
+ * \param query The callback to invoke to check if a property is present,
+ * and if present, get its attributes.
* \param deleter The callback to invoke when deleting a property.
* \param enumerator The callback to invoke to enumerate all the named
* properties of an object.
@@ -2123,26 +2106,7 @@ class V8EXPORT ObjectTemplate : public Template {
NamedPropertyQuery query = 0,
NamedPropertyDeleter deleter = 0,
NamedPropertyEnumerator enumerator = 0,
- Handle<Value> data = Handle<Value>()) {
- NamedPropertyQueryImpl casted =
- reinterpret_cast<NamedPropertyQueryImpl>(query);
- SetNamedPropertyHandlerImpl(getter,
- setter,
- casted,
- deleter,
- enumerator,
- data);
- }
-
- private:
- void SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQueryImpl query,
- NamedPropertyDeleter deleter,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data);
-
- public:
+ Handle<Value> data = Handle<Value>());
/**
* Sets an indexed property handler on the object template.
@@ -3247,11 +3211,9 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
- // These constants are compiler dependent so their values must be
- // defined within the implementation.
- V8EXPORT static int kJSObjectType;
- V8EXPORT static int kFirstNonstringType;
- V8EXPORT static int kProxyType;
+ static const int kJSObjectType = 0x9f;
+ static const int kFirstNonstringType = 0x80;
+ static const int kProxyType = 0x85;
static inline bool HasHeapObjectTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
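With the USE_NEW_QUERY_CALLBACKS transition machinery removed, the integer-returning signature is now the only form of a named property query interceptor. A sketch of a callback under the new contract (MyObjectHasProperty is a hypothetical embedder-side lookup):

#include <v8.h>

// Hypothetical: does the embedder's native object carry this property?
bool MyObjectHasProperty(const v8::AccessorInfo& info,
                         v8::Local<v8::String> property);

// A non-empty handle means the request is intercepted; the integer
// encodes the property's attributes (v8::None, v8::DontEnum, etc.).
static v8::Handle<v8::Integer> QueryProperty(v8::Local<v8::String> property,
                                             const v8::AccessorInfo& info) {
  if (MyObjectHasProperty(info, property)) {
    return v8::Integer::New(v8::None);  // Present, default attributes.
  }
  return v8::Handle<v8::Integer>();  // Empty handle: not intercepted.
}

A callback like this is installed as the query argument of ObjectTemplate::SetNamedPropertyHandler.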
diff --git a/src/api.cc b/src/api.cc
index cb5e96df..464ca54d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -34,6 +34,7 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
+#include "heap-profiler.h"
#include "messages.h"
#include "platform.h"
#include "profile-generator-inl.h"
@@ -105,9 +106,6 @@ static i::HandleScopeImplementer thread_local;
static FatalErrorCallback exception_behavior = NULL;
-int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
-int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
-int i::Internals::kProxyType = PROXY_TYPE;
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
@@ -853,10 +851,10 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
}
-void FunctionTemplate::SetNamedInstancePropertyHandlerImpl(
+void FunctionTemplate::SetNamedInstancePropertyHandler(
NamedPropertyGetter getter,
NamedPropertySetter setter,
- NamedPropertyQueryImpl query,
+ NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
@@ -987,13 +985,12 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
}
-void ObjectTemplate::SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQueryImpl query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator
- enumerator,
- Handle<Value> data) {
+void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
ENTER_V8;
HandleScope scope;
@@ -1001,12 +998,12 @@ void ObjectTemplate::SetNamedPropertyHandlerImpl(NamedPropertyGetter getter,
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandlerImpl(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
+ Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
}
@@ -2613,6 +2610,35 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
}
+bool v8::Object::HasIndexedPropertiesInPixelData() {
+ ON_BAILOUT("v8::HasIndexedPropertiesInPixelData()", return false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ return self->HasPixelElements();
+}
+
+
+uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
+ ON_BAILOUT("v8::GetIndexedPropertiesPixelData()", return NULL);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (self->HasPixelElements()) {
+ return i::PixelArray::cast(self->elements())->external_pointer();
+ } else {
+ return NULL;
+ }
+}
+
+
+int v8::Object::GetIndexedPropertiesPixelDataLength() {
+ ON_BAILOUT("v8::GetIndexedPropertiesPixelDataLength()", return -1);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (self->HasPixelElements()) {
+ return i::PixelArray::cast(self->elements())->length();
+ } else {
+ return -1;
+ }
+}
+
+
void v8::Object::SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
@@ -2637,6 +2663,60 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
}
+bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
+ ON_BAILOUT("v8::HasIndexedPropertiesInExternalArrayData()", return false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ return self->HasExternalArrayElements();
+}
+
+
+void* v8::Object::GetIndexedPropertiesExternalArrayData() {
+ ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayData()", return NULL);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (self->HasExternalArrayElements()) {
+ return i::ExternalArray::cast(self->elements())->external_pointer();
+ } else {
+ return NULL;
+ }
+}
+
+
+ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
+ ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataType()",
+ return static_cast<ExternalArrayType>(-1));
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ switch (self->elements()->map()->instance_type()) {
+ case i::EXTERNAL_BYTE_ARRAY_TYPE:
+ return kExternalByteArray;
+ case i::EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return kExternalUnsignedByteArray;
+ case i::EXTERNAL_SHORT_ARRAY_TYPE:
+ return kExternalShortArray;
+ case i::EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return kExternalUnsignedShortArray;
+ case i::EXTERNAL_INT_ARRAY_TYPE:
+ return kExternalIntArray;
+ case i::EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return kExternalUnsignedIntArray;
+ case i::EXTERNAL_FLOAT_ARRAY_TYPE:
+ return kExternalFloatArray;
+ default:
+ return static_cast<ExternalArrayType>(-1);
+ }
+}
+
+
+int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
+ ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataLength()", return 0);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (self->HasExternalArrayElements()) {
+ return i::ExternalArray::cast(self->elements())->length();
+ } else {
+ return -1;
+ }
+}
+
+
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
@@ -4363,6 +4443,197 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
*Utils::OpenHandle(*title)));
}
+
+HeapGraphEdge::Type HeapGraphEdge::GetType() const {
+ IsDeadCheck("v8::HeapGraphEdge::GetType");
+ return static_cast<HeapGraphEdge::Type>(
+ reinterpret_cast<const i::HeapGraphEdge*>(this)->type());
+}
+
+
+Handle<Value> HeapGraphEdge::GetName() const {
+ IsDeadCheck("v8::HeapGraphEdge::GetName");
+ const i::HeapGraphEdge* edge =
+ reinterpret_cast<const i::HeapGraphEdge*>(this);
+ switch (edge->type()) {
+ case i::HeapGraphEdge::CONTEXT_VARIABLE:
+ case i::HeapGraphEdge::INTERNAL:
+ case i::HeapGraphEdge::PROPERTY:
+ return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ edge->name())));
+ case i::HeapGraphEdge::ELEMENT:
+ return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
+ edge->index())));
+ default: UNREACHABLE();
+ }
+ return ImplementationUtilities::Undefined();
+}
+
+
+const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
+ IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
+ const i::HeapEntry* from =
+ reinterpret_cast<const i::HeapGraphEdge*>(this)->from();
+ return reinterpret_cast<const HeapGraphNode*>(from);
+}
+
+
+const HeapGraphNode* HeapGraphEdge::GetToNode() const {
+ IsDeadCheck("v8::HeapGraphEdge::GetToNode");
+ const i::HeapEntry* to =
+ reinterpret_cast<const i::HeapGraphEdge*>(this)->to();
+ return reinterpret_cast<const HeapGraphNode*>(to);
+}
+
+
+int HeapGraphPath::GetEdgesCount() const {
+ return reinterpret_cast<const i::HeapGraphPath*>(this)->path()->length();
+}
+
+
+const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
+ return reinterpret_cast<const HeapGraphEdge*>(
+ reinterpret_cast<const i::HeapGraphPath*>(this)->path()->at(index));
+}
+
+
+const HeapGraphNode* HeapGraphPath::GetFromNode() const {
+ return GetEdgesCount() > 0 ? GetEdge(0)->GetFromNode() : NULL;
+}
+
+
+const HeapGraphNode* HeapGraphPath::GetToNode() const {
+ const int count = GetEdgesCount();
+ return count > 0 ? GetEdge(count - 1)->GetToNode() : NULL;
+}
+
+
+HeapGraphNode::Type HeapGraphNode::GetType() const {
+ IsDeadCheck("v8::HeapGraphNode::GetType");
+ return static_cast<HeapGraphNode::Type>(
+ reinterpret_cast<const i::HeapEntry*>(this)->type());
+}
+
+
+Handle<String> HeapGraphNode::GetName() const {
+ IsDeadCheck("v8::HeapGraphNode::GetName");
+ return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ reinterpret_cast<const i::HeapEntry*>(this)->name())));
+}
+
+
+int HeapGraphNode::GetSelfSize() const {
+ IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
+ return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
+}
+
+
+int HeapGraphNode::GetTotalSize() const {
+ IsDeadCheck("v8::HeapSnapshot::GetHead");
+ return const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(this))->TotalSize();
+}
+
+
+int HeapGraphNode::GetPrivateSize() const {
+ IsDeadCheck("v8::HeapSnapshot::GetPrivateSize");
+ return const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(this))->NonSharedTotalSize();
+}
+
+
+int HeapGraphNode::GetChildrenCount() const {
+ IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
+ return reinterpret_cast<const i::HeapEntry*>(this)->children()->length();
+}
+
+
+const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
+ IsDeadCheck("v8::HeapSnapshot::GetChild");
+ return reinterpret_cast<const HeapGraphEdge*>(
+ reinterpret_cast<const i::HeapEntry*>(this)->children()->at(index));
+}
+
+
+int HeapGraphNode::GetRetainersCount() const {
+ IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
+ return reinterpret_cast<const i::HeapEntry*>(this)->retainers()->length();
+}
+
+
+const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
+ IsDeadCheck("v8::HeapSnapshot::GetRetainer");
+ return reinterpret_cast<const HeapGraphEdge*>(
+ reinterpret_cast<const i::HeapEntry*>(this)->retainers()->at(index));
+}
+
+
+int HeapGraphNode::GetRetainingPathsCount() const {
+ IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
+ return const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(
+ this))->GetRetainingPaths()->length();
+}
+
+
+const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
+ IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
+ return reinterpret_cast<const HeapGraphPath*>(
+ const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(
+ this))->GetRetainingPaths()->at(index));
+}
+
+
+unsigned HeapSnapshot::GetUid() const {
+ IsDeadCheck("v8::HeapSnapshot::GetUid");
+ return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
+}
+
+
+Handle<String> HeapSnapshot::GetTitle() const {
+ IsDeadCheck("v8::HeapSnapshot::GetTitle");
+ const i::HeapSnapshot* snapshot =
+ reinterpret_cast<const i::HeapSnapshot*>(this);
+ return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ snapshot->title())));
+}
+
+
+const HeapGraphNode* HeapSnapshot::GetHead() const {
+ IsDeadCheck("v8::HeapSnapshot::GetHead");
+ const i::HeapSnapshot* snapshot =
+ reinterpret_cast<const i::HeapSnapshot*>(this);
+ return reinterpret_cast<const HeapGraphNode*>(snapshot->const_root());
+}
+
+
+int HeapProfiler::GetSnapshotsCount() {
+ IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
+ return i::HeapProfiler::GetSnapshotsCount();
+}
+
+
+const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
+ IsDeadCheck("v8::HeapProfiler::GetSnapshot");
+ return reinterpret_cast<const HeapSnapshot*>(
+ i::HeapProfiler::GetSnapshot(index));
+}
+
+
+const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
+ IsDeadCheck("v8::HeapProfiler::FindSnapshot");
+ return reinterpret_cast<const HeapSnapshot*>(
+ i::HeapProfiler::FindSnapshot(uid));
+}
+
+
+const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title) {
+ IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
+ return reinterpret_cast<const HeapSnapshot*>(
+ i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title)));
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
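The new indexed-property accessors above mirror the existing setters. A sketch of embedder code inspecting a previously attached backing store (obj is assumed to be an object handle the embedder already holds):

#include <v8.h>
#include <cstdio>

static void InspectBackingStore(v8::Handle<v8::Object> obj) {
  if (obj->HasIndexedPropertiesInExternalArrayData()) {
    void* data = obj->GetIndexedPropertiesExternalArrayData();
    int type = static_cast<int>(
        obj->GetIndexedPropertiesExternalArrayDataType());
    int length = obj->GetIndexedPropertiesExternalArrayDataLength();
    std::printf("external array %p: type %d, %d elements\n",
                data, type, length);
  } else if (obj->HasIndexedPropertiesInPixelData()) {
    std::printf("pixel data: %d bytes\n",
                obj->GetIndexedPropertiesPixelDataLength());
  }
}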
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 8ca91265..114ec234 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -45,11 +45,6 @@
namespace v8 {
namespace internal {
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 025f28e5..f8d98db9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -279,6 +279,25 @@ const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
+const Instr kMovMvnPattern = 0xd * B21;
+const Instr kMovMvnFlip = B22;
+const Instr kMovLeaveCCMask = 0xdff * B16;
+const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwMask = 0xff * B20;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovwLeaveCCFlip = 0x5 * B21;
+const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
+const Instr kCmpCmnPattern = 0x15 * B20;
+const Instr kCmpCmnFlip = B21;
+const Instr kALUMask = 0x6f * B21;
+const Instr kAddPattern = 0x4 * B21;
+const Instr kSubPattern = 0x2 * B21;
+const Instr kBicPattern = 0xe * B21;
+const Instr kAndPattern = 0x0 * B21;
+const Instr kAddSubFlip = 0x6 * B21;
+const Instr kAndBicFlip = 0xe * B21;
+
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
@@ -375,6 +394,12 @@ void Assembler::Align(int m) {
}
+void Assembler::CodeTargetAlign() {
+ // Preferred alignment of jump targets on some ARM chips.
+ Align(8);
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
@@ -626,7 +651,16 @@ void Assembler::next(Label* L) {
}
+static Instr EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
// Low-level code emission routines depending on the addressing mode.
+// If this returns true then you have to use the rotate_imm and immed_8
+// that it returns, because it may have already changed the instruction
+// to match them!
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
@@ -640,11 +674,43 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
- // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
- if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= 0x2*B21;
- return true;
+ // If the opcode is one with a complementary version and the complementary
+ // immediate fits, change the opcode.
+ if (instr != NULL) {
+ if ((*instr & kMovMvnMask) == kMovMvnPattern) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kMovMvnFlip;
+ return true;
+ } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ if (imm32 < 0x10000) {
+ *instr ^= kMovwLeaveCCFlip;
+ *instr |= EncodeMovwImmediate(imm32);
+ *rotate_imm = *immed_8 = 0; // Not used for movw.
+ return true;
+ }
+ }
+ }
+ } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
+ if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kCmpCmnFlip;
+ return true;
+ }
+ } else {
+ Instr alu_insn = (*instr & kALUMask);
+ if (alu_insn == kAddPattern ||
+ alu_insn == kSubPattern) {
+ if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kAddSubFlip;
+ return true;
+ }
+ } else if (alu_insn == kAndPattern ||
+ alu_insn == kBicPattern) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kAndBicFlip;
+ return true;
+ }
+ }
}
}
return false;
@@ -655,7 +721,7 @@ static bool fits_shifter(uint32_t imm32,
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
+static bool MustUseConstantPool(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
@@ -670,6 +736,14 @@ static bool MustUseIp(RelocInfo::Mode rmode) {
}
+bool Operand::is_single_instruction() const {
+ if (rm_.is_valid()) return true;
+ if (MustUseConstantPool(rmode_)) return false;
+ uint32_t dummy1, dummy2;
+ return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+}
+
+
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@@ -680,19 +754,34 @@ void Assembler::addrmod1(Instr instr,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(x.rmode_) ||
+ if (MustUseConstantPool(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
- RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
if ((instr & ~CondMask) == 13*B21) { // mov, S not set
- ldr(rd, MemOperand(pc, 0), cond);
+ if (MustUseConstantPool(x.rmode_) ||
+ !CpuFeatures::IsSupported(ARMv7)) {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ // Will probably use movw, will certainly not use constant pool.
+ mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
+ movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ }
} else {
- ldr(ip, MemOperand(pc, 0), cond);
+ // If this is not a mov or mvn instruction we may still be able to avoid
+ // a constant pool entry by using mvn or movw.
+ if (!MustUseConstantPool(x.rmode_) &&
+ (instr & kMovMvnMask) != kMovMvnPattern) {
+ mov(ip, x, LeaveCC, cond);
+ } else {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ }
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -1003,6 +1092,17 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
+void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
+ ASSERT(immediate < 0x10000);
+ mov(reg, Operand(immediate), LeaveCC, cond);
+}
+
+
+void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+ emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+}
+
+
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 14*B21 | s, src1, dst, src2);
@@ -1183,7 +1283,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(src.rmode_) ||
+ if (MustUseConstantPool(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
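For reference, the encoding rule that fits_shifter implements: an ARM data-processing immediate must be an 8-bit value rotated right by an even amount. A standalone sketch of that check (independent of the assembler internals above):

#include <stdint.h>

// True if imm32 fits in an ARM shifter operand, i.e. is an 8-bit value
// rotated right by 2 * rot for some rot in [0, 15].
static bool EncodableAsShifterOperand(uint32_t imm32) {
  for (unsigned rot = 0; rot < 16; rot++) {
    unsigned n = 2 * rot;
    // Rotating left by n undoes a right rotation by n; n == 0 is handled
    // separately to avoid an undefined 32-bit shift.
    uint32_t v = (n == 0) ? imm32 : ((imm32 << n) | (imm32 >> (32 - n)));
    if (v <= 0xff) return true;
  }
  return false;
}

When the test fails, the code above now tries the complemented or negated immediate with the flipped opcode (mvn for mov, cmn for cmp, sub for add, bic for and), or a movw/movt pair on ARMv7, before falling back to a constant pool load. For example, 0x41d (the exponent constant discussed later in this patch) fails the test, since its set bits span eleven positions.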
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e5d42f9a..869227a7 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -279,7 +279,10 @@ enum Condition {
// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
// Corresponds to transposing the operands of a comparison.
@@ -418,6 +421,15 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
+ // Return true if this operand fits in one instruction so that no
+ // 2-instruction solution with a load into the ip register is necessary.
+ bool is_single_instruction() const;
+
+ inline int32_t immediate() const {
+ ASSERT(!rm_.is_valid());
+ return imm32_;
+ }
+
Register rm() const { return rm_; }
private:
@@ -532,6 +544,27 @@ extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
+extern const Instr kMovMvnMask;
+extern const Instr kMovMvnPattern;
+extern const Instr kMovMvnFlip;
+
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+
+extern const Instr kCmpCmnMask;
+extern const Instr kCmpCmnPattern;
+extern const Instr kCmpCmnFlip;
+
+extern const Instr kALUMask;
+extern const Instr kAddPattern;
+extern const Instr kSubPattern;
+extern const Instr kAndPattern;
+extern const Instr kBicPattern;
+extern const Instr kAddSubFlip;
+extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:
@@ -670,6 +703,8 @@ class Assembler : public Malloced {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Aligns code to something that's optimal for a jump target on the platform.
+ void CodeTargetAlign();
// Branch instructions
void b(int branch_offset, Condition cond = al);
@@ -748,6 +783,13 @@ class Assembler : public Malloced {
mov(dst, Operand(src), s, cond);
}
+ // ARMv7 instructions for loading a 32 bit immediate in two instructions.
+ // This may actually emit a different mov instruction, but on an ARMv7 it
+ // is guaranteed to only emit one instruction.
+ void movw(Register reg, uint32_t immediate, Condition cond = al);
+ // The constant for movt should be in the range 0-0xffff.
+ void movt(Register reg, uint32_t immediate, Condition cond = al);
+
void bic(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ddbb9777..b1f29ba3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -136,7 +136,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+ ASSERT(kSmiTag == 0);
+ __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
@@ -240,9 +241,10 @@ static void AllocateJSArray(MacroAssembler* masm,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- __ and_(elements_array_storage,
- elements_array_storage,
- Operand(~kHeapObjectTagMask));
+ ASSERT(kSmiTag == 0);
+ __ sub(elements_array_storage,
+ elements_array_storage,
+ Operand(kHeapObjectTag));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
@@ -617,12 +619,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ and_(r6,
- r0,
- Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
- __ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8));
- __ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
- __ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC);
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
+ __ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 1ca236d1..8e87614c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -268,8 +268,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Load the offset into r3.
int slot_offset =
FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ __ RecordWrite(r2, Operand(slot_offset), r3, r1);
}
}
}
@@ -342,56 +341,27 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
}
- // Generate the return sequence if necessary.
- if (has_valid_frame() || function_return_.is_linked()) {
- if (!function_return_.is_linked()) {
- CodeForReturnPosition(info->function());
- }
- // exit
- // r0: result
- // sp: stack pointer
- // fp: frame pointer
- // cp: callee's context
+ // Handle the return from the function.
+ if (has_valid_frame()) {
+ // If there is a valid frame, control flow can fall off the end of
+ // the body. In that case there is an implicit return statement.
+ ASSERT(!function_return_is_shadowed_);
+ frame_->PrepareForReturn();
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-
+ if (function_return_.is_bound()) {
+ function_return_.Jump();
+ } else {
+ function_return_.Bind();
+ GenerateReturnSequence();
+ }
+ } else if (function_return_.is_linked()) {
+ // If the return target has dangling jumps to it, then we have not
+ // yet generated the return sequence. This can happen when (a)
+ // control does not flow off the end of the body so we did not
+ // compile an artificial return statement just above, and (b) there
+ // are return statements in the body but (c) they are all shadowed.
function_return_.Bind();
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns the parameter as it is.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Tear down the frame which will restore the caller's frame pointer and
- // the link register.
- frame_->Exit();
-
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
-
-#ifdef DEBUG
- // Check that the size of the code used for returning matches what is
- // expected by the debugger. If the sp_delts above cannot be encoded in
- // the add instruction the add will generate two instructions.
- int return_sequence_length =
- masm_->InstructionsGeneratedSince(&check_exit_codesize);
- CHECK(return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions ||
- return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions + 1);
-#endif
- }
+ GenerateReturnSequence();
}
// Adjust for function-level loop nesting.
@@ -1203,7 +1173,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
frame_->EmitPush(tos, TypeInfo::Smi());
@@ -1215,7 +1185,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
deferred->BindExit();
@@ -1958,8 +1928,56 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
// returning thus making it easier to merge.
frame_->EmitPop(r0);
frame_->PrepareForReturn();
+ if (function_return_.is_bound()) {
+ // If the function return label is already bound we reuse the
+ // code by jumping to the return site.
+ function_return_.Jump();
+ } else {
+ function_return_.Bind();
+ GenerateReturnSequence();
+ }
+ }
+}
- function_return_.Jump();
+
+void CodeGenerator::GenerateReturnSequence() {
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns the parameter as it is.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ // Tear down the frame which will restore the caller's frame pointer and
+ // the link register.
+ frame_->Exit();
+
+ // Here we use masm_-> instead of the __ macro to avoid the code coverage
+ // tool from instrumenting as we rely on the code size here.
+ int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ masm_->add(sp, sp, Operand(sp_delta));
+ masm_->Jump(lr);
+ DeleteFrame();
+
+#ifdef DEBUG
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. If the sp_delta above cannot be encoded in
+ // the add instruction the add will generate two instructions.
+ int return_sequence_length =
+ masm_->InstructionsGeneratedSince(&check_exit_codesize);
+ CHECK(return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions ||
+ return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions + 1);
+#endif
}
}
@@ -3090,9 +3108,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(offset));
// r1 could be identical with tos, but that doesn't matter.
- __ RecordWrite(scratch, r3, r1);
+ __ RecordWrite(scratch, Operand(offset), r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
@@ -3445,8 +3462,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
__ str(r0, FieldMemOperand(r1, offset));
// Update the write barrier for the array address.
- __ mov(r3, Operand(offset));
- __ RecordWrite(r1, r3, r2);
+ __ RecordWrite(r1, Operand(offset), r3, r2);
}
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -4069,28 +4085,34 @@ void CodeGenerator::VisitCall(Call* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Load(property->obj());
- if (!property->is_synthetic()) {
- // Duplicate receiver for later use.
- __ ldr(r0, MemOperand(sp, 0));
- frame_->EmitPush(r0);
- }
- Load(property->key());
- EmitKeyedLoad();
- // Put the function below the receiver.
if (property->is_synthetic()) {
+ Load(property->key());
+ EmitKeyedLoad();
+ // Put the function below the receiver.
// Use the global receiver.
frame_->EmitPush(r0); // Function.
LoadGlobalReceiver(r0);
+ // Call the function.
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
+ frame_->EmitPush(r0);
} else {
- // Switch receiver and function.
- frame_->EmitPop(r1); // Receiver.
- frame_->EmitPush(r0); // Function.
- frame_->EmitPush(r1); // Receiver.
- }
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- frame_->EmitPush(r0);
+ // Set the name register and call the IC initialization code.
+ Load(property->key());
+ frame_->EmitPop(r2); // Function name.
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
+ CodeForSourcePosition(node->position());
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+ __ ldr(cp, frame_->Context());
+ frame_->EmitPush(r0);
+ }
}
} else {
@@ -4254,8 +4276,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
// Leave.
leave.Bind();
frame_->EmitPush(r0);
@@ -4685,7 +4706,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -6628,8 +6650,12 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+ // We use mantissa as a scratch register here. Use a fudge factor to
+ // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
+ // that fit in the ARM's constant field.
+ int fudge = 0x400;
+ __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
+ __ add(mantissa, mantissa, Operand(fudge));
__ orr(exponent,
exponent,
Operand(mantissa, LSL, HeapNumber::kExponentShift));
@@ -6702,15 +6728,12 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
- Register exp_mask_reg = r5;
__ cmp(r0, r1);
__ b(ne, &not_identical);
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
@@ -6771,8 +6794,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Read top bits of double representation (second word of value).
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
- __ and_(r3, r2, Operand(exp_mask_reg));
- __ cmp(r3, Operand(exp_mask_reg));
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
__ b(ne, &return_equal);
// Shift out flag and all exponent bits, retaining only mantissa.
@@ -6893,14 +6917,14 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Register rhs_mantissa = exp_first ? r1 : r0;
Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = r5;
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
+ __ Sbfx(r4,
+ lhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, lhs_not_nan);
__ mov(r4,
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
@@ -6909,10 +6933,12 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
__ b(ne, &one_is_nan);
__ bind(lhs_not_nan);
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
+ __ Sbfx(r4,
+ rhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
__ b(ne, &neither_is_nan);
__ mov(r4,
Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
@@ -7178,7 +7204,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ RecordWriteHelper(object_, offset_, scratch_);
+ __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
__ Ret();
}
@@ -7338,12 +7364,16 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+ Register heap_number_map = r6;
if (ShouldGenerateSmiCode()) {
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+ // The new heap number is in r5. r3 and r7 are scratch.
+ __ AllocateHeapNumber(
+ r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@@ -7356,14 +7386,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
- // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
+ // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
__ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ ConvertToDoubleStub stub1(r3, r2, r7, r9);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
+ // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
__ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ ConvertToDoubleStub stub2(r1, r0, r7, r9);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
@@ -7372,6 +7402,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// After this point we have the left hand side in r1 and the right hand side
// in r0.
@@ -7394,18 +7425,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
default:
break;
}
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
@@ -7423,7 +7458,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7435,7 +7470,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7448,6 +7483,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm);
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&r1_is_smi);
}
@@ -7457,7 +7494,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
- __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
@@ -7475,7 +7514,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r1_is_smi);
if (mode_ == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7487,7 +7526,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7548,13 +7587,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
}
}
-
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
__ Swap(r0, r1, ip);
}
+ heap_number_map = no_reg; // Don't use this any more from here on.
+
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
@@ -7633,7 +7673,10 @@ static void GetInt32(MacroAssembler* masm,
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
- __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ __ Ubfx(scratch2,
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
@@ -7641,9 +7684,14 @@ static void GetInt32(MacroAssembler* masm,
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Operand(non_smi_exponent));
+ const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+ // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+ // split it up to avoid a constant pool entry. You can't do that in general
+ // for cmp because of the overflow flag, but we know the exponent is in the
+ // range 0-2047 so there is no overflow.
+ int fudge_factor = 0x400;
+ __ sub(scratch2, scratch2, Operand(fudge_factor));
+ __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
@@ -7653,17 +7701,14 @@ static void GetInt32(MacroAssembler* masm,
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+ const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+ __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
if (!CpuFeatures::IsSupported(VFP3)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
+ // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
+ // get how much to shift down.
+ __ rsb(dest, scratch2, Operand(30));
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(VFP3)) {
@@ -7715,9 +7760,13 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
Label rhs_is_smi, lhs_is_smi;
Label done_checking_rhs, done_checking_lhs;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
__ tst(lhs, Operand(kSmiTagMask));
__ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, lhs, r3, r5, r4, &slow);
__ jmp(&done_checking_lhs);
@@ -7727,7 +7776,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, rhs, r2, r5, r4, &slow);
__ jmp(&done_checking_rhs);
@@ -7787,8 +7837,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
break;
}
case NO_OVERWRITE: {
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
default: break;
}
@@ -7807,8 +7857,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
__ jmp(&got_a_heap_number);
}
@@ -7934,10 +7984,11 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s",
+ "GenericBinaryOpStub_%s_%s%s_%s",
op_name,
overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : 0);
+ specialized_on_rhs_ ? "_ConstantRhs" : "",
+ BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
@@ -8130,6 +8181,28 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
__ Ret();
__ bind(&smi_is_unsuitable);
+ } else if (op_ == Token::MOD &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS) {
+ // Generate a bit of smi code for modulus even though the default for
+ // modulus is not to do so: the ARM processor has no coprocessor support
+ // for modulus, so checking for smis makes sense.
+ Label slow;
+ ASSERT(!ShouldGenerateSmiCode());
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ // Check for two positive smis.
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that rhs is a power of two and not zero.
+ __ sub(scratch, rhs, Operand(1), SetCC);
+ __ b(mi, &slow);
+ __ tst(rhs, scratch);
+ __ b(ne, &slow);
+ // Calculate power of two modulus.
+ __ and_(result, lhs, Operand(scratch));
+ __ Ret();
+ __ bind(&slow);
}
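
The smi fast path above uses the standard identity that, for non-negative lhs
and a power-of-two rhs, lhs % rhs equals lhs & (rhs - 1). A minimal sketch
(PowerOfTwoMod is an invented name):

  #include <cassert>
  #include <cstdint>

  uint32_t PowerOfTwoMod(uint32_t lhs, uint32_t rhs) {
    uint32_t mask = rhs - 1;
    // Mirrors the sub/tst pair: rhs must be a non-zero power of two.
    assert(rhs != 0 && (rhs & mask) == 0);
    return lhs & mask;  // same result as lhs % rhs under these assumptions
  }
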
HandleBinaryOpSlowCases(
masm,
@@ -8276,20 +8349,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
// r2 = low 32 bits of double value
// r3 = high 32 bits of double value
- // Compute hash:
+ // Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, LSR, 16));
- __ eor(r1, r1, Operand(r1, LSR, 8));
+ __ eor(r1, r1, Operand(r1, ASR, 16));
+ __ eor(r1, r1, Operand(r1, ASR, 8));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- if (CpuFeatures::IsSupported(ARMv7)) {
- const int kTranscendentalCacheSizeBits = 9;
- ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
- TranscendentalCache::kCacheSize);
- __ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
- } else {
- __ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
- }
+ __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
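
The hash computed above can be sketched in C++ as follows; kCacheSize stands
in for TranscendentalCache::kCacheSize and must be a power of two. The signed
shifts mirror the ARM code's ASR (in portable C++, right-shifting a negative
value is implementation-defined, though commonly arithmetic):

  int CacheIndex(int32_t low, int32_t high, int32_t kCacheSize) {
    int32_t h = low ^ high;
    h ^= h >> 16;  // arithmetic shifts, per the updated comment
    h ^= h >> 8;
    return h & (kCacheSize - 1);
  }
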
@@ -8364,6 +8430,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
if (op_ == Token::SUB) {
// Check whether the value is a smi.
Label try_float;
@@ -8384,7 +8453,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(&done);
__ bind(&try_float);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_) {
@@ -8392,7 +8463,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
- __ AllocateHeapNumber(r1, r2, r3, &slow);
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -8402,7 +8473,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
} else if (op_ == Token::BIT_NOT) {
// Check if the operand is a heap number.
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// Convert the heap number is r0 to an untagged integer in r1.
@@ -8422,7 +8495,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ mov(r0, Operand(r2));
}
@@ -9248,15 +9321,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
Label seq_string;
- const int kStringRepresentationEncodingMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
- // First check for sequential string.
- ASSERT_EQ(0, kStringTag);
- ASSERT_EQ(0, kSeqStringTag);
- __ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
+ // First check for flat string.
+ __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ ASSERT_EQ(0, kStringTag | kSeqStringTag);
__ b(eq, &seq_string);
// subject: Subject string
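
The tst above works because kStringTag and kSeqStringTag are both zero, so a
flat (sequential) string leaves no bits set under the combined mask. Roughly,
as a sketch using V8's instance-type constants:

  bool IsFlatString(int instance_type) {
    // Zero under this mask means: is a string, and is sequential.
    return (instance_type & (kIsNotStringMask | kStringRepresentationMask)) == 0;
  }
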
@@ -9266,8 +9335,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- __ and_(r0, r0, Operand(kStringRepresentationMask));
- __ cmp(r0, Operand(kConsStringTag));
+ ASSERT(kExternalStringTag != 0);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
__ b(ne, &runtime);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ LoadRoot(r1, Heap::kEmptyStringRootIndex);
@@ -9276,25 +9346,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ // Is first part a flat string?
ASSERT_EQ(0, kSeqStringTag);
__ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime);
- __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
__ bind(&seq_string);
- // r1: suject string type & kStringRepresentationEncodingMask
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
- // Check that the irregexp code has been generated for an ascii string. If
- // it has, the field contains a code object otherwise it contains the hole.
-#ifdef DEBUG
- const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
- const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
- CHECK_EQ(4, kSeqAsciiString);
- CHECK_EQ(0, kSeqTwoByteString);
-#endif
+ // r0: Instance type of subject string
+ ASSERT_EQ(4, kAsciiStringTag);
+ ASSERT_EQ(0, kTwoByteStringTag);
// Find the code object based on the assumptions above.
- __ mov(r3, Operand(r1, ASR, 2), SetCC);
+ __ and_(r0, r0, Operand(kStringEncodingMask));
+ __ mov(r3, Operand(r0, ASR, 2), SetCC);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
@@ -9412,17 +9477,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ mov(r3, last_match_info_elements);
- __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -10524,13 +10587,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
ASSERT_EQ(0, kTwoByteStringTag);
__ tst(r4, Operand(kStringEncodingMask));
__ tst(r5, Operand(kStringEncodingMask), ne);
__ b(eq, &non_ascii);
// Allocate an ASCII cons string.
+ __ bind(&ascii_data);
__ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
@@ -10542,6 +10606,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // r4: first instance type.
+ // r5: second instance type.
+ __ tst(r4, Operand(kAsciiDataHintMask));
+ __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
+
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
__ jmp(&allocated);
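
The two tst/branch pairs plus the eor/and/cmp sequence above amount to the
following predicate over the two instance types (CanMakeAsciiCons is an
invented name; the constants are V8's):

  bool CanMakeAsciiCons(int type1, int type2) {
    // Both strings are hinted to contain only ASCII data ...
    if ((type1 & type2 & kAsciiDataHintMask) != 0) return true;
    // ... or one is an ASCII string and the other carries the ASCII-data
    // hint, which the eor/and/cmp sequence detects in one pass.
    const int kBits = kAsciiStringTag | kAsciiDataHintTag;
    return ((type1 ^ type2) & kBits) == kBits;
  }
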
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 91adff0f..be4d5561 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -315,6 +315,12 @@ class CodeGenerator: public AstVisitor {
// Main code generation function
void Generate(CompilationInfo* info);
+ // Generate the return sequence code. Should be called no more than
+ // once per compiled function, immediately after binding the return
+ // target (which cannot be done more than once). The return value should
+ // be in r0.
+ void GenerateReturnSequence();
+
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
@@ -663,7 +669,9 @@ class GenericBinaryOpStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e36f595c..fa9adbd7 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -284,6 +284,9 @@ class Instr {
// with immediate
inline int RotateField() const { return Bits(11, 8); }
inline int Immed8Field() const { return Bits(7, 0); }
+ inline int Immed4Field() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtField() const {
+ return Immed4Field() << 12 | Offset12Field();
+ }
// Fields used in Load/Store instructions
inline int PUField() const { return Bits(24, 23); }
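
ImmedMovwMovtField() reassembles the 16-bit immediate that the movw/movt
encodings split across imm4 (bits 19-16) and imm12 (bits 11-0). A standalone
sketch of the same extraction:

  #include <cstdint>

  int DecodeMovwMovtImmediate(uint32_t instr) {
    int imm4 = (instr >> 16) & 0xf;  // Immed4Field()
    int imm12 = instr & 0xfff;       // Offset12Field()
    return (imm4 << 12) | imm12;     // imm16 = imm4:imm12
  }
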
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 1c05bc3a..40053699 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -101,6 +101,7 @@ class Decoder {
void PrintSRegister(int reg);
void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format);
+ void PrintMovwMovt(Instr* instr);
int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
@@ -375,6 +376,16 @@ int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
}
+// Print the movw or movt instruction.
+void Decoder::PrintMovwMovt(Instr* instr) {
+ int imm = instr->ImmedMovwMovtField();
+ int rd = instr->RdField();
+ PrintRegister(rd);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", imm);
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -430,7 +441,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 1;
}
case 'm': {
- if (format[1] == 'e') { // 'memop: load/store instructions
+ if (format[1] == 'w') {
+ // 'mw: movt/movw instructions.
+ PrintMovwMovt(instr);
+ return 2;
+ }
+ if (format[1] == 'e') { // 'memop: load/store instructions.
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
@@ -776,7 +792,7 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "tst'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movw'cond 'mw");
}
break;
}
@@ -794,7 +810,7 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "cmp'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movt'cond 'mw");
}
break;
}
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 48eaf46a..36ac2aa3 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -102,8 +102,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
}
if (needs_write_barrier) {
- __ mov(scratch1(), Operand(offset));
- __ RecordWrite(scratch0(), scratch1(), scratch2());
+ __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index e6196639..67328738 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -110,10 +110,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
__ mov(r1, Operand(Context::SlotOffset(slot->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
+ // registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
- __ RecordWrite(r2, r1, r0);
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
}
@@ -666,8 +666,10 @@ void FullCodeGenerator::Move(Slot* dst,
__ str(src, location);
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
- __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
- __ RecordWrite(scratch1, scratch2, src);
+ __ RecordWrite(scratch1,
+ Operand(Context::SlotOffset(dst->index())),
+ scratch2,
+ src);
}
}
@@ -715,10 +717,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ str(result_register(),
CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
__ mov(r1, Operand(cp));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
break;
@@ -1252,8 +1253,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Update the write barrier for the array store with r0 as the scratch
// register.
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
if (result_saved) {
@@ -1493,8 +1493,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(offset), r2, r3);
break;
}
@@ -1648,6 +1647,30 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ VisitForValue(key, kAccumulator);
+ __ mov(r2, r0);
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
+ in_loop);
+ __ Call(ic, mode);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Apply(context_, r0);
+}
+
+
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@@ -1743,35 +1766,28 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
+ // Call to a keyed property.
+ // For a synthetic property use keyed load IC followed by function call,
+ // for a regular property use keyed CallIC.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
if (prop->is_synthetic()) {
+ VisitForValue(prop->key(), kAccumulator);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
__ pop(r1); // We do not need to keep the receiver.
- } else {
- __ ldr(r1, MemOperand(sp, 0)); // Keep receiver, to call function on.
- }
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (prop->is_synthetic()) {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
// Push result (function).
__ push(r0);
// Push Global receiver.
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ push(r1);
+ EmitCallWithStub(expr);
} else {
- // Pop receiver.
- __ pop(r1);
- // Push result (function).
- __ push(r0);
- __ push(r1);
+ EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
- EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous
@@ -2140,7 +2156,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -2259,8 +2276,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
Apply(context_, r0);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index d0a32e81..c6de4d8e 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -167,16 +167,22 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
+ Register result,
Register t0,
Register t1,
Register t2) {
// Register use:
//
- // elements - holds the slow-case elements of the receiver and is unchanged.
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
//
- // key - holds the smi key on entry and is unchanged if a branch is
- // performed to the miss label.
- // Holds the result on exit if the load succeeded.
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'elements' or 'key'.
+ // Unchanged on bailout so 'elements' and 'key' can be used
+ // in further computation.
//
// Scratch registers:
//
@@ -248,7 +254,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(key, FieldMemOperand(t2, kValueOffset));
+ __ ldr(result, FieldMemOperand(t2, kValueOffset));
}
@@ -298,22 +304,159 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
}
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+ // Check that the object isn't a smi.
+ __ BranchOnSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ b(ne, slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+ // objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+ __ b(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ b(ne, not_fast_array);
+ // Check that the key (index) is within bounds.
+ __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch1));
+ __ b(hs, out_of_range);
+ // Fast case: Do the load.
+ __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ ldr(scratch2,
+ MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, ip);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ b(eq, out_of_range);
+ __ mov(result, scratch2);
+}
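
The scaled MemOperand above leans on the smi encoding: a smi key already
holds index << kSmiTagSize, so shifting it left by kPointerSizeLog2 -
kSmiTagSize yields the byte offset of the element directly. As a one-line
sketch (ElementByteOffset is an invented name):

  // Assumes kSmiTag == 0 and kSmiTagSize < kPointerSizeLog2, as asserted.
  intptr_t ElementByteOffset(intptr_t smi_key) {
    return smi_key << (kPointerSizeLog2 - kSmiTagSize);  // index * pointer size
  }
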
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if a key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // The key is not a smi.
+ // Is it a string?
+ __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
+ __ b(ge, not_symbol);
+
+ // Is the string an array index, with cached numeric value?
+ __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ b(eq, index_string);
+
+ // Is the string a symbol?
+ // map: key map
+ __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ tst(hash, Operand(kIsSymbolMask));
+ __ b(eq, not_symbol);
+}
+
+
+// Picks out an array index from the hash field.
+static void GenerateIndexFromHash(MacroAssembler* masm,
+ Register key,
+ Register hash) {
+ // Register use:
+ // key - holds the overwritten key on exit.
+ // hash - holds the key's hash. Clobbered.
+
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ // Here we actually clobber the key which will be used if calling into
+ // runtime later. However, as the new key is the numeric value of a string key
+ // there is no difference in using either key.
+ __ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ __ mov(key, Operand(hash, LSL, kSmiTagSize));
+}
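
GenerateIndexFromHash boils down to a Ubfx plus a smi tag. A C++ sketch under
the same asserts (IndexFromHash is an invented name):

  int32_t IndexFromHash(uint32_t hash_field) {
    uint32_t index = (hash_field >> String::kHashShift) &
                     ((1u << String::kArrayIndexValueBits) - 1);  // the Ubfx
    return static_cast<int32_t>(index << kSmiTagSize);            // smi-tag it
  }
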
+
+
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind) {
// ----------- S t a t e -------------
+ // -- r1 : receiver
// -- r2 : name
- // -- lr : return address
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// If the stub cache probing failed, the receiver might be a value.
@@ -355,9 +498,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
- // Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
}
@@ -390,7 +531,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -443,13 +584,11 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ CheckAccessGlobalProxy(r1, r0, &miss);
__ b(&invoke);
- // Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -465,7 +604,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
__ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id))));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -496,18 +635,165 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into r1.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc);
+}
+
+
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into r1.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ BranchOnNotSmi(r2, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call);
+
+ GenerateFastArrayLoad(
+ masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3);
+
+ __ bind(&do_call);
+ // receiver in r1 is not used after this point.
+ // r2: key
+ // r1: function
+
+ // Check that the value in r1 is a JSFunction.
+ __ BranchOnSmi(r1, &slow_call);
+ __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
+ __ b(ne, &slow_call);
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+ __ bind(&check_number_dictionary);
+ // r2: key
+ // r3: elements map
+ // r4: elements
+ // Check whether the elements is a number dictionary.
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &slow_load);
+ __ mov(r0, Operand(r2, ASR, kSmiTagSize));
+ // r0: untagged index
+ GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3);
+ __ jmp(&do_call);
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3);
+ __ EnterInternalFrame();
+ __ push(r2); // save the key
+ __ Push(r1, r2); // pass the receiver and the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(r2); // restore the key
+ __ LeaveInternalFrame();
+ __ mov(r1, r0);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache);
+
+ __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &lookup_monomorphic_cache);
+
+ GenerateDictionaryLoad(
+ masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+ // - there is hope that the runtime will create a monomorphic call stub
+ // that will get fetched next time.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ GenerateIndexFromHash(masm, r2, r3);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc);
}
@@ -759,49 +1045,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register key = r0;
Register receiver = r1;
- // Check that the object isn't a smi.
- __ BranchOnSmi(receiver, &slow);
- // Get the map of the receiver.
- __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(kSlowCaseBitFieldMask));
- __ b(ne, &slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_OBJECT_TYPE));
- __ b(lt, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &check_pixel_array);
- // Check that the key (index) is within bounds.
- __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ cmp(key, Operand(r3));
- __ b(hs, &slow);
- // Fast case: Do the load.
- __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, &slow);
- __ mov(r0, r2);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
@@ -831,7 +1084,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
- GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
+ GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@@ -840,24 +1093,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
- // The key is not a smi.
- // Is it a string?
- // r0: key
- // r1: receiver
- __ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &slow);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r3, Operand(String::kContainsCachedArrayIndexMask));
- __ b(eq, &index_string);
-
- // Is the string a symbol?
- // r2: key map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- ASSERT(kSymbolTag != 0);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, &slow);
+ GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -873,7 +1109,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- __ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+ __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
@@ -918,25 +1154,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret();
- __ b(&slow);
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- // r0: key (string)
- // r1: receiver
- // r3: hash field
- // We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(r3, r3, Operand(String::kArrayIndexValueMask));
- // Here we actually clobber the key (r0) which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
+ GenerateIndexFromHash(masm, key, r3);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1120,7 +1339,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Use r0 for result as key is not needed any more.
- __ AllocateHeapNumber(r0, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, r3, r4, r6, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -1151,7 +1371,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
@@ -1188,7 +1409,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
- __ AllocateHeapNumber(r4, r5, r6, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -1204,7 +1426,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
@@ -1215,7 +1438,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r3, r4, r5, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -1473,7 +1697,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ Ret(eq);
// Update write barrier for the elements array address.
__ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, r4, r5);
+ __ RecordWrite(elements, Operand(r4), r5, r6);
__ Ret();
}
@@ -1665,32 +1889,29 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
+ // vldr requires offset to be a multiple of 4 so we can not
+ // include -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
__ vcvt_f32_f64(s0, d0);
__ vmov(r5, s0);
__ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
- Label done;
-
// Need to perform float-to-int conversion.
- // Test for NaN.
- __ vcmp(d0, d0);
- // Move vector status bits to normal status bits.
- __ vmrs(v8::internal::pc);
- __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
- __ b(vs, &done);
-
- // Test whether exponent equal to 0x7FF (infinity or NaN).
- __ vmov(r6, r7, d0);
- __ mov(r5, Operand(0x7FF00000));
- __ and_(r6, r6, Operand(r5));
- __ teq(r6, Operand(r5));
- __ mov(r6, Operand(0), LeaveCC, eq);
+ // Test for NaN or infinity (both give zero).
+ __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
+
+ // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
+ // include -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+
+ __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs and Infinities have all-one exponents so they sign extend to -1.
+ __ cmp(r6, Operand(-1));
+ __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
@@ -1698,10 +1919,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
-
__ vmov(r5, s0, ne);
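
The Sbfx/cmp pair above works because NaNs and Infinities carry an all-ones
biased exponent, which sign-extends to -1. As a sketch (IsNaNOrInfinity is an
invented name; the constants are V8's):

  bool IsNaNOrInfinity(uint32_t exponent_word) {
    // Sign-extend the kExponentBits-wide field at kExponentShift, like Sbfx.
    uint32_t up = exponent_word
                  << (32 - HeapNumber::kExponentShift - HeapNumber::kExponentBits);
    int32_t exp = static_cast<int32_t>(up) >> (32 - HeapNumber::kExponentBits);
    return exp == -1;  // all-ones exponent: NaN or Infinity
  }
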
- __ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6292b581..29e168c5 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -216,6 +216,71 @@ void MacroAssembler::Move(Register dst, Register src) {
}
+void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
+ Condition cond) {
+ if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) {
+ and_(dst, src1, src2, LeaveCC, cond);
+ return;
+ }
+ int32_t immediate = src2.immediate();
+ if (immediate == 0) {
+ mov(dst, Operand(0), LeaveCC, cond);
+ return;
+ }
+ if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
+ ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond);
+ return;
+ }
+ and_(dst, src1, src2, LeaveCC, cond);
+}
+
+
+void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
+ Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
+ if (lsb != 0) {
+ mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
+ }
+ } else {
+ ubfx(dst, src1, lsb, width, cond);
+ }
+}
+
+
+void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
+ Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
+ int shift_up = 32 - lsb - width;
+ int shift_down = lsb + shift_up;
+ if (shift_up != 0) {
+ mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
+ }
+ if (shift_down != 0) {
+ mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
+ }
+ } else {
+ sbfx(dst, src1, lsb, width, cond);
+ }
+}
+
+
+void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ } else {
+ bfc(dst, lsb, width, cond);
+ }
+}
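
The pre-ARMv7 fallbacks above reduce the v7 bitfield instructions to
mask-and-shift sequences. For Sbfx, the shift pair moves the field's top bit
to bit 31 and shifts arithmetically back down, sign-extending it. A sketch
(SbfxFallback is an invented name):

  int32_t SbfxFallback(uint32_t src, int lsb, int width) {
    uint32_t mask = ((1u << width) - 1) << lsb;        // same mask as the and_
    uint32_t up = (src & mask) << (32 - lsb - width);  // field top bit -> bit 31
    return static_cast<int32_t>(up) >> (32 - width);   // ASR sign-extends
  }
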
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@@ -245,31 +310,32 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::RecordWriteHelper(Register object,
- Register offset,
- Register scratch) {
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
+ InNewSpace(object, scratch1, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
- mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
-
- // Calculate region number.
- add(offset, object, Operand(offset)); // Add offset into the object.
- and_(offset, offset, Operand(ip)); // Offset into page of the object.
- mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
+ // Add offset into the object.
+ add(scratch0, object, offset);
// Calculate page address.
- bic(object, object, Operand(ip));
+ Bfc(object, 0, kPageSizeBits);
+
+ // Calculate region number.
+ Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, offset));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
+ str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
}
@@ -287,21 +353,23 @@ void MacroAssembler::InNewSpace(Register object,
// Will clobber 4 registers: object, scratch0, scratch1, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
- Register scratch) {
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ InNewSpace(object, scratch0, eq, &done);
// Record the actual write.
- RecordWriteHelper(object, offset, scratch);
+ RecordWriteHelper(object, offset, scratch0, scratch1);
bind(&done);
@@ -309,8 +377,8 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(offset, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
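
With the new Operand offset, RecordWriteHelper computes the page base with
Bfc and the region number with Ubfx. In rough C++, using the constants from
V8's heap headers (MarkRegionDirty and the single 32-bit dirty word are my
assumptions):

  void MarkRegionDirty(uintptr_t object, uintptr_t offset) {
    uintptr_t field = object + offset;  // the add(scratch0, object, offset)
    uintptr_t page = object & ~((uintptr_t(1) << kPageSizeBits) - 1);  // Bfc
    unsigned region =
        (field >> Page::kRegionSizeLog2) &
        ((1u << (kPageSizeBits - Page::kRegionSizeLog2)) - 1);  // Ubfx
    uint32_t* dirty = reinterpret_cast<uint32_t*>(page + Page::kDirtyFlagOffset);
    *dirty |= 1u << region;  // mark the region covering [object + offset]
  }
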
@@ -1460,6 +1528,16 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (FLAG_debug_code) {
+ LoadRoot(ip, index);
+ cmp(reg, ip);
+ Check(eq, "Register did not match expected root");
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
@@ -1578,6 +1656,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
@@ -1588,9 +1667,9 @@ void MacroAssembler::AllocateHeapNumber(Register result,
gc_required,
TAG_OBJECT);
- // Get heap number map and store it in the allocated object.
- LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 87f7b5fe..e02a6c8a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -93,6 +93,15 @@ class MacroAssembler: public Assembler {
Register scratch = no_reg,
Condition cond = al);
+
+ void And(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+ void Ubfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ void Sbfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ void Bfc(Register dst, int lsb, int width, Condition cond = al);
+
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.
@@ -119,13 +128,19 @@ class MacroAssembler: public Assembler {
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object, Register offset, Register scratch);
+ void RecordWriteHelper(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- // The 'scratch' register is used in the implementation and all 3 registers
+ // The 'scratch' registers are used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
- void RecordWrite(Register object, Register offset, Register scratch);
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -364,6 +379,7 @@ class MacroAssembler: public Assembler {
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required);
// ---------------------------------------------------------------------------
@@ -543,6 +559,7 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 3bdca38e..77776c2b 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1859,7 +1859,9 @@ void Simulator::DecodeType01(Instr* instr) {
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movw'cond 'rd, 'imm").
+ alu_out = instr->ImmedMovwMovtField();
+ set_register(rd, alu_out);
}
break;
}
@@ -1888,7 +1890,10 @@ void Simulator::DecodeType01(Instr* instr) {
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movt'cond 'rd, 'imm").
+ alu_out = (get_register(rd) & 0xffff) |
+ (instr->ImmedMovwMovtField() << 16);
+ set_register(rd, alu_out);
}
break;
}
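
The two simulator cases above implement the ARMv7 semantics: movw loads a
16-bit immediate and clears the upper half of the destination, while movt
replaces only the upper half. Equivalently, as a sketch:

  #include <cstdint>

  uint32_t SimulateMovw(uint32_t imm16) { return imm16; }

  uint32_t SimulateMovt(uint32_t rd, uint32_t imm16) {
    return (rd & 0xffff) | (imm16 << 16);
  }
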
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 3992d6c5..3e5ba112 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -336,9 +336,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ b(eq, &exit);
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(receiver_reg, name_reg, scratch);
+ // Pass the now unused name_reg as a scratch register.
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -352,8 +351,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(scratch, name_reg, receiver_reg);
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register r0).
@@ -1019,6 +1017,14 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, miss);
+ }
+}
+
+
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1035,6 +1041,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
const int argc = arguments().immediate();
// Get the receiver of the function from the stack into r0.
@@ -1078,6 +1086,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@@ -1127,6 +1137,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@@ -1198,6 +1210,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
+ GenerateNameCheck(name, &miss_in_smi_check);
+
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@@ -1337,6 +1351,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1384,6 +1400,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
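
The GenerateNameCheck calls threaded through these stubs exist because a CALL_IC is compiled for a fixed property name, while a KEYED_CALL_IC receives the name in a register (r2 on ARM) at run time, so the stub must confirm the name still matches before taking the fast path. A toy restatement of that guard (hypothetical helper, not V8 code):

#include <cassert>
#include <string>

static bool NameCheckPasses(bool is_keyed_call, const std::string& stub_name,
                            const std::string& runtime_name) {
  if (!is_keyed_call) return true;   // CALL_IC: the name is baked into the stub
  return stub_name == runtime_name;  // KEYED_CALL_IC: compare r2, else jump to miss
}

int main() {
  assert(NameCheckPasses(false, "push", "anything"));
  assert(NameCheckPasses(true, "push", "push"));
  assert(!NameCheckPasses(true, "push", "pop"));
  return 0;
}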
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 334ca35d..8b90f424 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -367,6 +367,7 @@ void VirtualFrame::CallCodeObject(Handle<Code> code,
int dropped_args) {
switch (code->kind()) {
case Code::CALL_IC:
+ case Code::KEYED_CALL_IC:
case Code::FUNCTION:
break;
case Code::KEYED_LOAD_IC:
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index e97ad496..d8dc5c6c 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -212,10 +212,9 @@ class VirtualFrame : public ZoneObject {
void Enter();
void Exit();
- // Prepare for returning from the frame by spilling locals and
- // dropping all non-locals elements in the virtual frame. This
+ // Prepare for returning from the frame by dropping all elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
+ // shared return site. No spill code emitted. Value to return should be in r0.
inline void PrepareForReturn();
// Number of local variables after when we use a loop for allocating.
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 08741311..bbd69eca 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1462,6 +1462,7 @@ bool Genesis::InstallExtensions(Handle<Context> global_context,
}
if (FLAG_expose_gc) InstallExtension("v8/gc");
+ if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
if (extensions == NULL) return true;
// Install required extensions
diff --git a/src/checks.h b/src/checks.h
index c2e40ba9..13374d86 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -155,9 +155,9 @@ static inline void CheckNonEqualsHelper(const char* file,
static inline void CheckEqualsHelper(const char* file,
int line,
const char* expected_source,
- void* expected,
+ const void* expected,
const char* value_source,
- void* value) {
+ const void* value) {
if (expected != value) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
@@ -170,9 +170,9 @@ static inline void CheckEqualsHelper(const char* file,
static inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
- void* expected,
+ const void* expected,
const char* value_source,
- void* value) {
+ const void* value) {
if (expected == value) {
V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
expected_source, value_source, value);
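
The motivation for the added const qualifiers, in a standalone sketch (a hypothetical helper, not the V8 macro): a parameter of plain void* cannot accept a pointer to const data, so comparing two const char* values through the old helpers would not compile without a cast.

#include <cstdio>

static void CheckEqHelper(const void* expected, const void* value) {
  if (expected != value) {
    std::printf("CHECK_EQ failed: %p vs %p\n", expected, value);
  }
}

int main() {
  static const char kData[] = "v8";
  const char* p = kData;
  CheckEqHelper(p, kData);  // a void* parameter would reject these const pointers
  return 0;
}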
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 77fa1ddd..d5e91cbd 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -295,7 +295,6 @@ ScriptBreakPoint.prototype.update_positions = function(line, column) {
}
-
ScriptBreakPoint.prototype.hit_count = function() {
return this.hit_count_;
};
@@ -389,7 +388,10 @@ ScriptBreakPoint.prototype.set = function (script) {
// Create a break point object and set the break point.
break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
break_point.setIgnoreCount(this.ignoreCount());
- %SetScriptBreakPoint(script, pos, break_point);
+ pos = %SetScriptBreakPoint(script, pos, break_point);
+ if (!IS_UNDEFINED(pos)) {
+ this.actual_location = script.locationFromPosition(pos);
+ }
return break_point;
};
diff --git a/src/debug.cc b/src/debug.cc
index 98e366c7..d513b312 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1028,8 +1028,8 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
- int source_position,
- Handle<Object> break_point_object) {
+ Handle<Object> break_point_object,
+ int* source_position) {
HandleScope scope;
if (!EnsureDebugInfo(shared)) {
@@ -1043,9 +1043,11 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(source_position);
+ it.FindBreakLocationFromPosition(*source_position);
it.SetBreakPoint(break_point_object);
+ *source_position = it.position();
+
// At least one active break point now.
ASSERT(debug_info->GetBreakPointCount() > 0);
}
diff --git a/src/debug.h b/src/debug.h
index 1c674711..6019294f 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -230,8 +230,8 @@ class Debug {
static Object* Break(Arguments args);
static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
- int source_position,
- Handle<Object> break_point_object);
+ Handle<Object> break_point_object,
+ int* source_position);
static void ClearBreakPoint(Handle<Object> break_point_object);
static void ClearAllBreakPoints();
static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
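
The reordered SetBreakPoint signature turns source_position into an in/out parameter: the requested position goes in, and the position of the break location actually chosen comes back out, which is what lets debug-debugger.js record actual_location above. A schematic of that calling convention (illustrative, not the V8 implementation):

#include <cstdio>

// Hypothetical stand-in: snap the requested position to the nearest
// break location and report the position actually used.
static void SetBreakPointAt(int* source_position) {
  *source_position = (*source_position / 10) * 10;  // pretend break locations fall on multiples of 10
}

int main() {
  int pos = 42;
  SetBreakPointAt(&pos);
  std::printf("requested 42, actual break location at %d\n", pos);  // prints 40
  return 0;
}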
diff --git a/src/execution.cc b/src/execution.cc
index 006d358e..a6b15ccb 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -679,7 +679,7 @@ Object* Execution::HandleStackGuardInterrupt() {
// --- G C E x t e n s i o n ---
-const char* GCExtension::kSource = "native function gc();";
+const char* const GCExtension::kSource = "native function gc();";
v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
@@ -695,7 +695,115 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
}
-static GCExtension kGCExtension;
-v8::DeclareExtension kGCExtensionDeclaration(&kGCExtension);
+static GCExtension gc_extension;
+static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+
+
+// --- E x t e r n a l i z e S t r i n g E x t e n s i o n ---
+
+
+template <typename Char, typename Base>
+class SimpleStringResource : public Base {
+ public:
+ // Takes ownership of |data|.
+ SimpleStringResource(Char* data, size_t length)
+ : data_(data),
+ length_(length) {}
+
+ // delete[] matches the new Char[] allocations below.
+ virtual ~SimpleStringResource() { delete[] data_; }
+
+ virtual const Char* data() const { return data_; }
+
+ virtual size_t length() const { return length_; }
+
+ private:
+ Char* const data_;
+ const size_t length_;
+};
+
+
+typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
+ SimpleAsciiStringResource;
+typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
+ SimpleTwoByteStringResource;
+
+
+const char* const ExternalizeStringExtension::kSource =
+ "native function externalizeString();"
+ "native function isAsciiString();";
+
+
+v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
+ return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
+ } else {
+ ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
+ return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
+ }
+}
+
+
+v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
+ const v8::Arguments& args) {
+ if (args.Length() < 1 || !args[0]->IsString()) {
+ return v8::ThrowException(v8::String::New(
+ "First parameter to externalizeString() must be a string."));
+ }
+ bool force_two_byte = false;
+ if (args.Length() >= 2) {
+ if (args[1]->IsBoolean()) {
+ force_two_byte = args[1]->BooleanValue();
+ } else {
+ return v8::ThrowException(v8::String::New(
+ "Second parameter to externalizeString() must be a boolean."));
+ }
+ }
+ bool result = false;
+ Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
+ if (string->IsExternalString()) {
+ return v8::ThrowException(v8::String::New(
+ "externalizeString() can't externalize twice."));
+ }
+ if (string->IsAsciiRepresentation() && !force_two_byte) {
+ char* data = new char[string->length()];
+ String::WriteToFlat(*string, data, 0, string->length());
+ SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
+ data, string->length());
+ result = string->MakeExternal(resource);
+ if (result && !string->IsSymbol()) {
+ i::ExternalStringTable::AddString(*string);
+ }
+ } else {
+ uc16* data = new uc16[string->length()];
+ String::WriteToFlat(*string, data, 0, string->length());
+ SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
+ data, string->length());
+ result = string->MakeExternal(resource);
+ if (result && !string->IsSymbol()) {
+ i::ExternalStringTable::AddString(*string);
+ }
+ }
+ if (!result) {
+ return v8::ThrowException(v8::String::New("externalizeString() failed."));
+ }
+ return v8::Undefined();
+}
+
+
+v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
+ const v8::Arguments& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ return v8::ThrowException(v8::String::New(
+ "isAsciiString() requires a single string argument."));
+ }
+ return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
+ v8::True() : v8::False();
+}
+
+
+static ExternalizeStringExtension externalize_extension;
+static v8::DeclareExtension externalize_extension_declaration(
+ &externalize_extension);
} } // namespace v8::internal
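
The SimpleStringResource pattern above is worth restating in isolation: the resource takes ownership of a buffer that was allocated with new[], so it must release it with delete[]. A minimal sketch (illustrative names, not the V8 classes):

#include <cstddef>
#include <cstdio>
#include <cstring>

template <typename Char>
class OwningStringResource {
 public:
  // Takes ownership of |data|, which must come from new Char[...].
  OwningStringResource(Char* data, size_t length)
      : data_(data), length_(length) {}
  ~OwningStringResource() { delete[] data_; }  // delete[] pairs with new[]
  const Char* data() const { return data_; }
  size_t length() const { return length_; }
 private:
  Char* const data_;
  const size_t length_;
};

int main() {
  const char kText[] = "externalized";
  char* buf = new char[sizeof(kText)];
  std::memcpy(buf, kText, sizeof(kText));
  OwningStringResource<char> resource(buf, sizeof(kText) - 1);
  std::printf("%zu chars at %p\n", resource.length(),
              static_cast<const void*>(resource.data()));
  return 0;  // buffer freed when resource goes out of scope
}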
diff --git a/src/execution.h b/src/execution.h
index e683e122..28235033 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -316,10 +316,21 @@ class GCExtension : public v8::Extension {
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> GC(const v8::Arguments& args);
private:
- static const char* kSource;
+ static const char* const kSource;
};
+class ExternalizeStringExtension : public v8::Extension {
+ public:
+ ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
+ static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
+ private:
+ static const char* const kSource;
+};
+
} } // namespace v8::internal
#endif // V8_EXECUTION_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 91477f9a..02e8f16e 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -123,6 +123,8 @@ DEFINE_bool(enable_armv7, true,
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
+DEFINE_bool(expose_externalize_string, false,
+ "expose externalize string extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_bool(disable_native_files, false, "disable builtin natives files")
@@ -191,7 +193,7 @@ DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
-DEFINE_bool(flush_code, false,
+DEFINE_bool(flush_code, true,
"flush code that we expect not to use again before full gc")
// v8.cc
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 90544f11..73b9748f 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -30,8 +30,8 @@
#include "heap-profiler.h"
#include "frames-inl.h"
#include "global-handles.h"
+#include "profile-generator.h"
#include "string-stream.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
@@ -314,6 +314,83 @@ void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
} // namespace
+HeapProfiler* HeapProfiler::singleton_ = NULL;
+
+HeapProfiler::HeapProfiler()
+ : snapshots_(new HeapSnapshotsCollection()),
+ next_snapshot_uid_(1) {
+}
+
+
+HeapProfiler::~HeapProfiler() {
+ delete snapshots_;
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+void HeapProfiler::Setup() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (singleton_ == NULL) {
+ singleton_ = new HeapProfiler();
+ }
+#endif
+}
+
+
+void HeapProfiler::TearDown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ delete singleton_;
+ singleton_ = NULL;
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->TakeSnapshotImpl(name);
+}
+
+
+HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->TakeSnapshotImpl(name);
+}
+
+
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
+ Heap::CollectAllGarbage(false);
+ HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
+ HeapSnapshotGenerator generator(result);
+ generator.GenerateSnapshot();
+ return result;
+}
+
+
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name) {
+ return TakeSnapshotImpl(snapshots_->GetName(name));
+}
+
+
+int HeapProfiler::GetSnapshotsCount() {
+ ASSERT(singleton_ != NULL);
+ return singleton_->snapshots_->snapshots()->length();
+}
+
+
+HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->snapshots_->snapshots()->at(index);
+}
+
+
+HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->snapshots_->GetSnapshot(uid);
+}
+
+
const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
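
The Setup/TearDown lifecycle added to HeapProfiler follows a lazily created singleton with explicit teardown. Sketched in isolation (illustrative class, not the profiler itself):

#include <cassert>

class Profiler {
 public:
  static void Setup()    { if (singleton_ == nullptr) singleton_ = new Profiler(); }
  static void TearDown() { delete singleton_; singleton_ = nullptr; }
  static int NextUid()   { assert(singleton_ != nullptr); return singleton_->next_uid_++; }
 private:
  Profiler() : next_uid_(1) {}
  int next_uid_;
  static Profiler* singleton_;
};

Profiler* Profiler::singleton_ = nullptr;

int main() {
  Profiler::Setup();
  int uid = Profiler::NextUid();  // 1, matching next_snapshot_uid_'s start value
  Profiler::TearDown();
  return uid - 1;
}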
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index d6f26505..b593b992 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -28,26 +28,56 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
-#include "zone.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
+class HeapSnapshot;
+class HeapSnapshotsCollection;
+
+#endif
+
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
class HeapProfiler {
public:
+ static void Setup();
+ static void TearDown();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static HeapSnapshot* TakeSnapshot(const char* name);
+ static HeapSnapshot* TakeSnapshot(String* name);
+ static int GetSnapshotsCount();
+ static HeapSnapshot* GetSnapshot(int index);
+ static HeapSnapshot* FindSnapshot(unsigned uid);
+
+ // Obsolete interface.
// Write a single heap sample to the log file.
static void WriteSample();
private:
+ HeapProfiler();
+ ~HeapProfiler();
+ HeapSnapshot* TakeSnapshotImpl(const char* name);
+ HeapSnapshot* TakeSnapshotImpl(String* name);
+
+ // Obsolete interface.
// Update the array info with stats from obj.
static void CollectStats(HeapObject* obj, HistogramInfo* info);
+
+ HeapSnapshotsCollection* snapshots_;
+ unsigned next_snapshot_uid_;
+
+ static HeapProfiler* singleton_;
+#endif // ENABLE_LOGGING_AND_PROFILING
};
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
// JSObjectsCluster describes a group of JS objects that are
// considered equivalent in terms of a particular profile.
class JSObjectsCluster BASE_EMBEDDED {
diff --git a/src/heap.cc b/src/heap.cc
index 3fc7d02b..f1ec56ce 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1929,6 +1929,18 @@ Object* Heap::AllocateConsString(String* first, String* second) {
return Failure::OutOfMemoryException();
}
+ bool is_ascii_data_in_two_byte_string = false;
+ if (!is_ascii) {
+ // At least one of the strings uses two-byte representation so we
+ // can't use the fast case code for short ascii strings below, but
+ // we can try to save memory if all chars actually fit in ascii.
+ is_ascii_data_in_two_byte_string =
+ first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
+ if (is_ascii_data_in_two_byte_string) {
+ Counters::string_add_runtime_ext_to_ascii.Increment();
+ }
+ }
+
// If the resulting string is small make a flat string.
if (length < String::kMinNonFlatLength) {
ASSERT(first->IsFlat());
@@ -1955,22 +1967,13 @@ Object* Heap::AllocateConsString(String* first, String* second) {
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
- // For short external two-byte strings we check whether they can
- // be represented using ascii.
- if (!first_is_ascii) {
- first_is_ascii = first->IsExternalTwoByteStringWithAsciiChars();
- }
- if (first_is_ascii && !second_is_ascii) {
- second_is_ascii = second->IsExternalTwoByteStringWithAsciiChars();
- }
- if (first_is_ascii && second_is_ascii) {
+ if (is_ascii_data_in_two_byte_string) {
Object* result = AllocateRawAsciiString(length);
if (result->IsFailure()) return result;
// Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
- Counters::string_add_runtime_ext_to_ascii.Increment();
return result;
}
@@ -1984,7 +1987,8 @@ Object* Heap::AllocateConsString(String* first, String* second) {
}
}
- Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
+ Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
+ cons_ascii_string_map() : cons_string_map();
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
@@ -2070,7 +2074,23 @@ Object* Heap::AllocateExternalStringFromTwoByte(
return Failure::OutOfMemoryException();
}
- Map* map = Heap::external_string_map();
+ // For small strings we check whether the resource contains only
+ // ascii characters. If yes, we use a different string map.
+ bool is_ascii = true;
+ if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
+ is_ascii = false;
+ } else {
+ const uc16* data = resource->data();
+ for (size_t i = 0; i < length; i++) {
+ if (data[i] > String::kMaxAsciiCharCode) {
+ is_ascii = false;
+ break;
+ }
+ }
+ }
+
+ Map* map = is_ascii ?
+ Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
@@ -2244,6 +2264,12 @@ static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
ThreadManager::IterateArchivedThreads(&threadvisitor);
if (threadvisitor.FoundCode()) return;
+ // Check whether there are heap-allocated locals in the scopeinfo. If
+ // there are, we are potentially using eval and need the scopeinfo
+ // for variable resolution.
+ if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code()))
+ return;
+
HandleScope scope;
// Compute the lazy compilable version of the code.
function_info->set_code(*ComputeLazyCompile(function_info->length()));
@@ -2853,6 +2879,9 @@ Map* Heap::SymbolMapForString(String* string) {
if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
if (map == external_string_map()) return external_symbol_map();
if (map == external_ascii_string_map()) return external_ascii_symbol_map();
+ if (map == external_string_with_ascii_data_map()) {
+ return external_symbol_with_ascii_data_map();
+ }
// No match found.
return NULL;
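
The map choice in AllocateExternalStringFromTwoByte and the ascii-data path in AllocateConsString both hinge on the same scan: does every code unit of a two-byte buffer fit in ascii? A sketch of that check (kMaxAsciiCharCode is 0x7f at this revision; the helper is illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uint16_t uc16;

static bool HasOnlyAsciiData(const uc16* data, size_t length) {
  for (size_t i = 0; i < length; i++) {
    if (data[i] > 0x7f) return false;  // genuinely two-byte
  }
  return true;  // eligible for the *_with_ascii_data map
}

int main() {
  const uc16 ascii[] = { 'v', '8' };
  const uc16 wide[] = { 'v', 0x2603 };  // SNOWMAN forces two-byte
  assert(HasOnlyAsciiData(ascii, 2) && !HasOnlyAsciiData(wide, 2));
  return 0;
}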
diff --git a/src/heap.h b/src/heap.h
index 0db40083..a8f8c343 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -69,10 +69,12 @@ class ZoneScopeInfo;
V(Map, cons_symbol_map, ConsSymbolMap) \
V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
V(Map, external_symbol_map, ExternalSymbolMap) \
+ V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, external_string_map, ExternalStringMap) \
+ V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
@@ -1882,8 +1884,8 @@ class TranscendentalCache {
};
inline static int Hash(const Converter& c) {
uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= hash >> 16;
- hash ^= hash >> 8;
+ hash ^= static_cast<int32_t>(hash) >> 16;
+ hash ^= static_cast<int32_t>(hash) >> 8;
return (hash & (kCacheSize - 1));
}
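
The casts in the TranscendentalCache hash force an arithmetic right shift so the C++ hash agrees with the sar instructions the ia32 stub now emits (see the codegen-ia32.cc hunk below). A standalone sketch:

#include <cassert>
#include <cstdint>

// Note: >> on a negative int32_t is technically implementation-defined in
// C++, which is why the cast is spelled out rather than relied on implicitly.
static int CacheHash(uint32_t lo, uint32_t hi, int cache_size) {
  uint32_t hash = lo ^ hi;
  hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 16);
  hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 8);
  return static_cast<int>(hash & static_cast<uint32_t>(cache_size - 1));
}

int main() {
  assert(CacheHash(0x80000000u, 0u, 512) < 512);  // stays inside the table
  return 0;
}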
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index a851b427..eb2a04db 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -43,10 +43,6 @@
namespace v8 {
namespace internal {
-Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index d4dff330..ce2099da 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -378,6 +378,11 @@ void Assembler::Align(int m) {
}
+void Assembler::CodeTargetAlign() {
+ Align(16); // Preferred alignment of jump targets on ia32.
+}
+
+
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
@@ -2154,17 +2159,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::comisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2F);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
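
The comisd-to-ucomisd switch throughout this change matters for NaN: both instructions set the parity flag on an unordered result, but comisd also signals an invalid-operation exception for quiet NaN operands, whereas ucomisd does so only for signaling NaNs. The unordered outcome the stubs test for (j parity_even) behaves like this sketch:

#include <cassert>
#include <cmath>

// "Unordered" is what PF=1 encodes after ucomisd.
static bool Unordered(double a, double b) {
  return std::isnan(a) || std::isnan(b);
}

int main() {
  double qnan = std::nan("");
  assert(Unordered(qnan, 1.0));
  assert(!(qnan < 1.0) && !(qnan == 1.0) && !(qnan > 1.0));  // NaN compares false
  return 0;
}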
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 7dcbab5c..c76c55cf 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -146,7 +146,10 @@ enum Condition {
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+inline Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) {
@@ -172,12 +175,14 @@ inline Condition ReverseCondition(Condition cc) {
};
}
+
enum Hint {
no_hint = 0,
not_taken = 0x2e,
taken = 0x3e
};
+
// The result of negating a hint is as if the corresponding condition
// were negated by NegateCondition. That is, no_hint is mapped to
// itself and not_taken and taken are mapped to each other.
@@ -502,6 +507,8 @@ class Assembler : public Malloced {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Aligns code to a position that is optimal as a jump target on the platform.
+ void CodeTargetAlign();
// Stack
void pushad();
@@ -779,7 +786,6 @@ class Assembler : public Malloced {
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
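
Moving NegateCondition into the header also makes its trick easy to see: ia32 condition codes come in even/odd pairs whose members are logical negations of each other, so flipping the low bit negates the condition. A sketch with a few real encodings:

#include <cassert>

enum Condition { zero = 4, not_zero = 5, less = 12, greater_equal = 13 };

inline Condition Negate(Condition cc) {
  return static_cast<Condition>(cc ^ 1);  // even/odd pair members negate each other
}

int main() {
  assert(Negate(zero) == not_zero);
  assert(Negate(less) == greater_equal);
  return 0;
}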
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 29b6c691..6b074723 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -604,6 +604,10 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
RegisterFile empty_regs;
SetFrame(clone, &empty_regs);
__ bind(&allocation_failed);
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ // Pop the value from the floating point stack.
+ __ fstp(0);
+ }
unsafe_bailout_->Jump();
done.Bind(value);
@@ -2991,7 +2995,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
&not_numbers);
LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
&not_numbers);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
} else {
Label check_right, compare;
@@ -3278,6 +3282,9 @@ void CodeGenerator::VisitAndSpill(Statement* statement) {
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
@@ -3285,14 +3292,20 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
frame_->SpillAll();
}
set_in_spilled_code(true);
+
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
ASSERT(!in_spilled_code());
for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
Visit(statements->at(i));
}
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@@ -6909,8 +6922,7 @@ void DeferredSearchCache::Generate() {
__ bind(&cache_miss);
__ push(cache_); // store a reference to cache
__ push(key_); // store a key
- Handle<Object> receiver(Top::global_context()->global());
- __ push(Immediate(receiver));
+ __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ push(key_);
// On ia32 function must be in edi.
__ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
@@ -7298,7 +7310,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
- __ comisd(xmm2, xmm1);
+ __ ucomisd(xmm2, xmm1);
call_runtime.Branch(not_equal);
// Calculates square root.
__ movsd(xmm1, xmm0);
@@ -10285,15 +10297,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// ST[0] == double value
// ebx = low 32 bits of double value
// edx = high 32 bits of double value
- // Compute hash:
+ // Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
__ xor_(ecx, Operand(edx));
__ mov(eax, ecx);
- __ shr(eax, 16);
+ __ sar(eax, 16);
__ xor_(ecx, Operand(eax));
__ mov(eax, ecx);
- __ shr(eax, 8);
+ __ sar(eax, 8);
__ xor_(ecx, Operand(eax));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
@@ -11305,58 +11317,58 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
- Label seq_string, seq_two_byte_string, check_code;
- const int kStringRepresentationEncodingMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ Label seq_ascii_string, seq_two_byte_string, check_code;
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ and_(ebx, kStringRepresentationEncodingMask);
- // First check for sequential string.
- ASSERT_EQ(0, kStringTag);
- ASSERT_EQ(0, kSeqStringTag);
+ // First check for flat two byte string.
+ __ and_(ebx,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
__ test(Operand(ebx),
Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_string);
+ __ j(zero, &seq_ascii_string);
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- __ and_(ebx, kStringRepresentationMask);
- __ cmp(ebx, kConsStringTag);
- __ j(not_equal, &runtime);
+ ASSERT(kExternalStringTag != 0);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
__ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
__ cmp(Operand(edx), Factory::empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- ASSERT_EQ(0, kSeqStringTag);
- __ test(ebx, Immediate(kStringRepresentationMask));
+ // String is a cons string with empty second part.
+ // eax: first part of cons string.
+ // ebx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask | kStringEncodingMask);
+ ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask);
__ j(not_zero, &runtime);
- __ and_(ebx, kStringRepresentationEncodingMask);
- __ bind(&seq_string);
- // eax: subject string (sequential either ascii to two byte)
- // ebx: suject string type & kStringRepresentationEncodingMask
+ __ bind(&seq_ascii_string);
+ // eax: subject string (flat ascii)
// ecx: RegExp data (FixedArray)
- // Check that the irregexp code has been generated for an ascii string. If
- // it has, the field contains a code object otherwise it contains the hole.
- const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
- __ cmp(ebx, kSeqTwoByteString);
- __ j(equal, &seq_two_byte_string);
- if (FLAG_debug_code) {
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ Check(equal, "Expected sequential ascii string");
- }
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
__ Set(edi, Immediate(1)); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
- // eax: subject string
+ // eax: subject string (flat two byte)
// ecx: RegExp data (FixedArray)
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
__ Set(edi, Immediate(0)); // Type is two byte.
@@ -11584,7 +11596,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
} else {
__ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
__ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
@@ -11809,7 +11821,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope use_cmov(CMOV);
FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, not_taken);
@@ -12840,7 +12852,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat allocate a cons string object. If both
// strings are ascii the result is an ascii cons string.
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
__ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -12849,6 +12861,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
+ __ bind(&ascii_data);
// Allocate an ascii cons string.
__ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
__ bind(&allocated);
@@ -12863,6 +12876,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // ecx: first instance type AND second instance type.
+ // edi: second instance type.
+ __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, Operand(ecx));
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
__ jmp(&allocated);
@@ -12897,7 +12923,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(zero, &string_add_runtime);
__ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
+ // Both strings are ascii strings. As they are short they are both flat.
// ebx: length of resulting flat string as a smi
__ SmiUntag(ebx);
__ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index f339d2e1..b0c07b7b 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -306,22 +306,22 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
- Register r0,
+ Register map,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
- // r0 - used to hold the map of the receiver.
+ // map - used to hold the map of the receiver.
// Check that the object isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, slow, not_taken);
// Get the map of the receiver.
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask);
__ j(not_zero, slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -330,7 +330,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// into string objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpInstanceType(r0, JS_OBJECT_TYPE);
+ __ CmpInstanceType(map, JS_OBJECT_TYPE);
__ j(below, slow, not_taken);
}
@@ -371,7 +371,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
+// Falls through if the key is a symbol.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
@@ -399,11 +399,9 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
// Picks out an array index from the hash field.
-// The generated code never falls through.
static void GenerateIndexFromHash(MacroAssembler* masm,
Register key,
- Register hash,
- Label* index_smi) {
+ Register hash) {
// Register use:
// key - holds the overwritten key on exit.
// hash - holds the key's hash. Clobbered.
@@ -415,8 +413,6 @@ static void GenerateIndexFromHash(MacroAssembler* masm,
(1 << String::kArrayIndexValueBits));
// We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
- // key: string key
- // ebx: hash field.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(hash, String::kArrayIndexValueMask);
__ shr(hash, String::kHashShift - kSmiTagSize);
@@ -424,8 +420,6 @@ static void GenerateIndexFromHash(MacroAssembler* masm,
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
__ mov(key, hash);
- // Now jump to the place where smi keys are handled.
- __ jmp(index_smi);
}
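
What GenerateIndexFromHash computes, in scalar form (the constants below are assumptions chosen for illustration, not V8's exact field layout):

#include <cassert>
#include <cstdint>

static const int kHashShift = 2;                     // assumed cached-index position
static const int kSmiTagSize = 1;
static const uint32_t kArrayIndexValueMask = 0xfffffcu;  // assumed index bit range

static uint32_t SmiTaggedIndexFromHash(uint32_t hash) {
  // Mask out the cached index bits, then shift so exactly one zero tag
  // bit remains at the bottom -- the same and/shr pair as above.
  return (hash & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
}

int main() {
  uint32_t hash = 7u << kHashShift;  // index 7 cached in the hash field
  assert(SmiTaggedIndexFromHash(hash) == (7u << kSmiTagSize));
  return 0;
}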
@@ -574,7 +568,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&index_string);
- GenerateIndexFromHash(masm, eax, ebx, &index_smi);
+ GenerateIndexFromHash(masm, eax, ebx);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
@@ -1125,13 +1121,12 @@ Object* CallIC_Miss(Arguments args);
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
- Code::Kind kind,
- Label* miss) {
+ Code::Kind kind) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
// -----------------------------------
- Label number, non_number, non_string, boolean, probe;
+ Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
Code::Flags flags =
@@ -1166,7 +1161,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
__ cmp(edx, Factory::true_value());
__ j(equal, &boolean, not_taken);
__ cmp(edx, Factory::false_value());
- __ j(not_equal, miss, taken);
+ __ j(not_equal, &miss, taken);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@@ -1174,6 +1169,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+ __ bind(&miss);
}
@@ -1214,8 +1210,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
}
-// The generated code never falls through.
-static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
+// The generated code falls through if the call should be handled by runtime.
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1223,20 +1219,20 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Label global_object, non_global_object;
+ Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, &miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss, not_taken);
+ __ j(below, &miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -1252,8 +1248,8 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// Check that the global object does not require access checks.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
- __ j(not_equal, miss, not_taken);
- GenerateNormalHelper(masm, argc, true, miss);
+ __ j(not_equal, &miss, not_taken);
+ GenerateNormalHelper(masm, argc, true, &miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
@@ -1264,14 +1260,16 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// require access checks.
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, &miss, not_taken);
__ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, miss);
+ GenerateNormalHelper(masm, argc, false, &miss);
// Global object proxy access: Check access rights.
__ bind(&global_proxy);
- __ CheckAccessGlobalProxy(edx, eax, miss);
+ __ CheckAccessGlobalProxy(edx, eax, &miss);
__ jmp(&invoke);
+
+ __ bind(&miss);
}
@@ -1337,24 +1335,36 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Label miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, &miss);
- __ bind(&miss);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
GenerateMiss(masm, argc);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- Label miss;
- GenerateCallNormal(masm, argc, &miss);
- __ bind(&miss);
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}
@@ -1385,13 +1395,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call);
- GenerateFastArrayLoad(masm,
- edx,
- ecx,
- eax,
- edi,
- &check_number_dictionary,
- &slow_load);
+ GenerateFastArrayLoad(
+ masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
__ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
__ bind(&do_call);
@@ -1417,14 +1422,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
- GenerateNumberDictionaryLoad(masm,
- &slow_reload_receiver,
- eax,
- ecx,
- ebx,
- edx,
- edi,
- edi);
+ GenerateNumberDictionaryLoad(
+ masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
__ jmp(&do_call);
@@ -1459,21 +1458,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
Immediate(Factory::hash_table_map()));
__ j(not_equal, &lookup_monomorphic_cache, not_taken);
- GenerateDictionaryLoad(masm,
- &slow_load,
- edx,
- ecx,
- ebx,
- eax,
- edi,
- edi,
- DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(
+ masm, &slow_load, edx, ecx, ebx, eax, edi, edi, DICTIONARY_CHECK_DONE);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &slow_call);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
__ bind(&slow_call);
@@ -1487,19 +1479,35 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, ecx, ebx, &index_smi);
+ GenerateIndexFromHash(masm, ecx, ebx);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- Label miss;
- GenerateCallNormal(masm, argc, &miss);
- __ bind(&miss);
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 48d9e674..bab0435f 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -816,8 +816,13 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(other);
__ push(receiver); // receiver
__ push(reg); // holder
- __ mov(other, Immediate(callback_handle));
- __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
+ // Push data from AccessorInfo.
+ if (Heap::InNewSpace(callback_handle->data())) {
+ __ mov(other, Immediate(callback_handle));
+ __ push(FieldOperand(other, AccessorInfo::kDataOffset));
+ } else {
+ __ push(Immediate(Handle<Object>(callback_handle->data())));
+ }
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
// This will be passed as the const AccessorInfo& to the C++ callback.
diff --git a/src/ic.cc b/src/ic.cc
index 2b77a54e..475f1611 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -387,6 +387,7 @@ Object* CallICBase::TryCallAsFunction(Object* object) {
return *delegate;
}
+
void CallICBase::ReceiverToObject(Handle<Object> object) {
HandleScope scope;
Handle<Object> receiver(object);
@@ -588,6 +589,9 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
state == MONOMORPHIC ||
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
+ } else if (state == MEGAMORPHIC) {
+ // Update the stub cache.
+ StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
}
#ifdef DEBUG
@@ -664,7 +668,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
Code* target = NULL;
target = Builtins::builtin(Builtins::LoadIC_StringLength);
set_target(target);
- StubCache::Set(*name, map, target);
return Smi::FromInt(String::cast(*object)->length());
}
@@ -679,7 +682,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
set_target(target);
- StubCache::Set(*name, map, target);
return JSArray::cast(*object)->length();
}
@@ -691,7 +693,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
#endif
Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
set_target(target);
- StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
return Accessors::FunctionGetPrototype(*object, 0);
}
}
@@ -733,6 +734,28 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
+ }
+ } else {
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
+ *name->ToCString());
+ }
+ }
+ } else {
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
+ *name->ToCString());
+ }
+ }
+ } else {
+ if (FLAG_use_ic && state == PREMONOMORPHIC) {
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
+ *name->ToCString());
+#endif
}
}
}
@@ -847,6 +870,9 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
set_target(Code::cast(code));
} else if (state == MONOMORPHIC) {
set_target(megamorphic_stub());
+ } else if (state == MEGAMORPHIC) {
+ // Update the stub cache.
+ StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
}
#ifdef DEBUG
@@ -1110,7 +1136,6 @@ Object* StoreIC::Store(State state,
return *value;
}
-
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
&& name->Equals(Heap::length_symbol())
@@ -1120,7 +1145,6 @@ Object* StoreIC::Store(State state,
#endif
Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
set_target(target);
- StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
return receiver->SetProperty(*name, *value, NONE);
}
@@ -1208,8 +1232,11 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
} else if (state == MONOMORPHIC) {
- // Only move to mega morphic if the target changes.
+ // Only move to megamorphic if the target changes.
if (target() != Code::cast(code)) set_target(megamorphic_stub());
+ } else if (state == MEGAMORPHIC) {
+ // Update the stub cache.
+ StubCache::Set(*name, receiver->map(), Code::cast(code));
}
#ifdef DEBUG
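
The common thread in the three UpdateCaches changes above: once an IC has gone megamorphic, newly compiled stubs now go into the shared StubCache rather than being patched into the call site, so megamorphic sites can still hit specialized stubs on later probes. A toy model (illustrative types, not V8's):

#include <cassert>
#include <map>
#include <string>
#include <utility>

enum State { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };
typedef std::map<std::pair<std::string, int>, int> StubCacheSketch;

static void UpdateCaches(State state, const std::string& name, int map_id,
                         int code, int* target, StubCacheSketch* cache) {
  if (state == UNINITIALIZED || state == MONOMORPHIC) {
    *target = code;  // patch the call site directly
  } else if (state == MEGAMORPHIC) {
    // New in this revision: feed the shared stub cache instead of repatching.
    (*cache)[std::make_pair(name, map_id)] = code;
  }
}

int main() {
  StubCacheSketch cache;
  int target = 0;
  UpdateCaches(MEGAMORPHIC, "length", 7, 42, &target, &cache);
  assert(target == 0 && cache[std::make_pair(std::string("length"), 7)] == 42);
  return 0;
}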
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 9a1f1f11..3e9c5eab 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -1747,9 +1747,11 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
if ((mask & char_mask) == char_mask) need_mask = false;
mask &= char_mask;
} else {
- // For 2-character preloads in ASCII mode we also use a 16 bit load with
- // zero extend.
+ // For 2-character preloads in ASCII mode or 1-character preloads in
+ // TWO_BYTE mode we also use a 16 bit load with zero extend.
if (details->characters() == 2 && compiler->ascii()) {
+ if ((mask & 0x7f7f) == 0x7f7f) need_mask = false;
+ } else if (details->characters() == 1 && !compiler->ascii()) {
if ((mask & 0xffff) == 0xffff) need_mask = false;
} else {
if (mask == 0xffffffff) need_mask = false;
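
Restating the widened preload rule: a 16-bit load covers either two ascii characters (7 useful bits each, hence 0x7f7f) or one two-byte character (0xffff), and a mask that keeps every loaded bit is a no-op that can be skipped. A sketch of the decision:

#include <cassert>
#include <cstdint>

static bool NeedMask(int characters, bool ascii, uint32_t mask) {
  if (characters == 2 && ascii) return (mask & 0x7f7f) != 0x7f7f;
  if (characters == 1 && !ascii) return (mask & 0xffff) != 0xffff;
  return mask != 0xffffffffu;
}

int main() {
  assert(!NeedMask(2, true, 0x7f7f));   // full ascii pair: skip the and
  assert(!NeedMask(1, false, 0xffff));  // full two-byte char: skip the and
  assert(NeedMask(1, false, 0xff00));   // partial mask still needed
  return 0;
}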
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index f9b20a4b..b60e54d3 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -552,12 +552,14 @@ static const char* TypeToString(InstanceType type) {
case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
case EXTERNAL_ASCII_SYMBOL_TYPE:
+ case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
case ASCII_STRING_TYPE: return "ASCII_STRING";
case STRING_TYPE: return "TWO_BYTE_STRING";
case CONS_STRING_TYPE:
case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 4112f933..d6571bff 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -237,31 +237,20 @@ bool StringShape::IsSymbol() {
bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag &&
- ConsString::cast(this)->second()->length() == 0) {
- return ConsString::cast(this)->first()->IsAsciiRepresentation();
- }
return (type & kStringEncodingMask) == kAsciiStringTag;
}
bool String::IsTwoByteRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag &&
- ConsString::cast(this)->second()->length() == 0) {
- return ConsString::cast(this)->first()->IsTwoByteRepresentation();
- }
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
-bool String::IsExternalTwoByteStringWithAsciiChars() {
- if (!IsExternalTwoByteString()) return false;
- const uc16* data = ExternalTwoByteString::cast(this)->resource()->data();
- for (int i = 0, len = length(); i < len; i++) {
- if (data[i] > kMaxAsciiCharCode) return false;
- }
- return true;
+bool String::HasOnlyAsciiChars() {
+ uint32_t type = map()->instance_type();
+ return (type & kStringEncodingMask) == kAsciiStringTag ||
+ (type & kAsciiDataHintMask) == kAsciiDataHintTag;
}
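
The new predicate replaces a per-character scan of external two-byte strings with two bit tests on the instance type. A standalone sketch of the same logic (the hint constants match the ones added in objects.h below; the encoding tag values are assumptions for illustration):

#include <cassert>
#include <cstdint>

static const uint32_t kStringEncodingMask = 0x04;  // assumed bit position
static const uint32_t kAsciiStringTag     = 0x04;
static const uint32_t kTwoByteStringTag   = 0x00;
static const uint32_t kAsciiDataHintMask  = 0x08;  // as added in objects.h
static const uint32_t kAsciiDataHintTag   = 0x08;

static bool HasOnlyAsciiChars(uint32_t type) {
  return (type & kStringEncodingMask) == kAsciiStringTag ||
         (type & kAsciiDataHintMask) == kAsciiDataHintTag;
}

int main() {
  assert(HasOnlyAsciiChars(kAsciiStringTag));                       // real ascii string
  assert(HasOnlyAsciiChars(kTwoByteStringTag | kAsciiDataHintTag)); // hinted two-byte
  assert(!HasOnlyAsciiChars(kTwoByteStringTag));                    // genuine two-byte
  return 0;
}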
diff --git a/src/objects.cc b/src/objects.cc
index 1e4d4a4c..63b77b79 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -678,6 +678,9 @@ Object* String::SlowTryFlatten(PretenureFlag pretenure) {
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
+ // Externalizing twice leaks the external resource, so it's
+ // prohibited by the API.
+ ASSERT(!this->IsExternalString());
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -697,13 +700,16 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
return false;
}
ASSERT(size >= ExternalString::kSize);
+ bool is_ascii = this->IsAsciiRepresentation();
bool is_symbol = this->IsSymbol();
int length = this->length();
int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(Heap::external_string_map());
+ this->set_map(is_ascii ?
+ Heap::external_string_with_ascii_data_map() :
+ Heap::external_string_map());
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_length(length);
self->set_hash_field(hash_field);
@@ -713,7 +719,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into a external symbol.
- this->set_map(Heap::external_symbol_map());
+ this->set_map(is_ascii ?
+ Heap::external_symbol_with_ascii_data_map() :
+ Heap::external_symbol_map());
}
// Fill the remainder of the string with dead wood.
@@ -2013,25 +2021,18 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
CustomArguments args(interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQueryImpl query =
- v8::ToCData<v8::NamedPropertyQueryImpl>(interceptor->query());
+ v8::NamedPropertyQuery query =
+ v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
- v8::Handle<v8::Value> result;
+ v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = query(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) {
- // Temporary complicated logic, would be removed soon.
- if (result->IsBoolean()) {
- // Convert the boolean result to a property attribute
- // specification.
- return result->IsTrue() ? NONE : ABSENT;
- } else {
- ASSERT(result->IsInt32());
- return static_cast<PropertyAttributes>(result->Int32Value());
- }
+ ASSERT(result->IsInt32());
+ return static_cast<PropertyAttributes>(result->Int32Value());
}
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
@@ -8154,7 +8155,7 @@ Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
- int entry = FindEntry(key);
+ int entry = this->FindEntry(key);
// If the entry is present set the value;
if (entry != Dictionary<Shape, Key>::kNotFound) {
@@ -8179,7 +8180,7 @@ Object* Dictionary<Shape, Key>::Add(Key key,
Object* value,
PropertyDetails details) {
// Validate that the key is absent.
- SLOW_ASSERT((FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
+ SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
Object* obj = EnsureCapacity(1, key);
if (obj->IsFailure()) return obj;
@@ -8238,7 +8239,7 @@ Object* NumberDictionary::AddNumberEntry(uint32_t key,
Object* value,
PropertyDetails details) {
UpdateMaxNumberKey(key);
- SLOW_ASSERT(FindEntry(key) == kNotFound);
+ SLOW_ASSERT(this->FindEntry(key) == kNotFound);
return Add(key, value, details);
}
diff --git a/src/objects.h b/src/objects.h
index 095dd981..0c146656 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -320,6 +320,10 @@ enum PropertyNormalizationMode {
ExternalTwoByteString::kSize, \
external_symbol, \
ExternalSymbol) \
+ V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_symbol_with_ascii_data, \
+ ExternalSymbolWithAsciiData) \
V(EXTERNAL_ASCII_SYMBOL_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_symbol, \
@@ -344,6 +348,10 @@ enum PropertyNormalizationMode {
ExternalTwoByteString::kSize, \
external_string, \
ExternalString) \
+ V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_string_with_ascii_data, \
+ ExternalStringWithAsciiData) \
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
@@ -412,6 +420,11 @@ enum StringRepresentationTag {
};
const uint32_t kIsConsStringMask = 0x1;
+// If bit 7 is clear, then bit 3 indicates whether this two-byte
+// string actually contains ascii data.
+const uint32_t kAsciiDataHintMask = 0x08;
+const uint32_t kAsciiDataHintTag = 0x08;
+
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector unless it is a
@@ -427,18 +440,22 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
- SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
+ SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
- CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
+ CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
- EXTERNAL_SYMBOL_TYPE = kSymbolTag | kExternalStringTag,
+ EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
+ EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
+ kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_SYMBOL_TYPE =
kAsciiStringTag | kSymbolTag | kExternalStringTag,
- STRING_TYPE = kSeqStringTag,
+ STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
- CONS_STRING_TYPE = kConsStringTag,
+ CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
- EXTERNAL_STRING_TYPE = kExternalStringTag,
+ EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
+ EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+ kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
@@ -474,10 +491,12 @@ enum InstanceType {
TYPE_SWITCH_INFO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
-#ifdef ENABLE_DEBUGGER_SUPPORT
+ // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
+ // is defined. However, as include/v8.h contains some of the instance type
+ // constants, always defining them avoids their values changing depending on
+ // whether ENABLE_DEBUGGER_SUPPORT is defined.
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
-#endif
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -511,6 +530,11 @@ enum InstanceType {
};
+STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
+STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
+
+
enum CompareResult {
LESS = -1,
EQUAL = 0,
@@ -1123,7 +1147,7 @@ class HeapNumber: public HeapObject {
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
static const int kMantissaBits = 52;
- static const int KExponentBits = 11;
+ static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
@@ -2151,6 +2175,11 @@ class Dictionary: public HashTable<Shape, Key> {
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
+ // Check that this value can actually be written.
+ PropertyDetails details = DetailsAt(entry);
+ // If a value has not been initialized, we allow writing to it even if
+ // it is read only (a declared const that has not been initialized).
+ if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) return;
this->set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
}
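// In effect the guard above gives read-only dictionary slots one-shot
// initialization semantics (a sketch of the rule, not part of this change):
//   - slot is read-only and holds the hole -> write proceeds (initializes a
//     declared-but-uninitialized const)
//   - slot is read-only and initialized    -> write is silently dropped
//   - slot is writable                     -> write proceeds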
@@ -2832,14 +2861,14 @@ class Code: public HeapObject {
// Flags layout.
static const int kFlagsICStateShift = 0;
static const int kFlagsICInLoopShift = 3;
- static const int kFlagsKindShift = 4;
- static const int kFlagsTypeShift = 8;
+ static const int kFlagsTypeShift = 4;
+ static const int kFlagsKindShift = 7;
static const int kFlagsArgumentsCountShift = 11;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
- static const int kFlagsKindMask = 0x000000F0; // 00011110000
- static const int kFlagsTypeMask = 0x00000700; // 11100000000
+ static const int kFlagsTypeMask = 0x00000070; // 00001110000
+ static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsArgumentsCountMask = 0xFFFFF800;
static const int kFlagsNotUsedInLookup =
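// A sketch of how the relocated fields decode, assuming the shifts and masks
// above; both helpers are hypothetical, for illustration only.
static inline int ExtractTypeFromFlags(int flags) {
  return (flags & kFlagsTypeMask) >> kFlagsTypeShift;  // 3 bits at 4..6
}
static inline int ExtractKindFromFlags(int flags) {
  return (flags & kFlagsKindMask) >> kFlagsKindShift;  // 4 bits at 7..10
}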
@@ -4064,12 +4093,14 @@ class String: public HeapObject {
inline bool IsAsciiRepresentation();
inline bool IsTwoByteRepresentation();
- // Check whether this string is an external two-byte string that in
- // fact contains only ascii characters.
+ // Returns whether this string has ascii chars, i.e. all of them can
+ // be ascii encoded. This might be the case even if the string is
+ // two-byte. Such strings may appear when the embedder prefers
+ // two-byte external representations even for ascii data.
//
- // Such strings may appear when the embedder prefers two-byte
- // representations even for ascii data.
- inline bool IsExternalTwoByteStringWithAsciiChars();
+ // NOTE: this should be considered only a hint. False negatives are
+ // possible.
+ inline bool HasOnlyAsciiChars();
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
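// Because the flag is only a hint, callers may rely on a true result but not
// on a false one. A sketch of correct usage (illustration only):
//   if (s->HasOnlyAsciiChars()) {
//     // Safe: every character is ascii-encodable.
//   } else {
//     // Inconclusive: the string may still be all-ascii (false negative).
//   }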
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 805ed3e6..57ff6610 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -818,7 +818,7 @@ HeapGraphEdge::HeapGraphEdge(Type type,
HeapEntry* from,
HeapEntry* to)
: type_(type), name_(name), from_(from), to_(to) {
- ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
}
@@ -845,26 +845,30 @@ HeapEntry::~HeapEntry() {
}
-void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
- HeapGraphEdge* edge =
- new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry);
+void HeapEntry::AddEdge(HeapGraphEdge* edge) {
children_.Add(edge);
- entry->retainers_.Add(edge);
+ edge->to()->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
+ AddEdge(
+ new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry));
}
void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
- HeapGraphEdge* edge = new HeapGraphEdge(index, this, entry);
- children_.Add(edge);
- entry->retainers_.Add(edge);
+ AddEdge(new HeapGraphEdge(index, this, entry));
+}
+
+
+void HeapEntry::SetInternalReference(const char* name, HeapEntry* entry) {
+ AddEdge(new HeapGraphEdge(HeapGraphEdge::INTERNAL, name, this, entry));
}
void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
- HeapGraphEdge* edge =
- new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry);
- children_.Add(edge);
- entry->retainers_.Add(edge);
+ AddEdge(new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry));
}
@@ -1074,7 +1078,7 @@ void HeapEntry::CutEdges() {
void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d %6d", self_size_, TotalSize(), NonSharedTotalSize());
+ OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize());
if (type_ != STRING) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1100,6 +1104,9 @@ void HeapEntry::Print(int max_depth, int indent) {
case HeapGraphEdge::ELEMENT:
OS::Print(" %*c %d: ", indent, ' ', edge->index());
break;
+ case HeapGraphEdge::INTERNAL:
+ OS::Print(" %*c $%s: ", indent, ' ', edge->name());
+ break;
case HeapGraphEdge::PROPERTY:
OS::Print(" %*c %s: ", indent, ' ', edge->name());
break;
@@ -1114,7 +1121,7 @@ void HeapEntry::Print(int max_depth, int indent) {
const char* HeapEntry::TypeAsString() {
switch (type_) {
case INTERNAL: return "/internal/";
- case JS_OBJECT: return "/object/";
+ case OBJECT: return "/object/";
case CLOSURE: return "/closure/";
case STRING: return "/string/";
case CODE: return "/code/";
@@ -1145,6 +1152,9 @@ void HeapGraphPath::Print() {
case HeapGraphEdge::ELEMENT:
OS::Print("[%d] ", edge->index());
break;
+ case HeapGraphEdge::INTERNAL:
+ OS::Print("[$%s] ", edge->name());
+ break;
case HeapGraphEdge::PROPERTY:
OS::Print("[%s] ", edge->name());
break;
@@ -1262,7 +1272,7 @@ HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
return AddEntry(object, HeapEntry::CLOSURE, collection_->GetName(name));
} else if (object->IsJSObject()) {
return AddEntry(object,
- HeapEntry::JS_OBJECT,
+ HeapEntry::OBJECT,
collection_->GetName(
JSObject::cast(object)->constructor_name()));
} else if (object->IsJSGlobalPropertyCell()) {
@@ -1276,10 +1286,19 @@ HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
return AddEntry(object,
HeapEntry::STRING,
collection_->GetName(String::cast(object)));
- } else if (object->IsCode()
- || object->IsSharedFunctionInfo()
- || object->IsScript()) {
+ } else if (object->IsCode()) {
return AddEntry(object, HeapEntry::CODE);
+ } else if (object->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+ String* name = String::cast(shared->name())->length() > 0 ?
+ String::cast(shared->name()) : shared->inferred_name();
+ return AddEntry(object, HeapEntry::CODE, collection_->GetName(name));
+ } else if (object->IsScript()) {
+ Script* script = Script::cast(object);
+ return AddEntry(object,
+ HeapEntry::CODE,
+ script->name()->IsString() ?
+ collection_->GetName(String::cast(script->name())) : "");
} else if (object->IsFixedArray()) {
return AddEntry(object, HeapEntry::ARRAY);
}
@@ -1309,6 +1328,16 @@ void HeapSnapshot::SetElementReference(HeapEntry* parent,
}
+void HeapSnapshot::SetInternalReference(HeapEntry* parent,
+ const char* reference_name,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetInternalReference(reference_name, child_entry);
+ }
+}
+
+
void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
String* reference_name,
Object* child) {
@@ -1537,6 +1566,7 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
snapshot_->SetClosureReference(entry, local_name, context->get(idx));
}
}
+ snapshot_->SetInternalReference(entry, "code", func->shared());
}
}
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 3f90702b..4e423c8d 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -429,9 +429,10 @@ class HeapEntry;
class HeapGraphEdge {
public:
enum Type {
- CONTEXT_VARIABLE,
- ELEMENT,
- PROPERTY
+ CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
+ ELEMENT = v8::HeapGraphEdge::ELEMENT,
+ PROPERTY = v8::HeapGraphEdge::PROPERTY,
+ INTERNAL = v8::HeapGraphEdge::INTERNAL
};
HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
@@ -443,7 +444,7 @@ class HeapGraphEdge {
return index_;
}
const char* name() const {
- ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
return name_;
}
HeapEntry* from() const { return from_; }
@@ -468,12 +469,12 @@ class CachedHeapGraphPath;
class HeapEntry {
public:
enum Type {
- INTERNAL,
- ARRAY,
- STRING,
- JS_OBJECT,
- CODE,
- CLOSURE
+ INTERNAL = v8::HeapGraphNode::INTERNAL,
+ ARRAY = v8::HeapGraphNode::ARRAY,
+ STRING = v8::HeapGraphNode::STRING,
+ OBJECT = v8::HeapGraphNode::OBJECT,
+ CODE = v8::HeapGraphNode::CODE,
+ CLOSURE = v8::HeapGraphNode::CLOSURE
};
explicit HeapEntry(HeapSnapshot* snapshot)
@@ -533,6 +534,7 @@ class HeapEntry {
void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
void SetClosureReference(const char* name, HeapEntry* entry);
void SetElementReference(int index, HeapEntry* entry);
+ void SetInternalReference(const char* name, HeapEntry* entry);
void SetPropertyReference(const char* name, HeapEntry* entry);
void SetAutoIndexReference(HeapEntry* entry);
@@ -542,6 +544,7 @@ class HeapEntry {
void Print(int max_depth, int indent);
private:
+ void AddEdge(HeapGraphEdge* edge);
int CalculateTotalSize();
int CalculateNonSharedTotalSize();
void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
@@ -641,6 +644,8 @@ class HeapSnapshot {
void SetClosureReference(
HeapEntry* parent, String* reference_name, Object* child);
void SetElementReference(HeapEntry* parent, int index, Object* child);
+ void SetInternalReference(
+ HeapEntry* parent, const char* reference_name, Object* child);
void SetPropertyReference(
HeapEntry* parent, String* reference_name, Object* child);
diff --git a/src/runtime.cc b/src/runtime.cc
index 88786e82..71148e6b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -626,9 +626,9 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
PropertyDetails details = dictionary->DetailsAt(entry);
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+ elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
return *desc;
} else {
// Elements that are stored as array elements always have:
@@ -3849,11 +3849,29 @@ static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
int unchecked = flag->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+ PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+
+ // Check if this is an element.
+ uint32_t index;
+ bool is_element = name->AsArrayIndex(&index);
+
+ // Special case for elements if any of the flags are true.
+ // If the elements are in the fast case, we always implicitly assume that:
+ // DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
+ if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
+ is_element) {
+ // Normalize the elements to enable attributes on the property.
+ js_object->NormalizeElements();
+ NumberDictionary* dictionary = js_object->element_dictionary();
+ // Make sure that we never go back to fast case.
+ dictionary->set_requires_slow_elements();
+ PropertyDetails details = PropertyDetails(attr, NORMAL);
+ dictionary->Set(index, *obj_value, details);
+ }
+
LookupResult result;
js_object->LocalLookupRealNamedProperty(*name, &result);
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
-
// Take special care when attributes are different and there is already
// a property. For simplicity we normalize the property which enables us
// to not worry about changing the instance_descriptor and creating a new
@@ -3869,6 +3887,7 @@ static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
*obj_value,
attr);
}
+
return Runtime::SetObjectProperty(js_object, name, obj_value, attr);
}
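// The element path above means that defining, say, a non-writable element on
// an array with fast elements, e.g.
//   Object.defineProperty(a, "0", { value: 1, writable: false });
// first normalizes the elements into a NumberDictionary (so the READ_ONLY
// attribute can be stored with the value) and then pins the object in slow
// mode via set_requires_slow_elements(), since fast elements cannot
// represent attributes. (Illustrative gloss, not part of this change.)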
@@ -4927,16 +4946,6 @@ static Object* ConvertCaseHelper(String* s,
}
-static inline SeqAsciiString* TryGetSeqAsciiString(String* s) {
- if (!s->IsFlat() || !s->IsAsciiRepresentation()) return NULL;
- if (s->IsConsString()) {
- ASSERT(ConsString::cast(s)->second()->length() == 0);
- return SeqAsciiString::cast(ConsString::cast(s)->first());
- }
- return SeqAsciiString::cast(s);
-}
-
-
namespace {
struct ToLowerTraits {
@@ -4983,7 +4992,7 @@ static Object* ConvertCase(
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- s->TryFlatten();
+ s = s->TryFlattenGetString();
const int length = s->length();
// Assume that the string is not empty; we need this assumption later
@@ -4995,13 +5004,12 @@ static Object* ConvertCase(
// character is also ascii. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- SeqAsciiString* seq_ascii = TryGetSeqAsciiString(s);
- if (seq_ascii != NULL) {
+ if (s->IsSeqAsciiString()) {
Object* o = Heap::AllocateRawAsciiString(length);
if (o->IsFailure()) return o;
SeqAsciiString* result = SeqAsciiString::cast(o);
bool has_changed_character = ConvertTraits::ConvertAscii(
- result->GetChars(), seq_ascii->GetChars(), length);
+ result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
return has_changed_character ? result : s;
}
@@ -5545,7 +5553,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
if (first->IsString()) return first;
}
- bool ascii = special->IsAsciiRepresentation();
+ bool ascii = special->HasOnlyAsciiChars();
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
@@ -5586,7 +5594,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (ascii && !element->IsAsciiRepresentation()) {
+ if (ascii && !element->HasOnlyAsciiChars()) {
ascii = false;
}
} else {
@@ -9042,7 +9050,7 @@ static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point.
- Debug::SetBreakPoint(shared, source_position, break_point_object_arg);
+ Debug::SetBreakPoint(shared, break_point_object_arg, &source_position);
return Heap::undefined_value();
}
@@ -9062,8 +9070,6 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
// The current candidate for the source position:
int target_start_position = RelocInfo::kNoPosition;
Handle<SharedFunctionInfo> target;
- // The current candidate for the last function in script:
- Handle<SharedFunctionInfo> last;
while (!done) {
HeapIterator iterator;
for (HeapObject* obj = iterator.next();
@@ -9104,25 +9110,12 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
}
}
}
-
- // Keep track of the last function in the script.
- if (last.is_null() ||
- shared->end_position() > last->start_position()) {
- last = shared;
- }
}
}
}
- // Make sure some candidate is selected.
if (target.is_null()) {
- if (!last.is_null()) {
- // Position after the last function - use last.
- target = last;
- } else {
- // Unable to find function - possibly script without any function.
- return Heap::undefined_value();
- }
+ return Heap::undefined_value();
}
// If the candidate found is compiled we are done. NOTE: when lazy
@@ -9140,8 +9133,9 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
}
-// Change the state of a break point in a script. NOTE: Regarding performance
-// see the NOTE for GetScriptFromScriptData.
+// Changes the state of a break point in a script and returns the source
+// position where the break point was set. NOTE: Regarding performance see the
+// NOTE for GetScriptFromScriptData.
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
// args[2]: number: break point object
@@ -9169,7 +9163,9 @@ static Object* Runtime_SetScriptBreakPoint(Arguments args) {
} else {
position = source_position - shared->start_position();
}
- Debug::SetBreakPoint(shared, position, break_point_object_arg);
+ Debug::SetBreakPoint(shared, break_point_object_arg, &position);
+ position += shared->start_position();
+ return Smi::FromInt(position);
}
return Heap::undefined_value();
}
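// A sketch of the position round-trip above, assuming source_position is
// script-relative while break points are stored function-relative:
//   position = source_position - shared->start_position();  // to function
//   Debug::SetBreakPoint(shared, break_point_object_arg, &position);
//   position += shared->start_position();                   // back to script
// The returned Smi is therefore the actual (possibly adjusted) script
// position at which the break point ended up.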
diff --git a/src/scanner.cc b/src/scanner.cc
index 89431190..286f515b 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -200,6 +200,7 @@ void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
// ----------------------------------------------------------------------------
// Keyword Matcher
+
KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
{ "break", KEYWORD_PREFIX, Token::BREAK },
{ NULL, C, Token::ILLEGAL },
@@ -335,7 +336,7 @@ void KeywordMatcher::Step(uc32 input) {
// Scanner
Scanner::Scanner(ParserMode pre)
- : stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { }
+ : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
void Scanner::Initialize(Handle<String> source,
diff --git a/src/scanner.h b/src/scanner.h
index d5efdff9..2dce5a18 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -154,7 +154,12 @@ class KeywordMatcher {
// *: Actually "future reserved keywords". These are the only ones we
// recognize; the remaining ones are allowed as identifiers.
public:
- KeywordMatcher() : state_(INITIAL), token_(Token::IDENTIFIER) {}
+ KeywordMatcher()
+ : state_(INITIAL),
+ token_(Token::IDENTIFIER),
+ keyword_(NULL),
+ counter_(0),
+ keyword_token_(Token::ILLEGAL) {}
Token::Value token() { return token_; }
@@ -206,17 +211,6 @@ class KeywordMatcher {
// State map for first keyword character range.
static FirstState first_states_[kFirstCharRangeLength];
- // Current state.
- State state_;
- // Token for currently added characters.
- Token::Value token_;
-
- // Matching a specific keyword string (there is only one possible valid
- // keyword with the current prefix).
- const char* keyword_;
- int counter_;
- Token::Value keyword_token_;
-
// If input equals keyword's character at position, continue matching keyword
// from that position.
inline bool MatchKeywordStart(uc32 input,
@@ -246,15 +240,26 @@ class KeywordMatcher {
char match,
State new_state,
Token::Value keyword_token) {
- if (input == match) { // Matched "do".
- state_ = new_state;
- token_ = keyword_token;
- return true;
+ if (input != match) {
+ return false;
}
- return false;
+ state_ = new_state;
+ token_ = keyword_token;
+ return true;
}
void Step(uc32 input);
+
+ // Current state.
+ State state_;
+ // Token for currently added characters.
+ Token::Value token_;
+
+ // Matching a specific keyword string (there is only one possible valid
+ // keyword with the current prefix).
+ const char* keyword_;
+ int counter_;
+ Token::Value keyword_token_;
};
@@ -362,37 +367,6 @@ class Scanner {
static const int kNoEndPosition = 1;
private:
- void Init(Handle<String> source,
- unibrow::CharacterStream* stream,
- int start_position, int end_position,
- ParserLanguage language);
-
-
- // Different UTF16 buffers used to pull characters from. Based on input one of
- // these will be initialized as the actual data source.
- CharacterStreamUTF16Buffer char_stream_buffer_;
- ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
- two_byte_string_buffer_;
- ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
-
- // Source. Will point to one of the buffers declared above.
- UTF16Buffer* source_;
-
- // Used to convert the source string into a character stream when a stream
- // is not passed to the scanner.
- SafeStringInputBuffer safe_string_input_buffer_;
-
- // Buffer to hold literal values (identifiers, strings, numbers)
- // using 0-terminated UTF-8 encoding.
- UTF8Buffer literal_buffer_1_;
- UTF8Buffer literal_buffer_2_;
-
- bool stack_overflow_;
- static StaticResource<Utf8Decoder> utf8_decoder_;
-
- // One Unicode character look-ahead; c0_ < 0 at the end of the input.
- uc32 c0_;
-
// The current and look-ahead token.
struct TokenDesc {
Token::Value token;
@@ -400,11 +374,10 @@ class Scanner {
UTF8Buffer* literal_buffer;
};
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
- bool has_line_terminator_before_next_;
- bool is_pre_parsing_;
- bool is_parsing_json_;
+ void Init(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ int start_position, int end_position,
+ ParserLanguage language);
// Literal buffer support
void StartLiteral();
@@ -426,6 +399,7 @@ class Scanner {
return SkipJavaScriptWhiteSpace();
}
}
+
bool SkipJavaScriptWhiteSpace();
bool SkipJsonWhiteSpace();
Token::Value SkipSingleLineComment();
@@ -460,11 +434,13 @@ class Scanner {
// the integer part is zero), and may include an exponent part (e.g., "e-10").
// Hexadecimal and octal numbers are not allowed.
Token::Value ScanJsonNumber();
+
// A JSON string (production JSONString) is subset of valid JavaScript string
// literals. The string must only be double-quoted (not single-quoted), and
// the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
// four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
Token::Value ScanJsonString();
+
// Used to recognize one of the literals "true", "false", or "null". These
// are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral).
@@ -489,6 +465,37 @@ class Scanner {
// Decodes a unicode escape-sequence which is part of an identifier.
// If the escape sequence cannot be decoded the result is kBadRune.
uc32 ScanIdentifierUnicodeEscape();
+
+ TokenDesc current_; // desc for current token (as returned by Next())
+ TokenDesc next_; // desc for next token (one token look-ahead)
+ bool has_line_terminator_before_next_;
+ bool is_pre_parsing_;
+ bool is_parsing_json_;
+
+ // Different UTF16 buffers used to pull characters from. Based on the input,
+ // one of these will be initialized as the actual data source.
+ CharacterStreamUTF16Buffer char_stream_buffer_;
+ ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
+ two_byte_string_buffer_;
+ ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
+
+ // Source. Will point to one of the buffers declared above.
+ UTF16Buffer* source_;
+
+ // Used to convert the source string into a character stream when a stream
+ // is not passed to the scanner.
+ SafeStringInputBuffer safe_string_input_buffer_;
+
+ // Buffer to hold literal values (identifiers, strings, numbers)
+ // using 0-terminated UTF-8 encoding.
+ UTF8Buffer literal_buffer_1_;
+ UTF8Buffer literal_buffer_2_;
+
+ bool stack_overflow_;
+ static StaticResource<Utf8Decoder> utf8_decoder_;
+
+ // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+ uc32 c0_;
};
} } // namespace v8::internal
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 8b7e2ad9..2091ca72 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -408,6 +408,18 @@ int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) {
template<class Allocator>
+bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) {
+ if (code->sinfo_size() > 0) {
+ Object** p = ContextEntriesAddr(code);
+ int n; // number of context slots
+ ReadInt(p, &n);
+ return n > 0;
+ }
+ return false;
+}
+
+
+template<class Allocator>
int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
ASSERT(name->IsSymbol());
if (code->sinfo_size() > 0) {
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 927ac66f..9fb26d03 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -112,6 +112,9 @@ class ScopeInfo BASE_EMBEDDED {
// Return the number of context slots for code.
static int NumberOfContextSlots(Code* code);
+ // Returns whether this has context slots besides MIN_CONTEXT_SLOTS.
+ static bool HasHeapAllocatedLocals(Code* code);
+
// Lookup support for scope info embedded in Code objects. Returns
// the stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be a symbol
diff --git a/src/serialize.cc b/src/serialize.cc
index e610e283..a6a516a7 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -364,90 +364,102 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
6,
"RegExpStack::limit_address()");
- Add(ExternalReference::new_space_start().address(),
+ Add(ExternalReference::address_of_regexp_stack_memory_address().address(),
UNCLASSIFIED,
7,
+ "RegExpStack::memory_address()");
+ Add(ExternalReference::address_of_regexp_stack_memory_size().address(),
+ UNCLASSIFIED,
+ 8,
+ "RegExpStack::memory_size()");
+ Add(ExternalReference::address_of_static_offsets_vector().address(),
+ UNCLASSIFIED,
+ 9,
+ "OffsetsVector::static_offsets_vector");
+ Add(ExternalReference::new_space_start().address(),
+ UNCLASSIFIED,
+ 10,
"Heap::NewSpaceStart()");
Add(ExternalReference::new_space_mask().address(),
UNCLASSIFIED,
- 8,
+ 11,
"Heap::NewSpaceMask()");
Add(ExternalReference::heap_always_allocate_scope_depth().address(),
UNCLASSIFIED,
- 9,
+ 12,
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address().address(),
UNCLASSIFIED,
- 10,
+ 13,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address().address(),
UNCLASSIFIED,
- 11,
+ 14,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break().address(),
UNCLASSIFIED,
- 12,
+ 15,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
- 13,
+ 16,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
- 14,
+ 17,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
- 15,
+ 18,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
- 16,
+ 19,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
- 17,
+ 20,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
- 18,
+ 21,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
- 19,
+ 22,
"compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
- 20,
+ 23,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state().address(),
UNCLASSIFIED,
- 21,
+ 24,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack().address(),
UNCLASSIFIED,
- 22,
+ 25,
"NativeRegExpMacroAssembler::GrowStack()");
Add(ExternalReference::re_word_character_map().address(),
UNCLASSIFIED,
- 23,
+ 26,
"NativeRegExpMacroAssembler::word_character_map");
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,
- 24,
+ 27,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
UNCLASSIFIED,
- 25,
+ 28,
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::transcendental_cache_array_address().address(),
UNCLASSIFIED,
- 26,
+ 29,
"TranscendentalCache::caches()");
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 397988ae..ffa92dd3 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -121,7 +121,7 @@ Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
receiver->map()->UpdateCodeCache(cache_name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -139,7 +139,7 @@ Object* StubCache::ComputeLoadField(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -158,7 +158,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -177,7 +177,7 @@ Object* StubCache::ComputeLoadConstant(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -194,13 +194,12 @@ Object* StubCache::ComputeLoadInterceptor(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
- Code* code = Builtins::builtin(Builtins::LoadIC_Normal);
- return Set(name, receiver->map(), code);
+ return Builtins::builtin(Builtins::LoadIC_Normal);
}
@@ -223,7 +222,7 @@ Object* StubCache::ComputeLoadGlobal(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -368,7 +367,7 @@ Object* StubCache::ComputeStoreField(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -385,7 +384,7 @@ Object* StubCache::ComputeStoreGlobal(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -403,7 +402,7 @@ Object* StubCache::ComputeStoreCallback(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -420,7 +419,7 @@ Object* StubCache::ComputeStoreInterceptor(String* name,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -486,7 +485,7 @@ Object* StubCache::ComputeCallConstant(int argc,
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, map, Code::cast(code));
+ return code;
}
@@ -525,7 +524,7 @@ Object* StubCache::ComputeCallField(int argc,
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, map, Code::cast(code));
+ return code;
}
@@ -563,7 +562,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, map, Code::cast(code));
+ return code;
}
@@ -574,7 +573,7 @@ Object* StubCache::ComputeCallNormal(int argc,
JSObject* receiver) {
Object* code = ComputeCallNormal(argc, in_loop, kind);
if (code->IsFailure()) return code;
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -607,7 +606,7 @@ Object* StubCache::ComputeCallGlobal(int argc,
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
- return Set(name, receiver->map(), Code::cast(code));
+ return code;
}
@@ -1106,6 +1105,7 @@ Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
Code* code = Code::cast(result);
USE(code);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ USE(kind);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count()));
}
diff --git a/src/utils.h b/src/utils.h
index ed6d9a4f..d7c5b70f 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -47,6 +47,41 @@ static inline bool IsPowerOf2(T x) {
}
+// X must be a power of 2. Returns the number of trailing zeros.
+template <typename T>
+static inline int WhichPowerOf2(T x) {
+ ASSERT(IsPowerOf2(x));
+ ASSERT(x != 0);
+ if (x < 0) return 31;
+ int bits = 0;
+#ifdef DEBUG
+ int original_x = x;
+#endif
+ if (x >= 0x10000) {
+ bits += 16;
+ x >>= 16;
+ }
+ if (x >= 0x100) {
+ bits += 8;
+ x >>= 8;
+ }
+ if (x >= 0x10) {
+ bits += 4;
+ x >>= 4;
+ }
+ switch (x) {
+ default: UNREACHABLE();
+ case 8: bits++; // Fall through.
+ case 4: bits++; // Fall through.
+ case 2: bits++; // Fall through.
+ case 1: break;
+ }
+ ASSERT_EQ(1 << bits, original_x);
+ return bits;
+}
+
+
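// A few spot checks of WhichPowerOf2, assuming the definition above
// (illustration only):
//   WhichPowerOf2(1) == 0
//   WhichPowerOf2(2) == 1
//   WhichPowerOf2(0x10000) == 16
//   WhichPowerOf2(1 << 20) == 20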
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
diff --git a/src/v8.cc b/src/v8.cc
index 65ce2e1b..23139670 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -32,6 +32,7 @@
#include "serialize.h"
#include "simulator.h"
#include "stub-cache.h"
+#include "heap-profiler.h"
#include "oprofile-agent.h"
#include "log.h"
@@ -61,6 +62,7 @@ bool V8::Initialize(Deserializer* des) {
Logger::Setup();
CpuProfiler::Setup();
+ HeapProfiler::Setup();
// Setup the platform OS support.
OS::Setup();
@@ -149,6 +151,8 @@ void V8::TearDown() {
Top::TearDown();
+ HeapProfiler::TearDown();
+
CpuProfiler::TearDown();
Heap::TearDown();
diff --git a/src/version.cc b/src/version.cc
index d210dabc..c9e84119 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,9 +34,9 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 18
+#define BUILD_NUMBER 19
#define PATCH_LEVEL 0
-#define CANDIDATE_VERSION true
+#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h
index d08b5d21..19520a63 100644
--- a/src/virtual-frame-light-inl.h
+++ b/src/virtual-frame-light-inl.h
@@ -74,7 +74,9 @@ bool VirtualFrame::Equals(const VirtualFrame* other) {
void VirtualFrame::PrepareForReturn() {
- SpillAll();
+ // Don't bother flushing TOS registers, as returning does not require any
+ // more access to the expression stack.
+ top_of_stack_state_ = NO_TOS_REGISTERS;
}
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 70bcdb16..01c60aaf 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -35,16 +35,11 @@
namespace v8 {
namespace internal {
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
// -----------------------------------------------------------------------------
// Implementation of Assembler
-
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d77c09fd..e665385c 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -382,6 +382,11 @@ void Assembler::Align(int m) {
}
+void Assembler::CodeTargetAlign() {
+ Align(16); // Preferred alignment of jump targets on x64.
+}
+
+
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
last_pc_ = NULL;
@@ -1148,6 +1153,15 @@ void Assembler::incl(const Operand& dst) {
}
+void Assembler::incl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xFF);
+ emit_modrm(0, dst);
+}
+
+
void Assembler::int3() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2738,17 +2752,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::comisd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2f);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index c7e737c6..f195439e 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -215,7 +215,10 @@ enum Condition {
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+inline Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
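// The XOR works because x64 condition codes come in adjacent true/false
// pairs. Spot checks against the Condition enum above (illustration only):
//   NegateCondition(equal) == not_equal   // 4 ^ 1 == 5
//   NegateCondition(below) == above_equal // 2 ^ 1 == 3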
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) {
@@ -241,6 +244,7 @@ inline Condition ReverseCondition(Condition cc) {
};
}
+
enum Hint {
no_hint = 0,
not_taken = 0x2e,
@@ -495,6 +499,8 @@ class Assembler : public Malloced {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Aligns code to the platform's preferred alignment for jump targets.
+ void CodeTargetAlign();
// Stack
void pushfq();
@@ -761,6 +767,7 @@ class Assembler : public Malloced {
void incq(Register dst);
void incq(const Operand& dst);
+ void incl(Register dst);
void incl(const Operand& dst);
void lea(Register dst, const Operand& src);
@@ -1122,7 +1129,6 @@ class Assembler : public Malloced {
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index f9692ce4..3ba89067 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -262,63 +262,23 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
class FloatingPointHelper : public AllStatic {
public:
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand on TOS+1. Returns operand as floating point number on FPU
- // stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in src register. Returns operand as floating point number
- // in XMM register. May destroy src register.
- static void LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst);
-
- // Code pattern for loading a possible number into a XMM register.
- // If the contents of src is not a number, control branches to
- // the Label not_number. If contents of src is a smi or a heap number
- // object (fp value), it is loaded into the XMM register as a double.
- // The register src is not changed, and src may not be kScratchRegister.
- static void LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst,
- Label *not_number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 in rdx, operand_2 in rax; Returns operands as
- // floating point numbers in XMM registers.
- static void LoadFloatOperands(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2);
-
- // Similar to LoadFloatOperands, assumes that the operands are smis.
- static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2);
-
- // Code pattern for loading floating point values onto the fp stack.
- // Input values must be either smi or heap number objects (fp values).
- // Requirements:
- // Register version: operands in registers lhs and rhs.
- // Stack version: operands on TOS+1 and TOS+2.
- // Returns operands as floating point numbers on fp stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in rax, operand_2 in rdx; falls through on float or smi
- // operands, jumps to the non_float label otherwise.
- static void CheckNumberOperands(MacroAssembler* masm,
- Label* non_float);
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
// Takes the operands in rdx and rax and loads them as integers in rax
// and rcx.
static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure);
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
};
@@ -3108,25 +3068,31 @@ void CodeGenerator::VisitCall(Call* node) {
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
+ // Call the function.
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
} else {
- Reference ref(this, property, false);
- ASSERT(ref.size() == 2);
- Result key = frame_->Pop();
- frame_->Dup(); // Duplicate the receiver.
- frame_->Push(&key);
- ref.GetValue();
- // Top of frame contains function to call, with duplicate copy of
- // receiver below it. Swap them.
- Result function = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&function);
- frame_->Push(&receiver);
- }
+ // Push the receiver onto the frame.
+ Load(property->obj());
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- }
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+
+ // Load the name of the function.
+ Load(property->key());
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ frame_->Push(&result);
+ }
+ }
} else {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is not global
@@ -4423,7 +4389,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
- __ comisd(xmm2, xmm1);
+ __ ucomisd(xmm2, xmm1);
call_runtime.Branch(not_equal);
// Calculates square root.
@@ -4763,8 +4729,8 @@ void DeferredSearchCache::Generate() {
__ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &first_loop);
- __ Integer32ToSmi(scratch_, dst_);
- __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
@@ -4785,16 +4751,15 @@ void DeferredSearchCache::Generate() {
__ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &second_loop);
- __ Integer32ToSmi(scratch_, dst_);
- __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&cache_miss);
__ push(cache_); // store a reference to cache
__ push(key_); // store a key
- Handle<Object> receiver(Top::global_context()->global());
- __ Push(receiver);
+ __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ push(key_);
// On x64 function must be in rdi.
__ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
@@ -4809,50 +4774,50 @@ void DeferredSearchCache::Generate() {
// cache miss this optimization would hardly matter much.
// Check if we could add new entry to cache.
- __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ SmiCompare(rbx, r9);
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(r9,
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+ __ cmpl(rbx, r9);
__ j(greater, &add_new_entry);
// Check if we could evict entry after finger.
- __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ SmiToInteger32(rdx, rdx);
- __ SmiToInteger32(rbx, rbx);
- __ addq(rdx, kEntrySizeImm);
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ addl(rdx, kEntrySizeImm);
Label forward;
- __ cmpq(rbx, rdx);
+ __ cmpl(rbx, rdx);
__ j(greater, &forward);
// Need to wrap over the cache.
__ movl(rdx, kEntriesIndexImm);
__ bind(&forward);
- __ Integer32ToSmi(r9, rdx);
+ __ movl(r9, rdx);
__ jmp(&update_cache);
__ bind(&add_new_entry);
- // r9 holds cache size as smi.
- __ SmiToInteger32(rdx, r9);
- __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
- __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+ // r9 holds cache size as int32.
+ __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
// Update the cache itself.
- // rdx holds the index as int.
- // r9 holds the index as smi.
+ // r9 holds the index as int32.
__ bind(&update_cache);
__ pop(rbx); // restore the key
- __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
// Store key.
- __ movq(ArrayElement(rcx, rdx), rbx);
+ __ movq(ArrayElement(rcx, r9), rbx);
__ RecordWrite(rcx, 0, rbx, r9);
// Store value.
__ pop(rcx); // restore the cache.
- __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
- __ movq(r9, rdx);
- __ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ incl(rdx);
+ // Back up rax, because the RecordWrite macro clobbers its arguments.
__ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
+ __ movq(ArrayElement(rcx, rdx), rax);
+ __ RecordWrite(rcx, 0, rbx, rdx);
if (!dst_.is(rax)) {
__ movq(dst_, rax);
@@ -6507,7 +6472,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
&not_numbers);
LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
&not_numbers);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
// Bail out if a NaN is involved.
not_numbers.Branch(parity_even, left_side, right_side);
@@ -8178,7 +8143,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// ST[0] == double value
// rbx = bits of double value.
// rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64):
+ // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
// h = h0 = bits ^ (bits >> 32);
// h ^= h >> 16;
// h ^= h >> 8;
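// The same hash in plain C++, as a sketch; `bits` is the raw 64-bit double
// value, and since the generated code uses arithmetic shifts, a faithful
// port shifts a signed value:
//   static inline int32_t TranscendentalHash(uint64_t bits) {
//     int32_t h = static_cast<int32_t>(bits) ^
//                 static_cast<int32_t>(bits >> 32);
//     h ^= h >> 16;  // arithmetic shift on signed h
//     h ^= h >> 8;
//     return h;
//   }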
@@ -8189,9 +8154,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ movl(rcx, rdx);
__ movl(rax, rdx);
__ movl(rdi, rdx);
- __ shrl(rdx, Immediate(8));
- __ shrl(rcx, Immediate(16));
- __ shrl(rax, Immediate(24));
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
@@ -8293,7 +8258,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
// Move exponent and sign bits to low bits.
__ shr(rdi, Immediate(HeapNumber::kMantissaBits));
// Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::KExponentBits) - 1));
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
__ cmpl(rdi, Immediate(supported_exponent_limit));
__ j(below, &in_range);
@@ -8370,7 +8335,7 @@ void IntegerConvert(MacroAssembler* masm,
// Double the value to remove the sign bit, shift the exponent down to the
// least significant bits, and subtract the bias to get the unshifted,
// unbiased exponent.
__ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::KExponentBits));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
__ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
// Check whether the exponent is too big for a 63 bit unsigned integer.
__ cmpl(double_exponent, Immediate(63));
@@ -8546,18 +8511,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
- __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
__ j(not_equal, &runtime);
// rcx: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
- __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
- __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+ __ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
- __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
__ j(above, &runtime);
// rcx: RegExp data (FixedArray)
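// The leal above folds (number_of_captures + 1) * 2 into one instruction:
// Operand(rdx, rdx, times_1, 2) computes rdx + rdx*1 + 2, i.e.
// 2 * number_of_captures + 2, without touching the flags.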
@@ -8567,17 +8532,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(rax, &runtime);
Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
__ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to rbx.
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- // rbx: Length of subject string as smi
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
+ // rax: Subject string.
+ // rcx: RegExp data (FixedArray).
+ // rdx: Number of capture registers.
// Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (unsigned comparison).
- __ movq(rax, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rax, &runtime);
- __ SmiCompare(rax, rbx);
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
__ j(above_equal, &runtime);
// rcx: RegExp data (FixedArray)
@@ -8595,65 +8558,63 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rax, rax);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
- // ecx: RegExp data (FixedArray)
+ // rcx: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
- Label seq_string, seq_two_byte_string, check_code;
- const int kStringRepresentationEncodingMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ Label seq_ascii_string, seq_two_byte_string, check_code;
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
- // First check for sequential string.
- ASSERT_EQ(0, kStringTag);
- ASSERT_EQ(0, kSeqStringTag);
+ // First check for flat two byte string.
+ __ andb(rbx, Immediate(
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
__ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_string);
+ __ j(zero, &seq_ascii_string);
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- __ andb(rbx, Immediate(kStringRepresentationMask));
- __ cmpb(rbx, Immediate(kConsStringTag));
- __ j(not_equal, &runtime);
+ ASSERT(kExternalStringTag != 0);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
__ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
__ Cmp(rdx, Factory::empty_string());
__ j(not_equal, &runtime);
__ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- ASSERT_EQ(0, kSeqStringTag);
- __ testb(rbx, Immediate(kStringRepresentationMask));
+ // String is a cons string with empty second part.
+ // rax: first part of cons string.
+ // rbx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask | kStringEncodingMask));
+ ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask));
__ j(not_zero, &runtime);
- __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
- __ bind(&seq_string);
- // rax: subject string (sequential either ascii to two byte)
- // rbx: suject string type & kStringRepresentationEncodingMask
+ __ bind(&seq_ascii_string);
+ // rax: subject string (sequential ascii)
// rcx: RegExp data (FixedArray)
- // Check that the irregexp code has been generated for an ascii string. If
- // it has, the field contains a code object otherwise it contains the hole.
- const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
- __ cmpb(rbx, Immediate(kSeqTwoByteString));
- __ j(equal, &seq_two_byte_string);
- if (FLAG_debug_code) {
- __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- __ Check(equal, "Expected sequential ascii string");
- }
__ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ Set(rdi, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
- // rax: subject string
+ // rax: subject string (flat two-byte)
// rcx: RegExp data (FixedArray)
__ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ Set(rdi, 0); // Type is two byte.
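
// A minimal standalone C++ sketch (not V8 code) of the dispatch above. It
// relies on the layout asserted in this hunk: kStringTag, kSeqStringTag and
// kTwoByteStringTag are all zero. The numeric mask values below are taken
// from src/objects.h and should be treated as illustrative assumptions.
#include <stdint.h>
const uint32_t kIsNotStringMask = 0x80;
const uint32_t kStringRepresentationMask = 0x03;
const uint32_t kStringEncodingMask = 0x04;  // set => ascii, clear => two-byte

bool IsSeqTwoByteString(uint32_t instance_type) {
  // All three fields zero: a sequential two-byte string.
  return (instance_type & (kIsNotStringMask | kStringRepresentationMask |
                           kStringEncodingMask)) == 0;
}
bool IsSeqAsciiString(uint32_t instance_type) {
  // Still a string and sequential; the only remaining set bit is the
  // encoding bit, which (per the ASSERT above) means ascii.
  return (instance_type & (kIsNotStringMask | kStringRepresentationMask)) == 0
      && (instance_type & kStringEncodingMask) != 0;
}
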
@@ -8670,8 +8631,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r12: code
// Load the used arguments before pushing arguments for the call to the
// native RegExp code, so we do not have to handle a changing stack height.
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ SmiToInteger64(rbx, rbx); // Previous index from smi.
+ __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
// rax: subject string
// rbx: previous index
@@ -8783,10 +8743,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&success);
__ movq(rax, Operand(rsp, kJSRegExpOffset));
__ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ __ SmiToInteger32(rax,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
- __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+ __ leal(rdx, Operand(rax, rax, times_1, 2));
// rdx: Number of capture registers
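
// The leal above folds (number_of_captures + 1) * 2 into one instruction:
// rax + rax*1 + 2 == 2*rax + 2. Equivalent C++, for reference:
int NumCaptureRegisters(int number_of_captures) {
  // One (start, end) register pair per capture plus one pair for the whole
  // match, e.g. 3 captures -> 8 registers.
  return number_of_captures * 2 + 2;
}
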
// Load last_match_info which is still known to be a fast case JSArray.
@@ -8829,7 +8789,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
rdx,
times_pointer_size,
RegExpImpl::kFirstCaptureOffset),
- rdi);
+ rdi);
__ jmp(&next_capture);
__ bind(&done);
@@ -8873,9 +8833,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide smi tagged length by two.
- __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+ __ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shrl(mask, Immediate(1));
__ subq(mask, Immediate(1)); // Make mask.
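
// What the three instructions above compute, as plain C++: the cache
// FixedArray holds two slots (number, string) per entry, so for a
// power-of-two entry count the probe mask is length / 2 - 1.
int NumberStringCacheMask(int fixed_array_length) {
  return (fixed_array_length >> 1) - 1;  // e.g. length 128 -> mask 63
}
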
// Calculate the entry in the number string cache. The hash value in the
@@ -8905,15 +8865,14 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
__ j(parity_even, not_found); // Bail out if NaN is involved.
__ j(not_equal, not_found); // The cache did not contain this value.
__ jmp(&load_result_from_cache);
}
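
// Why ucomisd + j(parity_even, ...) filters NaNs: an unordered SSE2 compare
// (either operand NaN) sets ZF, PF and CF, so the parity flag singles out
// NaN operands. ucomisd differs from comisd only in not raising an
// exception for quiet NaNs; the flag results are identical. The probe
// logic in C++ terms, as a sketch:
#include <cmath>
bool CacheProbeMatches(double object, double probe) {
  if (std::isnan(object) || std::isnan(probe)) return false;  // parity_even
  return object == probe;  // otherwise the not_equal branch decides
}
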
__ bind(&is_smi);
- __ movq(scratch, object);
- __ SmiToInteger32(scratch, scratch);
+ __ SmiToInteger32(scratch, object);
GenerateConvertHashCodeToIndex(masm, scratch, mask);
Register index = scratch;
@@ -9107,12 +9066,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (include_number_compare_) {
Label non_number_comparison;
Label unordered;
- FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
- &non_number_comparison);
- FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
- &non_number_comparison);
-
- __ comisd(xmm0, xmm1);
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered);
@@ -9340,29 +9295,30 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // The stack slot must already hold a smi, since Integer32ToSmiField only
+ // writes the upper (value) half of the slot.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
// Do not clobber the length index for the indexing operation since
// it is used to compute the size for allocation later.
- SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
- __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testl(rcx, rcx);
__ j(zero, &add_arguments_object);
- index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
- __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
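
// A rough C++ model of the size accumulated in rcx before the allocation.
// The constants are illustrative assumptions for x64 (kPointerSize == 8, a
// two-word FixedArray header, a five-word arguments object):
int ArgumentsObjectAllocationSize(int argc) {
  int size = 0;
  if (argc > 0) {
    size += 2 * 8 + argc * 8;  // elements array: header + one slot per arg
  }
  return size + 5 * 8;         // plus Heap::kArgumentsObjectSize
}
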
@@ -9374,10 +9330,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(rdi, Operand(rdi, offset));
// Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(kScratchRegister, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), kScratchRegister);
- }
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
// Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0);
@@ -9391,7 +9350,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ SmiTest(rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack and untag the length.
@@ -9413,7 +9372,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ decl(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -9964,86 +9923,73 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ JumpIfSmi(number, &load_smi);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi);
- __ SmiToInteger32(number, number);
- __ push(number);
- __ fild_s(Operand(rsp, 0));
- __ pop(number);
-
- __ bind(&done);
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
}
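
// What SmiToInteger32 + cvtlsi2sd amount to here, assuming this revision's
// x64 smi layout (32-bit payload in the upper half of the word, matching
// the STATIC_ASSERT(kSmiValueSize == 32) later in this file):
#include <stdint.h>
int32_t SmiToInt32(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);  // arithmetic shift untags
}
double SmiToDouble(int64_t smi) {
  return static_cast<double>(SmiToInt32(smi));  // the cvtlsi2sd step
}
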
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst) {
- Label load_smi, done;
-
- __ JumpIfSmi(src, &load_smi);
- __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
- __ bind(&load_smi);
- __ SmiToInteger32(src, src);
- __ cvtlsi2sd(dst, src);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst,
- Label* not_number) {
- Label load_smi, done;
- ASSERT(!src.is(kScratchRegister));
- __ JumpIfSmi(src, &load_smi);
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
- __ j(not_equal, not_number);
- __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
- __ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, src);
- __ cvtlsi2sd(dst, kScratchRegister);
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2) {
- __ movq(kScratchRegister, rdx);
- LoadFloatOperand(masm, kScratchRegister, dst1);
- __ movq(kScratchRegister, rax);
- LoadFloatOperand(masm, kScratchRegister, dst2);
-}
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
-void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2) {
+ __ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(dst1, kScratchRegister);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(dst2, kScratchRegister);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
}
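
// A short summary of the three SSE2 loaders introduced above:
//   LoadSSE2SmiOperands     - rdx/rax known to be smis; no type checks.
//   LoadSSE2NumberOperands  - rdx/rax known numbers (smi or heap number).
//   LoadSSE2UnknownOperands - no type knowledge; bails out to not_numbers.
// All three leave the left operand in xmm0 and the right in xmm1, which is
// why the callers below move from xmm4/xmm5 to xmm0/xmm1.
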
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure) {
+ Label* conversion_failure,
+ Register heap_number_map) {
// Check float operands.
Label arg1_is_object, check_undefined_arg1;
Label arg2_is_object, check_undefined_arg2;
@@ -10061,8 +10007,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in rcx.
IntegerConvert(masm, rdx, rdx);
@@ -10083,8 +10028,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ jmp(&done);
__ bind(&arg2_is_object);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
IntegerConvert(masm, rcx, rax);
@@ -10093,51 +10037,35 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
- __ JumpIfSmi(lhs, &load_smi_lhs);
- __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
- __ bind(&done_load_lhs);
-
- __ JumpIfSmi(rhs, &load_smi_rhs);
- __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_lhs);
- __ SmiToInteger64(kScratchRegister, lhs);
- __ push(kScratchRegister);
- __ fild_d(Operand(rsp, 0));
- __ pop(kScratchRegister);
- __ jmp(&done_load_lhs);
-
- __ bind(&load_smi_rhs);
- __ SmiToInteger64(kScratchRegister, rhs);
- __ push(kScratchRegister);
- __ fild_d(Operand(rsp, 0));
- __ pop(kScratchRegister);
-
- __ bind(&done);
-}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ if (FLAG_debug_code) {
+ // The arguments cannot both be smis; that case is handled by smi-only code.
+ Label ok;
+ __ JumpIfNotBothSmi(rax, rdx, &ok);
+ __ Abort("Both arguments smi but not handled by smi-code.");
+ __ bind(&ok);
+ }
+ // Check float operands.
+ Label done;
+ Label rax_is_object;
+ Label rdx_is_object;
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
-void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
- Label* non_float) {
- Label test_other, done;
- // Test if both operands are numbers (heap_numbers or smis).
- // If not, jump to label non_float.
- __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, non_float); // The argument in rdx is not a number.
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
- __ bind(&test_other);
- __ JumpIfSmi(rax, &done); // argument in rax is OK
- __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, non_float); // The argument in rax is not a number.
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ SmiToInteger32(rcx, rax);
- // Fall-through: Both operands are numbers.
__ bind(&done);
+ __ movl(rax, rdx);
}
@@ -10447,15 +10375,15 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
}
// left is rdx, right is rax.
__ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rbx);
GenerateReturn(masm);
}
@@ -10518,22 +10446,23 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_floats;
// rax: y
// rdx: x
- if (static_operands_type_.IsNumber() && FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- } else {
- FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
- }
- // Fast-case: Both operands are numbers.
- // xmm4 and xmm5 are volatile XMM registers.
- FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+ ASSERT(!static_operands_type_.IsSmi());
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
// Allocate a heap number, if needed.
@@ -10568,7 +10497,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
default: UNREACHABLE();
}
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
__ bind(&not_floats);
if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
@@ -10593,34 +10522,52 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label skip_allocation, non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
+ } else {
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
+ }
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: __ shrl_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
default: UNREACHABLE();
}
- if (op_ == Token::SHR) {
- // Check if result is negative. This can only happen for a shift
- // by zero, which also doesn't update the sign flag.
- __ testl(rax, rax);
- __ j(negative, &non_smi_result);
- }
- __ JumpIfNotValidSmiValue(rax, &non_smi_result);
- // Tag smi result, if possible, and return.
+
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
__ Integer32ToSmi(rax, rax);
GenerateReturn(masm);
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR && non_smi_result.is_linked()) {
- __ bind(&non_smi_result);
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
// Allocate a heap number if needed.
- __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -10631,22 +10578,33 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ fild_s(Operand(rsp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
}
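
// Why only SHR needs the heap-number path, sketched in C++ (JS x >>> n):
#include <stdint.h>
uint32_t UnsignedShiftRight(int32_t x, int shift) {
  return static_cast<uint32_t>(x) >> (shift & 31);
}
// UnsignedShiftRight(-1, 0) == 4294967295u, which does not fit the signed
// 32-bit smi payload; cvtqsi2sd converts the zero-extended 64-bit value in
// rbx to a double exactly, so the full unsigned result is preserved.
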
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_result);
- }
break;
}
default: UNREACHABLE(); break;
@@ -10679,7 +10637,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_strings, both_strings, not_string1, string1, string1_smi2;
// If this stub has already generated FP-specific code then the arguments
- // are already in rdx, rax
+ // are already in rdx and rax.
if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
GenerateLoadArguments(masm);
}
@@ -10828,19 +10786,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ push(rax);
// Push this stub's key.
- __ movq(rax, Immediate(MinorKey()));
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(Smi::FromInt(MinorKey()));
// Although the operation and the type info are encoded into the key,
// the encoding is opaque, so push them too.
- __ movq(rax, Immediate(op_));
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(Smi::FromInt(op_));
- __ movq(rax, Immediate(runtime_operands_type_));
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(Smi::FromInt(runtime_operands_type_));
__ push(rcx);
@@ -11208,16 +11160,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object. If
// both strings are ascii the result is an ascii cons string.
// rax: first string
- // ebx: length of resulting flat string
+ // rbx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(rcx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
+ __ bind(&ascii_data);
// Allocate an ascii cons string.
__ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
__ bind(&allocated);
@@ -11231,6 +11184,18 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // rcx: first instance type AND second instance type.
+ // r8: first instance type.
+ // r9: second instance type.
+ __ testb(rcx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ xor_(r8, r9);
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
__ jmp(&allocated);
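
// Bit logic of the hint test above, as a standalone C++ sketch. The tag
// values are illustrative assumptions from src/objects.h; the hint bit
// marks a two-byte string whose contents are known to be ascii-only.
#include <stdint.h>
const uint32_t kAsciiTag = 0x04;   // assumed value of kAsciiStringTag
const uint32_t kAsciiHint = 0x08;  // assumed value of kAsciiDataHintTag
bool CanAllocateAsciiCons(uint32_t type1, uint32_t type2) {
  if ((type1 & type2 & kAsciiHint) != 0) return true;  // both sides hinted
  const uint32_t mask = kAsciiTag | kAsciiHint;
  // Exactly one side is ascii and the other carries the hint bit:
  return ((type1 ^ type2) & mask) == mask;
}
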
@@ -11238,7 +11203,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Handle creating a flat result. First check that both strings are not
// external strings.
// rax: first string
- // ebx: length of resulting flat string as smi
+ // rbx: length of resulting flat string as smi
// rdx: second string
// r8: instance type of first string
// r9: instance type of first string
@@ -11254,7 +11219,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(equal, &string_add_runtime);
// Now check if both strings are ascii strings.
// rax: first string
- // ebx: length of resulting flat string
+ // rbx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index d99ea84a..1df1de34 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1729,6 +1729,30 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ VisitForValue(key, kAccumulator);
+ __ movq(rcx, rax);
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
+ in_loop);
+ __ Call(ic, mode);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ Apply(context_, rax);
+}
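
// A summary of the sequence above for a keyed call o[k](a1, ..., aN): the
// arguments are pushed, the key is evaluated into rax and copied to rcx,
// and a single KeyedCallIC stub both loads the property and performs the
// call, replacing the earlier KeyedLoadIC-followed-by-call-stub sequence.
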
+
+
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@@ -1820,30 +1844,32 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
+ // Call to a keyed property.
+ // For a synthetic property use keyed load IC followed by function call,
+ // for a regular property use KeyedCallIC.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
- __ movq(rdx, Operand(rsp, 0));
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test rax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- // Pop receiver.
- __ pop(rbx);
- // Push result (function).
- __ push(rax);
- // Push receiver object on stack.
if (prop->is_synthetic()) {
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rdx, Operand(rsp, 0));
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test rax,..."
+ // instruction after the call as it is treated specially
+ // by the LoadIC code.
+ __ nop();
+ // Pop receiver.
+ __ pop(rbx);
+ // Push result (function).
+ __ push(rax);
+ // Push receiver object on stack.
__ movq(rcx, CodeGenerator::GlobalObject());
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+ EmitCallWithStub(expr);
} else {
- __ push(rbx);
+ EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
- EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 89c21cba..6e77c892 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -57,19 +57,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register r2,
Register name,
Register r4,
+ Register result,
DictionaryCheck check_dictionary) {
// Register use:
//
- // r0 - used to hold the property dictionary.
+ // r0 - used to hold the property dictionary and is unchanged.
//
- // r1 - initially the receiver.
- // - unchanged on any jump to miss_label.
- // - holds the result on exit.
+ // r1 - used to hold the receiver and is unchanged.
//
// r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanged.
+ //
// r4 - used to hold the index into the property dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
Label done;
@@ -148,7 +150,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(r1,
+ __ movq(result,
Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
}
@@ -159,14 +161,15 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Register key,
Register r0,
Register r1,
- Register r2) {
+ Register r2,
+ Register result) {
// Register use:
//
- // elements - holds the slow-case elements of the receiver and is unchanged.
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
//
- // key - holds the smi key on entry and is unchanged if a branch is
- // performed to the miss label.
- // Holds the result on exit if the load succeeded.
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
//
// Scratch registers:
//
@@ -175,6 +178,12 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'elements'.
+ // Unchanged on bailout so 'key' and 'elements' can be used
+ // in further computation.
+
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
@@ -246,7 +255,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -346,55 +355,167 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // map - used to hold the map of the receiver.
// Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
+ __ JumpIfSmi(receiver, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects work as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(below, &slow);
+ __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
+ __ j(below, slow);
// Check bit field.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(kSlowCaseBitFieldMask));
- __ j(not_zero, &slow);
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ j(not_zero, slow);
+}
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch - used to hold elements of the receiver and the loaded value.
+
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_pixel_array);
+ __ j(not_equal, not_fast_array);
// Check that the key (index) is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
+ __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
+ // Unsigned comparison rejects negative indices.
+ __ j(above_equal, out_of_range);
// Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+ SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
+ __ movq(scratch, FieldOperand(elements,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
- __ j(equal, &slow);
- __ movq(rax, rbx);
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ movq(result, scratch);
+ }
+}
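
// These helpers are factored out so that KeyedCallIC::GenerateMegamorphic
// (later in this file) can share the receiver check, fast array load, key
// string check and hash-to-index conversion with
// KeyedLoadIC::GenerateGeneric.
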
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if the key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
+ __ j(above_equal, not_symbol);
+ // Is the string an array index, with cached numeric value?
+ __ movl(hash, FieldOperand(key, String::kHashFieldOffset));
+ __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string); // The value in hash is used at jump target.
+
+ // Is the string a symbol?
+ ASSERT(kSymbolTag != 0);
+ __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, not_symbol);
+}
+
+
+// Picks out an array index from the hash field.
+static void GenerateIndexFromHash(MacroAssembler* masm,
+ Register key,
+ Register hash) {
+ // Register use:
+ // key - holds the overwritten key on exit.
+ // hash - holds the key's hash. Clobbered.
+
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+ // reserved for it does not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. Even if we subsequently go to
+ // the slow case, converting the key to a smi is always valid.
+ // key: string key
+ // hash: key's hash field, including its array index value.
+ __ and_(hash, Immediate(String::kArrayIndexValueMask));
+ __ shr(hash, Immediate(String::kHashShift));
+ // Here we actually clobber the key, which will be used if calling into
+ // runtime later. However, as the new key is the numeric value of a string
+ // key, there is no difference in using either key.
+ __ Integer32ToSmi(key, hash);
+}
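
// Plain C++ model of the extraction above; the mask and shift constants
// come from String in src/objects.h and are passed in here schematically:
#include <stdint.h>
uint32_t ArrayIndexFromHashField(uint32_t hash_field,
                                 uint32_t value_mask,
                                 int hash_shift) {
  // Select the cached index bits, then move them down to bit zero.
  return (hash_field & value_mask) >> hash_shift;
}
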
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, check_string, index_smi, index_string;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
+
+ GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateFastArrayLoad(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rax,
+ &check_pixel_array,
+ &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
@@ -423,7 +544,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi);
+ GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
__ ret(0);
__ bind(&slow);
@@ -434,22 +555,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
- // The key is not a smi.
- // Is it a string?
- // rdx: receiver
- // rax: key
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &slow);
- // Is the string an array index, with cached numeric value?
- __ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
- __ testl(rbx, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, &index_string); // The value in rbx is used at jump target.
-
- // Is the string a symbol?
- ASSERT(kSymbolTag != 0);
- __ testb(FieldOperand(rcx, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, &slow);
+ GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
@@ -509,29 +615,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
rcx,
rax,
rdi,
+ rax,
DICTIONARY_CHECK_DONE);
- __ movq(rax, rdx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
+
__ bind(&index_string);
- // We want the smi-tagged index in rax. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // rdx: receiver
- // rax: key (a string)
- // rbx: key's hash field, including its array index value.
- __ and_(rbx, Immediate(String::kArrayIndexValueMask));
- __ shr(rbx, Immediate(String::kHashShift));
- // Here we actually clobber the key (rax) which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ Integer32ToSmi(rax, rbx);
- // Now jump to the place where smi keys are handled.
+ GenerateIndexFromHash(masm, rax, rbx);
__ jmp(&index_smi);
}
@@ -803,19 +893,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, fast, array, extra, check_pixel_array;
+ Label slow, slow_with_tagged_index, fast, array, extra, check_pixel_array;
// Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
+ __ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
+ __ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
+ __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
+ __ SmiToInteger32(rcx, rcx);
__ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
@@ -826,27 +917,30 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
- // rcx: index (as a smi)
+ // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
- __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
- // rcx: index (as a smi)
- __ j(below, &fast);
+ // rcx: index
+ __ j(above, &fast);
// Slow case: call runtime.
__ bind(&slow);
+ __ Integer32ToSmi(rcx, rcx);
+ __ bind(&slow_with_tagged_index);
GenerateRuntimeSetProperty(masm);
+ // Never returns here.
// Check whether the elements is a pixel array.
// rax: value
// rdx: receiver
// rbx: receiver's elements array
- // rcx: index (as a smi), zero-extended.
+ // rcx: index, zero-extended.
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
@@ -854,21 +948,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ JumpIfNotSmi(rax, &slow);
- __ SmiToInteger32(rdi, rcx);
- __ cmpl(rdi, FieldOperand(rbx, PixelArray::kLengthOffset));
+ __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
// No more bailouts to slow case on this path, so key not needed.
- __ SmiToInteger32(rcx, rax);
+ __ SmiToInteger32(rdi, rax);
{ // Clamp the value to [0..255].
Label done;
- __ testl(rcx, Immediate(0xFFFFFF00));
+ __ testl(rdi, Immediate(0xFFFFFF00));
__ j(zero, &done);
- __ setcc(negative, rcx); // 1 if negative, 0 if positive.
- __ decb(rcx); // 0 if negative, 255 if positive.
+ __ setcc(negative, rdi); // 1 if negative, 0 if positive.
+ __ decb(rdi); // 0 if negative, 255 if positive.
__ bind(&done);
}
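
// The branch-free clamp above as runnable C++ (a sketch, not V8 code):
#include <stdint.h>
uint8_t ClampToByte(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
  // setcc(negative) yields 1 for negative values, 0 otherwise; decb then
  // maps 1 -> 0 (negative clamps to 0) and 0 -> 255 (overflow clamps up).
  uint8_t cc = value < 0 ? 1 : 0;
  return static_cast<uint8_t>(cc - 1);
}
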
__ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
- __ movb(Operand(rbx, rdi, times_1, 0), rcx);
+ __ movb(Operand(rbx, rcx, times_1, 0), rdi);
__ ret(0);
// Extra capacity case: Check if there is extra capacity to
@@ -878,14 +971,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rax: value
// rdx: receiver (a JSArray)
// rbx: receiver's elements array (a FixedArray)
- // rcx: index (as a smi)
+ // rcx: index
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
+ __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
+ __ j(below_equal, &slow);
// Increment index to get new length.
- __ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
@@ -894,7 +987,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&array);
// rax: value
// rdx: receiver (a JSArray)
- // rcx: index (as a smi)
+ // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
@@ -902,26 +995,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
- __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+ __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
__ j(below_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
// rax: value
// rbx: receiver's elements array (a FixedArray)
- // rcx: index (as a smi)
+ // rcx: index
Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
- __ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
+ __ JumpIfNotSmi(rax, &non_smi_value);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
- SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
- __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
- rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx);
__ ret(0);
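
// Note on the reordering above: the store now happens unconditionally
// before the smi check, so the smi fast path is a single store plus
// return, and only non-smi values pay for the write barrier. RecordWrite
// still needs the untagged index, which is why rcx is kept live.
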
@@ -1109,7 +1198,11 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1132,7 +1225,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ movq(rax, Immediate(2));
- __ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
+ __ movq(rbx, ExternalReference(IC_Utility(id)));
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@@ -1160,27 +1253,20 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind) {
// ----------- S t a t e -------------
// rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rdx : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
@@ -1219,9 +1305,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
- // Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
}
@@ -1240,19 +1324,16 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Search dictionary - put result in register rdx.
- GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, rdi, CHECK_DICTIONARY);
-
- // Move the result to register rdi and check that it isn't a smi.
- __ movq(rdi, rdx);
- __ JumpIfSmi(rdx, miss);
+ GenerateDictionaryLoad(
+ masm, miss, rax, rdx, rbx, rcx, rdi, rdi, CHECK_DICTIONARY);
+ __ JumpIfSmi(rdi, miss);
// Check that the value is a JavaScript function.
- __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
__ j(not_equal, miss);
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
@@ -1263,7 +1344,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+// The generated code falls through if the call should be handled by runtime.
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1324,24 +1406,197 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ CheckAccessGlobalProxy(rdx, rax, &miss);
__ jmp(&invoke);
- // Cache miss: Jump to runtime.
__ bind(&miss);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &check_string);
+
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &slow_call);
+
+ GenerateFastArrayLoad(
+ masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+
+ __ bind(&do_call);
+ // receiver in rdx is not used after this point.
+ // rcx: key
+ // rdi: function
+
+ // Check that the value in rdi is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow_call);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+ __ j(not_equal, &slow_call);
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ __ bind(&check_number_dictionary);
+ // rax: elements
+ // rcx: smi key
+ // Check whether the elements is a number dictionary.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ SmiToInteger32(rbx, rcx);
+ // rbx: untagged index
+ GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ jmp(&do_call);
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ EnterInternalFrame();
+ __ push(rcx); // save the key
+ __ push(rdx); // pass the receiver
+ __ push(rcx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(rcx); // restore the key
+ __ LeaveInternalFrame();
+ __ movq(rdi, rax);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &lookup_monomorphic_cache);
+
+ __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, &lookup_monomorphic_cache);
+
+ GenerateDictionaryLoad(
+ masm, &slow_load, rbx, rdx, rax, rcx, rdi, rdi, DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+ // - there is hope that the runtime will create a monomorphic call stub
+ // that will get fetched next time.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ GenerateIndexFromHash(masm, rcx, rbx);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc);
}
@@ -1452,7 +1707,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in rax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
- rcx, rdi, CHECK_DICTIONARY);
+ rcx, rdi, rax, CHECK_DICTIONARY);
__ ret(0);
// Global object access: Check access rights.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 3823cadb..24bac7d3 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -35,6 +35,7 @@
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"
+#include "heap.h"
namespace v8 {
namespace internal {
@@ -96,8 +97,8 @@ void MacroAssembler::RecordWriteHelper(Register object,
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
- and_(addr, Immediate(Page::kPageAlignmentMask));
shrl(addr, Immediate(Page::kRegionSizeLog2));
+ andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
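
// The reordering relies on a shift/mask identity; for logical shifts,
//   (addr & kPageAlignmentMask) >> kRegionSizeLog2
//     == (addr >> kRegionSizeLog2) & (kPageAlignmentMask >> kRegionSizeLog2)
// so shifting first and masking with the pre-shifted constant selects the
// same region bits while letting both operations be 32-bit.
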
@@ -106,25 +107,25 @@ void MacroAssembler::RecordWriteHelper(Register object,
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the smi_index register contains the array index into
-// the elements array represented as a smi. Otherwise it can be used as a
-// scratch register.
+// If offset is zero, then the index register contains the array index into
+// the elements array represented as a zero-extended int32. Otherwise it can
+// be used as a scratch register.
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
- Register smi_index) {
+ Register index) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
- RecordWriteNonSmi(object, offset, value, smi_index);
+ RecordWriteNonSmi(object, offset, value, index);
bind(&done);
// Clobber all input registers when running with the debug-code flag
@@ -135,7 +136,7 @@ void MacroAssembler::RecordWrite(Register object,
if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
@@ -143,7 +144,7 @@ void MacroAssembler::RecordWrite(Register object,
void MacroAssembler::RecordWriteNonSmi(Register object,
int offset,
Register scratch,
- Register smi_index) {
+ Register index) {
Label done;
if (FLAG_debug_code) {
@@ -151,6 +152,16 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
JumpIfNotSmi(object, &okay);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay);
+
+ if (offset == 0) {
+ // index must be int32.
+ Register tmp = index.is(rax) ? rbx : rax;
+ push(tmp);
+ movl(tmp, index);
+ cmpq(tmp, index);
+ Check(equal, "Index register for RecordWrite must be untagged int32.");
+ pop(tmp);
+ }
}
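The debug check just added relies on movl zero-extending its result to 64 bits: copying index with movl and comparing against the original with cmpq succeeds exactly when the upper 32 bits were already clear, i.e. when the caller passed an untagged, zero-extended int32 rather than, say, a smi. The same predicate in plain C++ (a sketch, not V8 source):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t good = 0x00000000DEADBEEFull;  // upper half clear: Check passes
      uint64_t bad  = 0x00000001DEADBEEFull;  // upper half set: Check fails
      assert(static_cast<uint64_t>(static_cast<uint32_t>(good)) == good);
      assert(static_cast<uint64_t>(static_cast<uint32_t>(bad)) != bad);
      return 0;
    }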
// Test that the object address is not in the new space. We cannot
@@ -163,16 +174,15 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
- Register dst = smi_index;
+ Register dst = index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
- SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
- index.reg,
- index.scale,
+ index,
+ times_pointer_size,
FixedArray::kHeaderSize));
}
RecordWriteHelper(object, dst, scratch);
@@ -184,7 +194,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
@@ -446,13 +456,8 @@ void MacroAssembler::Set(Register dst, int64_t x) {
void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (x == 0) {
- xor_(kScratchRegister, kScratchRegister);
- movq(dst, kScratchRegister);
- } else if (is_int32(x)) {
+ if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
} else {
movq(kScratchRegister, x, RelocInfo::NONE);
movq(dst, kScratchRegister);
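Dropping the is_uint32 branch here is a correctness point, not just a simplification: writing to a register with movl zero-extends into the full 64 bits, but writing to memory with movl stores only four bytes and leaves the upper half of the 8-byte slot stale. The x == 0 special case is already covered by the is_int32 branch. A sketch of the failure mode the removed branch risked, assuming a little-endian host (not V8 source):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t slot = 0xFFFFFFFFFFFFFFFFull;  // stale bits in the 8-byte slot
      uint32_t value = 0xDEADBEEFu;
      // What a movl to memory does: only the low four bytes are written.
      std::memcpy(&slot, &value, sizeof(value));
      assert(slot == 0xFFFFFFFFDEADBEEFull);  // upper half is still stale
      // What the movq path guarantees: the whole slot holds the value.
      slot = value;
      assert(slot == 0x00000000DEADBEEFull);
      return 0;
    }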
@@ -485,6 +490,23 @@ void MacroAssembler::Integer32ToSmi(Register dst,
}
+void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
+ if (FLAG_debug_code) {
+ testb(dst, Immediate(0x01));
+ Label ok;
+ j(zero, &ok);
+ if (allow_stub_calls()) {
+ Abort("Integer32ToSmiField writing to non-smi location");
+ } else {
+ int3();
+ }
+ bind(&ok);
+ }
+ ASSERT(kSmiShift % kBitsPerByte == 0);
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
Register src,
int constant) {
@@ -520,6 +542,11 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
}
+void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
void MacroAssembler::SmiTest(Register src) {
testq(src, src);
}
@@ -556,6 +583,11 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
}
+void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
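All three new field helpers (Integer32ToSmiField, the Operand form of SmiToInteger64, and SmiCompareInteger32) lean on the x64 smi layout: the 32-bit payload sits in the upper half of the 64-bit word (kSmiShift == 32) and the low half, including the tag, is all zero, so a 4-byte access at byte offset kSmiShift / kBitsPerByte touches exactly the payload. A standalone sketch of that layout, assuming a little-endian host (not V8 source):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const int kSmiShift = 32;
      const int kBitsPerByte = 8;
      int32_t payload = -12345;
      uint64_t smi =
          static_cast<uint64_t>(static_cast<int64_t>(payload)) << kSmiShift;

      // Integer32ToSmiField: a 4-byte store at the payload offset yields the
      // smi, provided the low half (the tag bits) is already zero.
      unsigned char field[8] = {0};
      std::memcpy(field + kSmiShift / kBitsPerByte, &payload, sizeof(payload));
      uint64_t stored;
      std::memcpy(&stored, field, sizeof(stored));
      assert(stored == smi);

      // SmiToInteger64 on an Operand: a sign-extending 4-byte load at the
      // same offset recovers the value, matching the movsxlq above.
      int32_t loaded;
      std::memcpy(&loaded, field + kSmiShift / kBitsPerByte, sizeof(loaded));
      assert(loaded == payload);
      return 0;
    }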
@@ -696,15 +728,12 @@ void MacroAssembler::SmiAdd(Register dst,
movq(dst, src1);
addq(dst, src2);
}
- Assert(no_overflow, "Smi addition onverflow");
+ Assert(no_overflow, "Smi addition overflow");
} else if (dst.is(src1)) {
- addq(dst, src2);
- Label smi_result;
- j(no_overflow, &smi_result);
- // Restore src1.
- subq(src1, src2);
- jmp(on_not_smi_result);
- bind(&smi_result);
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
} else {
movq(dst, src1);
addq(dst, src2);
@@ -727,15 +756,11 @@ void MacroAssembler::SmiSub(Register dst,
movq(dst, src1);
subq(dst, src2);
}
- Assert(no_overflow, "Smi substraction onverflow");
+ Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result);
subq(dst, src2);
- Label smi_result;
- j(no_overflow, &smi_result);
- // Restore src1.
- addq(src1, src2);
- jmp(on_not_smi_result);
- bind(&smi_result);
} else {
movq(dst, src1);
subq(dst, src2);
@@ -757,15 +782,12 @@ void MacroAssembler::SmiSub(Register dst,
movq(dst, src1);
subq(dst, src2);
}
- Assert(no_overflow, "Smi substraction onverflow");
+ Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
- subq(dst, src2);
- Label smi_result;
- j(no_overflow, &smi_result);
- // Restore src1.
- addq(src1, src2);
- jmp(on_not_smi_result);
- bind(&smi_result);
+ movq(kScratchRegister, src1);
+ subq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
@@ -883,12 +905,9 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
- addq(dst, kScratchRegister);
- Label result_ok;
- j(no_overflow, &result_ok);
- subq(dst, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&result_ok);
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
} else {
Move(dst, constant);
addq(dst, src);
@@ -910,10 +929,12 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
} else {
// Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) {
- Move(kScratchRegister, constant);
- movq(dst, src);
- subq(dst, kScratchRegister);
+ Move(dst, constant);
+ // Adding and subtracting the min-value give the same result; they
+ // differ only in the overflow bit, which we don't check here.
+ addq(dst, src);
} else {
+ // Subtract by adding the negation.
Move(dst, Smi::FromInt(-constant->value()));
addq(dst, src);
}
@@ -931,21 +952,32 @@ void MacroAssembler::SmiSubConstant(Register dst,
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- Label sub_success;
- j(no_overflow, &sub_success);
- addq(src, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&sub_success);
- } else {
if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test the non-negativeness before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
Move(kScratchRegister, constant);
- movq(dst, src);
subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ Move(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test the non-negativeness before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ Move(dst, constant);
+ // Adding and subtracting the min-value give the same result; they
+ // differ only in the overflow bit, which we don't check here.
+ addq(dst, src);
} else {
+ // Subtract by adding the negation.
Move(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
j(overflow, on_not_smi_result);
@@ -1695,6 +1727,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(kScratchRegister));
+ LoadRoot(kScratchRegister, root_value_index);
+ cmpq(src, kScratchRegister);
+ Check(equal, message);
+}
+
+
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 0acce054..bb0b6810 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -203,6 +203,9 @@ class MacroAssembler: public Assembler {
// NOTICE: Destroys the dst register even if unsuccessful!
void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
+ // Stores an integer32 value into a memory field that already holds a smi.
+ void Integer32ToSmiField(const Operand& dst, Register src);
+
// Adds constant to src and tags the result as a smi.
// Result must be a valid smi.
void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
@@ -214,6 +217,7 @@ class MacroAssembler: public Assembler {
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
+ void SmiToInteger64(Register dst, const Operand& src);
// Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value.
@@ -234,6 +238,8 @@ class MacroAssembler: public Assembler {
void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
void SmiCompare(const Operand& dst, Smi* src);
+ // Compare the int32 in src register to the value of the smi stored at dst.
+ void SmiCompareInteger32(const Operand& dst, Register src);
// Sets sign and zero flags depending on value of smi in register.
void SmiTest(Register src);
@@ -550,6 +556,11 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index cc544705..1e103ac2 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -706,6 +706,15 @@ static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
#define __ ACCESS_MASM((masm()))
+
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ Cmp(rcx, Handle<String>(name));
+ __ j(not_equal, miss);
+ }
+}
+
+
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -740,6 +749,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
+ GenerateNameCheck(name, &miss_in_smi_check);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -881,6 +892,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -938,6 +951,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -970,30 +985,30 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ SmiAddConstant(rax, rax, Smi::FromInt(argc));
+ __ addl(rax, Immediate(argc));
// Get the element's length into rcx.
- __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ SmiCompare(rax, rcx);
+ __ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements);
// Save new length.
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Push the element.
__ movq(rcx, Operand(rsp, argc * kPointerSize));
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
__ lea(rdx, FieldOperand(rbx,
- index.reg, index.scale,
+ rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movq(Operand(rdx, 0), rcx);
// Check if value is a smi.
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+
__ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit);
@@ -1005,6 +1020,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
RecordWriteStub stub(rbx, rdx, rcx);
__ CallStub(&stub);
+
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1019,9 +1035,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rcx, Operand(rcx, 0));
// Check if it's the end of elements.
- index = masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
__ lea(rdx, FieldOperand(rbx,
- index.reg, index.scale,
+ rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin);
@@ -1049,8 +1064,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
+ // Make new length a smi before returning it.
+ __ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
@@ -1092,6 +1108,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss, return_undefined, call_builtin;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1111,28 +1129,26 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ j(not_equal, &miss);
// Get the array's length into rcx and calculate new length.
- __ movq(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(1));
- __ SmiTest(rcx);
+ __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ subl(rcx, Immediate(1));
__ j(negative, &return_undefined);
// Get the last element.
__ Move(r9, Factory::the_hole_value());
- SmiIndex index =
- masm()->SmiToIndex(r8, rcx, times_pointer_size);
__ movq(rax, FieldOperand(rbx,
- index.reg, index.scale,
+ rcx, times_pointer_size,
FixedArray::kHeaderSize));
// Check if element is already the hole.
__ cmpq(rax, r9);
+ // If so, call slow-case to also check prototypes for value.
__ j(equal, &call_builtin);
// Set the array's length.
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
- // Fill with the hole and return original value..
+ // Fill with the hole and return original value.
__ movq(FieldOperand(rbx,
- index.reg, index.scale,
+ rcx, times_pointer_size,
FixedArray::kHeaderSize),
r9);
__ ret((argc + 1) * kPointerSize);
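Both array stubs now keep the length untagged while they work on it: SmiToInteger32 loads the payload, plain addl/subl adjust it, the element address is formed directly with times_pointer_size, and Integer32ToSmiField writes the result back, which is what lets the old SmiToIndex shuffles disappear. The pop stub's flow, restated as a C++ sketch over an assumed illustrative layout (not the real object layout and not V8 source):

    #include <cstdint>

    struct Array {
      uint64_t length_smi;   // x64 smi: payload in the upper 32 bits
      uint64_t elements[8];  // assumed inline backing store
    };

    // Mirrors the stub: untag, decrement, index, retag, fill with the hole.
    // Where the stub returns undefined or calls the builtin, this sketch
    // just returns the hole sentinel.
    uint64_t Pop(Array* a, uint64_t hole) {
      int32_t len = static_cast<int32_t>(a->length_smi >> 32);  // SmiToInteger32
      len -= 1;                                                 // subl
      if (len < 0) return hole;                 // j(negative, &return_undefined)
      uint64_t value = a->elements[len];
      if (value == hole) return hole;           // slow case checks prototypes
      a->length_smi = static_cast<uint64_t>(static_cast<uint32_t>(len)) << 32;
      a->elements[len] = hole;
      return value;
    }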
@@ -1190,6 +1206,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1254,6 +1272,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index a0acd6a2..e65378dc 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -961,16 +961,18 @@ void VirtualFrame::SyncRange(int begin, int end) {
// Sync elements below the range if they have not been materialized
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
+ int end_or_stack_pointer = Min(stack_pointer_, end);
+ // Emit normal push instructions for elements above the stack pointer
+ // and mov instructions for elements at or below it.
+ int i = start;
- // If positive we have to adjust the stack pointer.
- int delta = end - stack_pointer_;
- if (delta > 0) {
- stack_pointer_ = end;
- __ subq(rsp, Immediate(delta * kPointerSize));
- }
-
- for (int i = start; i <= end; i++) {
+ while (i <= end_or_stack_pointer) {
if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+ i++;
+ }
+ while (i <= end) {
+ SyncElementByPushing(i);
+ i++;
}
}
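The rewrite replaces the single up-front rsp adjustment with a two-phase walk: elements at or below the current stack pointer already own a stack slot and are synced with plain stores, while elements above it are materialized with pushes, each of which advances the stack pointer one slot by itself. The control flow, with the assembler calls stubbed out and the is_synced bookkeeping elided (a sketch, not V8 source):

    #include <algorithm>

    static void SyncElementBelowStackPointer(int) { /* mov into its slot */ }
    static void SyncElementByPushing(int) { /* push; grows the stack */ }

    void SyncRange(int begin, int end, int stack_pointer) {
      int start = std::min(begin, stack_pointer + 1);
      int end_or_stack_pointer = std::min(stack_pointer, end);
      int i = start;
      while (i <= end_or_stack_pointer) SyncElementBelowStackPointer(i++);
      while (i <= end) SyncElementByPushing(i++);
    }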
@@ -1164,6 +1166,25 @@ Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
}
+Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
+ int arg_count,
+ int loop_nesting) {
+ // Function name, arguments, and receiver are found on top of the frame
+ // and dropped by the call. The IC expects the name in rcx and the rest
+ // on the stack, and drops them all.
+ InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic =
+ cgen()->ComputeKeyedCallInitialize(arg_count, in_loop);
+ Result name = Pop();
+ // Spill args and receiver (the function name was already popped above).
+ // The call will drop args and receiver.
+ PrepareForCall(arg_count + 1, arg_count + 1);
+ name.ToRegister(rcx);
+ name.Unuse();
+ return RawCallCodeObject(ic, mode);
+}
+
+
Result VirtualFrame::CallConstructor(int arg_count) {
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in rax, function in rdi, and the arguments
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index affe18ff..dc270fea 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -369,6 +369,8 @@ class VirtualFrame : public ZoneObject {
// The argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+ Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
// Allocate and call JS function as constructor. Arguments,
// receiver (global object), and function are found on top of the
// frame. Function is not dropped. The argument count does not
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 91231253..c426db40 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -27,8 +27,6 @@
#include <limits.h>
-#define USE_NEW_QUERY_CALLBACKS
-
#include "v8.h"
#include "api.h"
@@ -9637,32 +9635,53 @@ THREADED_TEST(PixelArray) {
}
-template <class ExternalArrayClass, class ElementType>
-static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
- int64_t low,
- int64_t high) {
+THREADED_TEST(PixelArrayInfo) {
v8::HandleScope scope;
LocalContext context;
- const int kElementCount = 40;
- int element_size = 0;
+ for (int size = 0; size < 100; size += 10) {
+ uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(size));
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ obj->SetIndexedPropertiesToPixelData(pixel_data, size);
+ CHECK(obj->HasIndexedPropertiesInPixelData());
+ CHECK_EQ(pixel_data, obj->GetIndexedPropertiesPixelData());
+ CHECK_EQ(size, obj->GetIndexedPropertiesPixelDataLength());
+ free(pixel_data);
+ }
+}
+
+
+static int ExternalArrayElementSize(v8::ExternalArrayType array_type) {
switch (array_type) {
case v8::kExternalByteArray:
case v8::kExternalUnsignedByteArray:
- element_size = 1;
+ return 1;
break;
case v8::kExternalShortArray:
case v8::kExternalUnsignedShortArray:
- element_size = 2;
+ return 2;
break;
case v8::kExternalIntArray:
case v8::kExternalUnsignedIntArray:
case v8::kExternalFloatArray:
- element_size = 4;
+ return 4;
break;
default:
UNREACHABLE();
- break;
+ return -1;
}
+ UNREACHABLE();
+ return -1;
+}
+
+
+template <class ExternalArrayClass, class ElementType>
+static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
+ int64_t low,
+ int64_t high) {
+ v8::HandleScope scope;
+ LocalContext context;
+ const int kElementCount = 40;
+ int element_size = ExternalArrayElementSize(array_type);
ElementType* array_data =
static_cast<ElementType*>(malloc(kElementCount * element_size));
i::Handle<ExternalArrayClass> array =
@@ -10043,6 +10062,35 @@ THREADED_TEST(ExternalArrays) {
}
+void ExternalArrayInfoTestHelper(v8::ExternalArrayType array_type) {
+ v8::HandleScope scope;
+ LocalContext context;
+ for (int size = 0; size < 100; size += 10) {
+ int element_size = ExternalArrayElementSize(array_type);
+ void* external_data = malloc(size * element_size);
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ obj->SetIndexedPropertiesToExternalArrayData(
+ external_data, array_type, size);
+ CHECK(obj->HasIndexedPropertiesInExternalArrayData());
+ CHECK_EQ(external_data, obj->GetIndexedPropertiesExternalArrayData());
+ CHECK_EQ(array_type, obj->GetIndexedPropertiesExternalArrayDataType());
+ CHECK_EQ(size, obj->GetIndexedPropertiesExternalArrayDataLength());
+ free(external_data);
+ }
+}
+
+
+THREADED_TEST(ExternalArrayInfo) {
+ ExternalArrayInfoTestHelper(v8::kExternalByteArray);
+ ExternalArrayInfoTestHelper(v8::kExternalUnsignedByteArray);
+ ExternalArrayInfoTestHelper(v8::kExternalShortArray);
+ ExternalArrayInfoTestHelper(v8::kExternalUnsignedShortArray);
+ ExternalArrayInfoTestHelper(v8::kExternalIntArray);
+ ExternalArrayInfoTestHelper(v8::kExternalUnsignedIntArray);
+ ExternalArrayInfoTestHelper(v8::kExternalFloatArray);
+}
+
+
THREADED_TEST(ScriptContextDependence) {
v8::HandleScope scope;
LocalContext c1;
@@ -10127,7 +10175,13 @@ v8::Handle<Value> AnalyzeStackInNativeCode(const v8::Arguments& args) {
stackTrace->GetFrame(0));
checkStackFrame(origin, "baz", 8, 3, false, true,
stackTrace->GetFrame(1));
- checkStackFrame(NULL, "", 1, 1, true, false,
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ bool is_eval = true;
+#else // ENABLE_DEBUGGER_SUPPORT
+ bool is_eval = false;
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+ checkStackFrame(NULL, "", 1, 1, is_eval, false,
stackTrace->GetFrame(2));
// The last frame is an anonymous function that has the initial call to foo.
checkStackFrame(origin, "", 10, 1, false, false,
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 4c3ff5e3..e6896378 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
+#ifdef ENABLE_DEBUGGER_SUPPORT
-#define USE_NEW_QUERY_CALLBACKS
+#include <stdlib.h>
#include "v8.h"
@@ -194,8 +194,9 @@ static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
static int break_point = 0;
Handle<v8::internal::SharedFunctionInfo> shared(fun->shared());
Debug::SetBreakPoint(
- shared, position,
- Handle<Object>(v8::internal::Smi::FromInt(++break_point)));
+ shared,
+ Handle<Object>(v8::internal::Smi::FromInt(++break_point)),
+ &position);
return break_point;
}
@@ -2029,6 +2030,51 @@ TEST(ScriptBreakPointLine) {
}
+// Test top level script break points set on lines.
+TEST(ScriptBreakPointLineTopLevel) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
+ v8::Undefined());
+
+ v8::Local<v8::String> script = v8::String::New(
+ "function f() {\n"
+ " a = 1; // line 1\n"
+ "}\n"
+ "a = 2; // line 3\n");
+ v8::Local<v8::Function> f;
+ {
+ v8::HandleScope scope;
+ v8::Script::Compile(script, v8::String::New("test.html"))->Run();
+ }
+ f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+
+ Heap::CollectAllGarbage(false);
+
+ SetScriptBreakPointByNameFromJS("test.html", 3, -1);
+
+ // Call f and check that there were no break points.
+ break_point_hit_count = 0;
+ f->Call(env->Global(), 0, NULL);
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Recompile and run script and check that break point was hit.
+ break_point_hit_count = 0;
+ v8::Script::Compile(script, v8::String::New("test.html"))->Run();
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Call f and check that there are still no break points.
+ break_point_hit_count = 0;
+ f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ CHECK_EQ(0, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test that it is possible to remove the last break point for a function
// inside the break handling of that break point.
TEST(RemoveBreakPointInBreak) {
@@ -6571,3 +6617,4 @@ TEST(DebugEventContext) {
CheckDebuggerUnloaded();
}
+#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index c4be35ee..7587da8b 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -27,8 +27,6 @@
#include <stdlib.h>
-#define USE_NEW_QUERY_CALLBACKS
-
#include "v8.h"
#include "heap.h"
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 3189e5e1..5903fe65 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -248,6 +248,72 @@ TEST(Type0) {
COMPARE(mvn(r5, Operand(r4), SetCC, cc),
"31f05004 mvnccs r5, r4");
+ // Instructions autotransformed by the assembler.
+ // mov -> mvn.
+ COMPARE(mov(r3, Operand(-1), LeaveCC, al),
+ "e3e03000 mvn r3, #0");
+ COMPARE(mov(r4, Operand(-2), SetCC, al),
+ "e3f04001 mvns r4, #1");
+ COMPARE(mov(r5, Operand(0x0ffffff0), SetCC, ne),
+ "13f052ff mvnnes r5, #-268435441");
+ COMPARE(mov(r6, Operand(-1), LeaveCC, ne),
+ "13e06000 mvnne r6, #0");
+
+ // mvn -> mov.
+ COMPARE(mvn(r3, Operand(-1), LeaveCC, al),
+ "e3a03000 mov r3, #0");
+ COMPARE(mvn(r4, Operand(-2), SetCC, al),
+ "e3b04001 movs r4, #1");
+ COMPARE(mvn(r5, Operand(0x0ffffff0), SetCC, ne),
+ "13b052ff movnes r5, #-268435441");
+ COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
+ "13a06000 movne r6, #0");
+
+ // mov -> movw.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
+ "13015234 movwne r5, #4660");
+ // We only disassemble one instruction so the eor instruction is not here.
+ COMPARE(eor(r5, r4, Operand(0x1234), LeaveCC, ne),
+ "1301c234 movwne ip, #4660");
+ // Movw can't do setcc so we don't get that here. Mov immediate with setcc
+ // is pretty strange anyway.
+ COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
+ "159fc000 ldrne ip, [pc, #+0]");
+ // We only disassemble one instruction so the eor instruction is not here.
+ // The eor does the setcc so we get a movw here.
+ COMPARE(eor(r5, r4, Operand(0x1234), SetCC, ne),
+ "1301c234 movwne ip, #4660");
+
+ COMPARE(movt(r5, 0x4321, ne),
+ "13445321 movtne r5, #17185");
+ COMPARE(movw(r5, 0xabcd, eq),
+ "030a5bcd movweq r5, #43981");
+ }
+
+ // Eor doesn't have an eor-negative variant, but we can do an mvn followed by
+ // an eor to get the same effect.
+ COMPARE(eor(r5, r4, Operand(0xffffff34), SetCC, ne),
+ "13e0c0cb mvnne ip, #203");
+
+ // and <-> bic.
+ COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
+ "e3c537ff bic r3, r5, #66846720");
+ COMPARE(bic(r3, r5, Operand(0xfc03ffff)),
+ "e20537ff and r3, r5, #66846720");
+
+ // sub <-> add.
+ COMPARE(add(r3, r5, Operand(-1024)),
+ "e2453b01 sub r3, r5, #1024");
+ COMPARE(sub(r3, r5, Operand(-1024)),
+ "e2853b01 add r3, r5, #1024");
+
+ // cmp <-> cmn.
+ COMPARE(cmp(r3, Operand(-1024)),
+ "e3730b01 cmn r3, #1024");
+ COMPARE(cmn(r3, Operand(-1024)),
+ "e3530b01 cmp r3, #1024");
+
// Miscellaneous instructions encoded as type 0.
COMPARE(blx(ip),
"e12fff3c blx ip");
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index c8e01979..e51bfabd 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -276,9 +276,11 @@ TEST(DisasmIa320) {
__ jmp(&L1);
__ jmp(Operand(ebx, ecx, times_4, 10000));
+#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget());
__ jmp(Operand::StaticVariable(after_break_target));
+#endif // ENABLE_DEBUGGER_SUPPORT
__ jmp(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -375,7 +377,7 @@ TEST(DisasmIa320) {
__ divsd(xmm1, xmm0);
__ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
// 128 bit move instructions.
__ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
diff --git a/test/cctest/test-func-name-inference.cc b/test/cctest/test-func-name-inference.cc
index 67791fb7..563cc4bf 100644
--- a/test/cctest/test-func-name-inference.cc
+++ b/test/cctest/test-func-name-inference.cc
@@ -81,6 +81,7 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
int func_pos = Runtime::StringMatch(script_src, func_pos_str, 0);
CHECK_NE(0, func_pos);
+#ifdef ENABLE_DEBUGGER_SUPPORT
// Obtain SharedFunctionInfo for the function.
Object* shared_func_info_ptr =
Runtime::FindSharedFunctionInfoInScript(i_script, func_pos);
@@ -92,6 +93,7 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
SmartPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString();
CHECK_EQ(ref_inferred_name, *inferred_name);
+#endif // ENABLE_DEBUGGER_SUPPORT
}
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 2e568946..7f1e3d80 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -6,9 +6,11 @@
#include "v8.h"
#include "heap-profiler.h"
+#include "snapshot.h"
#include "string-stream.h"
#include "cctest.h"
#include "zone-inl.h"
+#include "../include/v8-profiler.h"
namespace i = v8::internal;
using i::ClustersCoarser;
@@ -390,4 +392,236 @@ TEST(RetainerProfile) {
CHECK_EQ("(global property);1", printer.GetRetainers("C"));
}
+
+namespace {
+
+class NamedEntriesDetector {
+ public:
+ NamedEntriesDetector()
+ : has_A1(false), has_B1(false), has_C1(false),
+ has_A2(false), has_B2(false), has_C2(false) {
+ }
+
+ void Apply(i::HeapEntry* entry) {
+ const char* node_name = entry->name();
+ if (strcmp("A1", node_name) == 0
+ && entry->GetRetainingPaths()->length() > 0) has_A1 = true;
+ if (strcmp("B1", node_name) == 0
+ && entry->GetRetainingPaths()->length() > 0) has_B1 = true;
+ if (strcmp("C1", node_name) == 0
+ && entry->GetRetainingPaths()->length() > 0) has_C1 = true;
+ if (strcmp("A2", node_name) == 0
+ && entry->GetRetainingPaths()->length() > 0) has_A2 = true;
+ if (strcmp("B2", node_name) == 0
+ && entry->GetRetainingPaths()->length() > 0) has_B2 = true;
+ if (strcmp("C2", node_name) == 0
+ && entry->GetRetainingPaths()->length() > 0) has_C2 = true;
+ }
+
+ bool has_A1;
+ bool has_B1;
+ bool has_C1;
+ bool has_A2;
+ bool has_B2;
+ bool has_C2;
+};
+
+} // namespace
+
+
+static const v8::HeapGraphNode* GetGlobalObject(
+ const v8::HeapSnapshot* snapshot) {
+ CHECK_EQ(1, snapshot->GetHead()->GetChildrenCount());
+ return snapshot->GetHead()->GetChild(0)->GetToNode();
+}
+
+
+static const v8::HeapGraphNode* GetProperty(const v8::HeapGraphNode* node,
+ v8::HeapGraphEdge::Type type,
+ const char* name) {
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+ v8::String::AsciiValue prop_name(prop->GetName());
+ if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
+ return prop->GetToNode();
+ }
+ return NULL;
+}
+
+
+static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::STRING) {
+ v8::String::AsciiValue node_name(node->GetName());
+ if (strcmp(contents, *node_name) == 0) return true;
+ }
+ }
+ return false;
+}
+
+
+TEST(HeapSnapshot) {
+ v8::HandleScope scope;
+
+ v8::Handle<v8::String> token1 = v8::String::New("token1");
+ v8::Handle<v8::Context> env1 = v8::Context::New();
+ env1->SetSecurityToken(token1);
+ env1->Enter();
+
+ CompileAndRunScript(
+ "function A1() {}\n"
+ "function B1(x) { this.x = x; }\n"
+ "function C1(x) { this.x1 = x; this.x2 = x; }\n"
+ "var a1 = new A1();\n"
+ "var b1_1 = new B1(a1), b1_2 = new B1(a1);\n"
+ "var c1 = new C1(a1);");
+
+ v8::Handle<v8::String> token2 = v8::String::New("token2");
+ v8::Handle<v8::Context> env2 = v8::Context::New();
+ env2->SetSecurityToken(token2);
+ env2->Enter();
+
+ CompileAndRunScript(
+ "function A2() {}\n"
+ "function B2(x) { return function() { return typeof x; }; }\n"
+ "function C2(x) { this.x1 = x; this.x2 = x; this[1] = x; }\n"
+ "var a2 = new A2();\n"
+ "var b2_1 = new B2(a2), b2_2 = new B2(a2);\n"
+ "var c2 = new C2(a2);");
+ const v8::HeapSnapshot* snapshot_env2 =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
+ const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
+
+ // Verify that the JS global object of env2 doesn't have '..1'
+ // properties, but has '..2' properties.
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a1"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_1"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_2"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c1"));
+ const v8::HeapGraphNode* a2_node =
+ GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a2");
+ CHECK_NE(NULL, a2_node);
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_1"));
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_2"));
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c2"));
+
+ // Verify that anything related to '[ABC]1' is not reachable.
+ NamedEntriesDetector det;
+ i::HeapSnapshot* i_snapshot_env2 =
+ const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
+ i_snapshot_env2->IterateEntries(&det);
+ CHECK(!det.has_A1);
+ CHECK(!det.has_B1);
+ CHECK(!det.has_C1);
+ CHECK(det.has_A2);
+ CHECK(det.has_B2);
+ CHECK(det.has_C2);
+
+ // Verify 'a2' object retainers. They are:
+ // - (global object).a2
+ // - c2.x1, c2.x2, c2[1]
+ // - b2_1 and b2_2 closures: via 'x' variable
+ CHECK_EQ(6, a2_node->GetRetainingPathsCount());
+ bool has_global_obj_a2_ref = false;
+ bool has_c2_x1_ref = false, has_c2_x2_ref = false, has_c2_1_ref = false;
+ bool has_b2_1_x_ref = false, has_b2_2_x_ref = false;
+ for (int i = 0; i < a2_node->GetRetainingPathsCount(); ++i) {
+ const v8::HeapGraphPath* path = a2_node->GetRetainingPath(i);
+ const int edges_count = path->GetEdgesCount();
+ CHECK_GT(edges_count, 0);
+ const v8::HeapGraphEdge* last_edge = path->GetEdge(edges_count - 1);
+ v8::String::AsciiValue last_edge_name(last_edge->GetName());
+ if (strcmp("a2", *last_edge_name) == 0
+ && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY) {
+ has_global_obj_a2_ref = true;
+ continue;
+ }
+ CHECK_GT(edges_count, 1);
+ const v8::HeapGraphEdge* prev_edge = path->GetEdge(edges_count - 2);
+ v8::String::AsciiValue prev_edge_name(prev_edge->GetName());
+ if (strcmp("x1", *last_edge_name) == 0
+ && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
+ && strcmp("c2", *prev_edge_name) == 0) has_c2_x1_ref = true;
+ if (strcmp("x2", *last_edge_name) == 0
+ && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
+ && strcmp("c2", *prev_edge_name) == 0) has_c2_x2_ref = true;
+ if (strcmp("1", *last_edge_name) == 0
+ && last_edge->GetType() == v8::HeapGraphEdge::ELEMENT
+ && strcmp("c2", *prev_edge_name) == 0) has_c2_1_ref = true;
+ if (strcmp("x", *last_edge_name) == 0
+ && last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
+ && strcmp("b2_1", *prev_edge_name) == 0) has_b2_1_x_ref = true;
+ if (strcmp("x", *last_edge_name) == 0
+ && last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
+ && strcmp("b2_2", *prev_edge_name) == 0) has_b2_2_x_ref = true;
+ }
+ CHECK(has_global_obj_a2_ref);
+ CHECK(has_c2_x1_ref);
+ CHECK(has_c2_x2_ref);
+ CHECK(has_c2_1_ref);
+ CHECK(has_b2_1_x_ref);
+ CHECK(has_b2_2_x_ref);
+}
+
+
+TEST(HeapSnapshotCodeObjects) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ CompileAndRunScript(
+ "function lazy(x) { return x - 1; }\n"
+ "function compiled(x) { return x + 1; }\n"
+ "compiled(1)");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
+
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* compiled =
+ GetProperty(global, v8::HeapGraphEdge::PROPERTY, "compiled");
+ CHECK_NE(NULL, compiled);
+ CHECK_EQ(v8::HeapGraphNode::CLOSURE, compiled->GetType());
+ const v8::HeapGraphNode* lazy =
+ GetProperty(global, v8::HeapGraphEdge::PROPERTY, "lazy");
+ CHECK_NE(NULL, lazy);
+ CHECK_EQ(v8::HeapGraphNode::CLOSURE, lazy->GetType());
+
+ // Find references to code.
+ const v8::HeapGraphNode* compiled_code =
+ GetProperty(compiled, v8::HeapGraphEdge::INTERNAL, "code");
+ CHECK_NE(NULL, compiled_code);
+ const v8::HeapGraphNode* lazy_code =
+ GetProperty(lazy, v8::HeapGraphEdge::INTERNAL, "code");
+ CHECK_NE(NULL, lazy_code);
+
+ // Verify that the non-compiled code doesn't contain references to the "x"
+ // literal, while the compiled code does.
+ bool compiled_references_x = false, lazy_references_x = false;
+ for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = compiled_code->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::CODE) {
+ if (HasString(node, "x")) {
+ compiled_references_x = true;
+ break;
+ }
+ }
+ }
+ for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = lazy_code->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::CODE) {
+ if (HasString(node, "x")) {
+ lazy_references_x = true;
+ break;
+ }
+ }
+ }
+ CHECK(compiled_references_x);
+ CHECK(!lazy_references_x);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-liveedit.cc b/test/cctest/test-liveedit.cc
index ec1a7a6c..244980a1 100644
--- a/test/cctest/test-liveedit.cc
+++ b/test/cctest/test-liveedit.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
#include <stdlib.h>
#include "v8.h"
@@ -172,3 +174,5 @@ TEST(LiveEditDiffer) {
CompareStrings("abbabababababaaabbabababababbabbbbbbbababa",
"bbbbabababbbabababbbabababababbabbababa");
}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index c4c8a457..3ec25c9f 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -98,9 +98,11 @@ static int make_code(TypeCode type, int id) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
static int register_code(int reg) {
return Debug::k_register_address << kDebugIdShift | reg;
}
+#endif // ENABLE_DEBUGGER_SUPPORT
TEST(ExternalReferenceEncoder) {
@@ -113,8 +115,10 @@ TEST(ExternalReferenceEncoder) {
Encode(encoder, Runtime::kAbort));
CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(DEBUG_ADDRESS, register_code(3)),
Encode(encoder, Debug_Address(Debug::k_register_address, 3)));
+#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function_prototype =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
@@ -131,9 +135,11 @@ TEST(ExternalReferenceEncoder) {
ExternalReference::address_of_real_stack_limit();
CHECK_EQ(make_code(UNCLASSIFIED, 5),
encoder.Encode(real_stack_limit_address.address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 12),
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ CHECK_EQ(make_code(UNCLASSIFIED, 15),
encoder.Encode(ExternalReference::debug_break().address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 7),
+#endif // ENABLE_DEBUGGER_SUPPORT
+ CHECK_EQ(make_code(UNCLASSIFIED, 10),
encoder.Encode(ExternalReference::new_space_start().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 3),
encoder.Encode(ExternalReference::roots_address().address()));
@@ -150,8 +156,10 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort)));
CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(AddressOf(Debug_Address(Debug::k_register_address, 3)),
decoder.Decode(make_code(DEBUG_ADDRESS, register_code(3))));
+#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(keyed_load_function.address(),
@@ -164,10 +172,12 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(UNCLASSIFIED, 4)));
CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(),
decoder.Decode(make_code(UNCLASSIFIED, 5)));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::debug_break().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 12)));
+ decoder.Decode(make_code(UNCLASSIFIED, 15)));
+#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::new_space_start().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 7)));
+ decoder.Decode(make_code(UNCLASSIFIED, 10)));
}
diff --git a/test/mjsunit/apply.js b/test/mjsunit/apply.js
index a4b0fd7f..cab7eb82 100644
--- a/test/mjsunit/apply.js
+++ b/test/mjsunit/apply.js
@@ -112,12 +112,25 @@ function al() {
return arguments.length + arguments[arguments.length - 1];
}
+var stack_corner_case_failure = false;
+
for (var j = 1; j < 0x40000000; j <<= 1) {
try {
var a = new Array(j);
a[j - 1] = 42;
assertEquals(42 + j, al.apply(345, a));
} catch (e) {
+ if (e.toString().indexOf("Maximum call stack size exceeded") != -1) {
+ // For some combinations of build settings, it may be the case that the
+ // stack here is just tall enough to contain the array whose size is
+ // specified by j but is not tall enough to contain the activation
+ // record for the apply call. Allow one such corner case through,
+ // checking that the length check will do the right thing for an array
+ // the next size up.
+ assertEquals(false, stack_corner_case_failure);
+ stack_corner_case_failure = true;
+ continue;
+ }
assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
"exception does not contain Function.prototype.apply: " +
e.toString());
@@ -127,7 +140,7 @@ for (var j = 1; j < 0x40000000; j <<= 1) {
a = new Array(j);
a[j - 1] = 42;
al.apply(345, a);
- assertUnreachable("Apply of arrray with length " + a.length +
+ assertUnreachable("Apply of array with length " + a.length +
" should have thrown");
} catch (e) {
assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
diff --git a/test/mjsunit/debug-setbreakpoint.js b/test/mjsunit/debug-setbreakpoint.js
index 3981dc45..9661c95a 100644
--- a/test/mjsunit/debug-setbreakpoint.js
+++ b/test/mjsunit/debug-setbreakpoint.js
@@ -116,7 +116,7 @@ function listener(event, exec_state, event_data, data) {
mirror = debug.MakeMirror(o.a);
testArguments(dcp, '{"type":"handle","target":' + mirror.handle() + '}', true, false);
- testArguments(dcp, '{"type":"script","target":"sourceUrlScript","line":1}', true, true);
+ testArguments(dcp, '{"type":"script","target":"sourceUrlScript","line":0}', true, true);
// Indicate that all was processed.
listenerComplete = true;
@@ -134,6 +134,7 @@ function f() {
};
function g() {
+ // Comment.
f();
};
@@ -184,3 +185,8 @@ Debug.setListener(breakListener);
sourceUrlFunc();
assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL");
+
+// Set a break point on a line with a comment, and check that the actual
+// position is the next line after the comment.
+var number = Debug.setScriptBreakPointById(g_script_id, g_line + 1);
+assertEquals(g_line + 2, Debug.findBreakPoint(number).actual_location.line);
diff --git a/test/mjsunit/keyed-call-generic.js b/test/mjsunit/keyed-call-generic.js
index 0b49b3e8..03146984 100644
--- a/test/mjsunit/keyed-call-generic.js
+++ b/test/mjsunit/keyed-call-generic.js
@@ -94,3 +94,20 @@ testMany(fixed_array, first3num, first3num);
testMany(dict_array, first3num, first3num);
testMany(fast_prop, first3str, first3num);
testMany(normal_prop, first3str, first3num);
+
+
+function testException(receiver, keys, exceptions) {
+ for (var i = 0; i != 10; i++) {
+ for (var k = 0; k != keys.length; k++) {
+ var thrown = false;
+ try {
+ var result = receiver[keys[k]]();
+ } catch (e) {
+ thrown = true;
+ }
+ assertEquals(exceptions[k], thrown);
+ }
+ }
+}
+
+testException([zero, one, /* hole */ ], [0, 1, 2], [false, false, true]);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 514d345a..ceb5e620 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -34,9 +34,6 @@ bugs: FAIL
# too long to run in debug mode on ARM.
fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
-# Issue 494: new snapshot code breaks mjsunit/apply on mac debug snapshot.
-apply: PASS, FAIL if ($system == macos && $mode == debug)
-
big-object-literal: PASS, SKIP if ($arch == arm)
# Issue 488: this test sometimes times out.
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index 46bfb34c..b258aa75 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -714,3 +714,156 @@ try {
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
+
+
+var obj6 = {};
+obj6[1] = 'foo';
+obj6[2] = 'bar';
+obj6[3] = '42';
+obj6[4] = '43';
+obj6[5] = '44';
+
+var descElement = { value: 'foobar' };
+var descElementNonConfigurable = { value: 'barfoo', configurable: false };
+var descElementNonWritable = { value: 'foofoo', writable: false };
+var descElementNonEnumerable = { value: 'barbar', enumerable: false };
+var descElementAllFalse = { value: 'foofalse',
+ configurable: false,
+ writable: false,
+ enumerable: false };
+
+
+// Redefine existing property.
+Object.defineProperty(obj6, '1', descElement);
+desc = Object.getOwnPropertyDescriptor(obj6, '1');
+assertEquals(desc.value, 'foobar');
+assertTrue(desc.writable);
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+
+// Redefine existing property with configurable: false.
+Object.defineProperty(obj6, '2', descElementNonConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj6, '2');
+assertEquals(desc.value, 'barfoo');
+assertTrue(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
+// Ensure that we can't overwrite the non-configurable element.
+try {
+ Object.defineProperty(obj6, '2', descElement);
+ assertUnreachable();
+} catch (e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
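+// Redefine existing property with writable: false.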
+Object.defineProperty(obj6, '3', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(obj6, '3');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+
+// Redefine existing property with enumerable: false.
+Object.defineProperty(obj6, '4', descElementNonEnumerable);
+desc = Object.getOwnPropertyDescriptor(obj6, '4');
+assertEquals(desc.value, 'barbar');
+assertTrue(desc.writable);
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+
+// Redefine existing property with all attributes false.
+Object.defineProperty(obj6, '5', descElementAllFalse);
+desc = Object.getOwnPropertyDescriptor(obj6, '5');
+assertEquals(desc.value, 'foofalse');
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+// Define a non-existing property - all attributes should default to false.
+Object.defineProperty(obj6, '15', descElement);
+desc = Object.getOwnPropertyDescriptor(obj6, '15');
+assertEquals(desc.value, 'foobar');
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+// Make sure that we can't redefine using direct access.
+obj6[15] = 'overwrite';
+assertEquals(obj6[15],'foobar');
+
+
+// Repeat the above tests on an array.
+var arr = new Array();
+arr[1] = 'foo';
+arr[2] = 'bar';
+arr[3] = '42';
+arr[4] = '43';
+arr[5] = '44';
+
+var descElement = { value: 'foobar' };
+var descElementNonConfigurable = { value: 'barfoo', configurable: false };
+var descElementNonWritable = { value: 'foofoo', writable: false };
+var descElementNonEnumerable = { value: 'barbar', enumerable: false };
+var descElementAllFalse = { value: 'foofalse',
+ configurable: false,
+ writable: false,
+ enumerable: false };
+
+
+// Redefine existing property.
+Object.defineProperty(arr, '1', descElement);
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertEquals(desc.value, 'foobar');
+assertTrue(desc.writable);
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+
+// Redefine existing property with configurable: false.
+Object.defineProperty(arr, '2', descElementNonConfigurable);
+desc = Object.getOwnPropertyDescriptor(arr, '2');
+assertEquals(desc.value, 'barfoo');
+assertTrue(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
+// Ensure that we can't overwrite the non-configurable element.
+try {
+ Object.defineProperty(arr, '2', descElement);
+ assertUnreachable();
+} catch (e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
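+// Redefine existing property with writable: false.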
+Object.defineProperty(arr, '3', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(arr, '3');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+
+// Redefine existing property with enumerable: false.
+Object.defineProperty(arr, '4', descElementNonEnumerable);
+desc = Object.getOwnPropertyDescriptor(arr, '4');
+assertEquals(desc.value, 'barbar');
+assertTrue(desc.writable);
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+
+// Redefine existing property with all attributes false.
+Object.defineProperty(arr, '5', descElementAllFalse);
+desc = Object.getOwnPropertyDescriptor(arr, '5');
+assertEquals(desc.value, 'foofalse');
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+// Define a non-existing property - all attributes should default to false.
+Object.defineProperty(arr, '15', descElement);
+desc = Object.getOwnPropertyDescriptor(arr, '15');
+assertEquals(desc.value, 'foobar');
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+
diff --git a/test/mjsunit/bugs/bug-619.js b/test/mjsunit/regress/regress-619.js
index ef8ba80e..24bdbc18 100644
--- a/test/mjsunit/bugs/bug-619.js
+++ b/test/mjsunit/regress/regress-619.js
@@ -25,9 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// When this bug is corrected move to object-define-property and add
-// additional tests for configurable in the same manner as existing tests
-// there.
+// Tests that Object.defineProperty works correctly on array indices.
+// Please see http://code.google.com/p/v8/issues/detail?id=619 for details.
var obj = {};
obj[1] = 42;
diff --git a/test/mjsunit/regress/regress-747.js b/test/mjsunit/regress/regress-747.js
new file mode 100644
index 00000000..6fcc0000
--- /dev/null
+++ b/test/mjsunit/regress/regress-747.js
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose_gc
+
+// This test makes sure that we do flush code with heap-allocated locals.
+// This can be a problem if eval is used within the scope.
+// See: http://code.google.com/p/v8/issues/detail?id=747
+
+(function() {
+ var x = 42;
+ this.callEval = function() {eval('x');};
+})();
+
+try {
+ callEval();
+} catch (e) {
+ assertUnreachable();
+}
+
+gc();
+gc();
+gc();
+gc();
+gc();
+gc();
+
+try {
+ callEval();
+} catch (e) {
+ assertUnreachable();
+}
diff --git a/test/mjsunit/samevalue.js b/test/mjsunit/samevalue.js
index 2de677e6..6cb35e6e 100644
--- a/test/mjsunit/samevalue.js
+++ b/test/mjsunit/samevalue.js
@@ -1,102 +1,102 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Flags: --expose-natives_as natives
-// Test the SameValue internal method.
-
-var obj1 = {x: 10, y: 11, z: "test"};
-var obj2 = {x: 10, y: 11, z: "test"};
-
-assertTrue(natives.SameValue(0, 0));
-assertTrue(natives.SameValue(+0, +0));
-assertTrue(natives.SameValue(-0, -0));
-assertTrue(natives.SameValue(1, 1));
-assertTrue(natives.SameValue(2, 2));
-assertTrue(natives.SameValue(-1, -1));
-assertTrue(natives.SameValue(0.5, 0.5));
-assertTrue(natives.SameValue(true, true));
-assertTrue(natives.SameValue(false, false));
-assertTrue(natives.SameValue(NaN, NaN));
-assertTrue(natives.SameValue(null, null));
-assertTrue(natives.SameValue("foo", "foo"));
-assertTrue(natives.SameValue(obj1, obj1));
-// Undefined values.
-assertTrue(natives.SameValue());
-assertTrue(natives.SameValue(undefined, undefined));
-
-assertFalse(natives.SameValue(0,1));
-assertFalse(natives.SameValue("foo", "bar"));
-assertFalse(natives.SameValue(obj1, obj2));
-assertFalse(natives.SameValue(true, false));
-
-assertFalse(natives.SameValue(obj1, true));
-assertFalse(natives.SameValue(obj1, "foo"));
-assertFalse(natives.SameValue(obj1, 1));
-assertFalse(natives.SameValue(obj1, undefined));
-assertFalse(natives.SameValue(obj1, NaN));
-
-assertFalse(natives.SameValue(undefined, true));
-assertFalse(natives.SameValue(undefined, "foo"));
-assertFalse(natives.SameValue(undefined, 1));
-assertFalse(natives.SameValue(undefined, obj1));
-assertFalse(natives.SameValue(undefined, NaN));
-
-assertFalse(natives.SameValue(NaN, true));
-assertFalse(natives.SameValue(NaN, "foo"));
-assertFalse(natives.SameValue(NaN, 1));
-assertFalse(natives.SameValue(NaN, obj1));
-assertFalse(natives.SameValue(NaN, undefined));
-
-assertFalse(natives.SameValue("foo", true));
-assertFalse(natives.SameValue("foo", 1));
-assertFalse(natives.SameValue("foo", obj1));
-assertFalse(natives.SameValue("foo", undefined));
-assertFalse(natives.SameValue("foo", NaN));
-
-assertFalse(natives.SameValue(true, 1));
-assertFalse(natives.SameValue(true, obj1));
-assertFalse(natives.SameValue(true, undefined));
-assertFalse(natives.SameValue(true, NaN));
-assertFalse(natives.SameValue(true, "foo"));
-
-assertFalse(natives.SameValue(1, true));
-assertFalse(natives.SameValue(1, obj1));
-assertFalse(natives.SameValue(1, undefined));
-assertFalse(natives.SameValue(1, NaN));
-assertFalse(natives.SameValue(1, "foo"));
-
-// Special string cases.
-assertFalse(natives.SameValue("1", 1));
-assertFalse(natives.SameValue("true", true));
-assertFalse(natives.SameValue("false", false));
-assertFalse(natives.SameValue("undefined", undefined));
-assertFalse(natives.SameValue("NaN", NaN));
-
-// -0 and +0 are should be different
-assertFalse(natives.SameValue(+0, -0));
-assertFalse(natives.SameValue(-0, +0));
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --expose-natives-as natives
+// Test the SameValue internal method.
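+//
+// SameValue (ES5 9.12) behaves like === except that NaN is SameValue to
+// NaN and +0 is not SameValue to -0; unlike ==, it never coerces types.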
+
+var obj1 = {x: 10, y: 11, z: "test"};
+var obj2 = {x: 10, y: 11, z: "test"};
+
+assertTrue(natives.SameValue(0, 0));
+assertTrue(natives.SameValue(+0, +0));
+assertTrue(natives.SameValue(-0, -0));
+assertTrue(natives.SameValue(1, 1));
+assertTrue(natives.SameValue(2, 2));
+assertTrue(natives.SameValue(-1, -1));
+assertTrue(natives.SameValue(0.5, 0.5));
+assertTrue(natives.SameValue(true, true));
+assertTrue(natives.SameValue(false, false));
+assertTrue(natives.SameValue(NaN, NaN));
+assertTrue(natives.SameValue(null, null));
+assertTrue(natives.SameValue("foo", "foo"));
+assertTrue(natives.SameValue(obj1, obj1));
+// Undefined values.
+assertTrue(natives.SameValue());
+assertTrue(natives.SameValue(undefined, undefined));
+
+assertFalse(natives.SameValue(0, 1));
+assertFalse(natives.SameValue("foo", "bar"));
+assertFalse(natives.SameValue(obj1, obj2));
+assertFalse(natives.SameValue(true, false));
+
+assertFalse(natives.SameValue(obj1, true));
+assertFalse(natives.SameValue(obj1, "foo"));
+assertFalse(natives.SameValue(obj1, 1));
+assertFalse(natives.SameValue(obj1, undefined));
+assertFalse(natives.SameValue(obj1, NaN));
+
+assertFalse(natives.SameValue(undefined, true));
+assertFalse(natives.SameValue(undefined, "foo"));
+assertFalse(natives.SameValue(undefined, 1));
+assertFalse(natives.SameValue(undefined, obj1));
+assertFalse(natives.SameValue(undefined, NaN));
+
+assertFalse(natives.SameValue(NaN, true));
+assertFalse(natives.SameValue(NaN, "foo"));
+assertFalse(natives.SameValue(NaN, 1));
+assertFalse(natives.SameValue(NaN, obj1));
+assertFalse(natives.SameValue(NaN, undefined));
+
+assertFalse(natives.SameValue("foo", true));
+assertFalse(natives.SameValue("foo", 1));
+assertFalse(natives.SameValue("foo", obj1));
+assertFalse(natives.SameValue("foo", undefined));
+assertFalse(natives.SameValue("foo", NaN));
+
+assertFalse(natives.SameValue(true, 1));
+assertFalse(natives.SameValue(true, obj1));
+assertFalse(natives.SameValue(true, undefined));
+assertFalse(natives.SameValue(true, NaN));
+assertFalse(natives.SameValue(true, "foo"));
+
+assertFalse(natives.SameValue(1, true));
+assertFalse(natives.SameValue(1, obj1));
+assertFalse(natives.SameValue(1, undefined));
+assertFalse(natives.SameValue(1, NaN));
+assertFalse(natives.SameValue(1, "foo"));
+
+// Special string cases.
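+// SameValue never coerces, so "1" and 1 are different values.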
+assertFalse(natives.SameValue("1", 1));
+assertFalse(natives.SameValue("true", true));
+assertFalse(natives.SameValue("false", false));
+assertFalse(natives.SameValue("undefined", undefined));
+assertFalse(natives.SameValue("NaN", NaN));
+
+// -0 and +0 should be different (1/+0 is Infinity, 1/-0 is -Infinity).
+assertFalse(natives.SameValue(+0, -0));
+assertFalse(natives.SameValue(-0, +0));
diff --git a/test/mjsunit/string-externalize.js b/test/mjsunit/string-externalize.js
new file mode 100644
index 00000000..5b1f9170
--- /dev/null
+++ b/test/mjsunit/string-externalize.js
@@ -0,0 +1,95 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-externalize-string
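+//
+// --expose-externalize-string provides the two helpers used below:
+// externalizeString(str, forceTwoByte) turns a heap string into an
+// external string, and isAsciiString(str) reports whether the string
+// uses the one-byte ascii representation.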
+
+var size = 1024;
+
+function test() {
+ var str = "";
+
+ // Build an ascii cons string.
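+  // Repeated += on a string of this length builds a cons-string tree
+  // rather than a flat string.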
+ for (var i = 0; i < size; i++) {
+ str += String.fromCharCode(i & 0x7f);
+ }
+ assertTrue(isAsciiString(str));
+
+ var twoByteExternalWithAsciiData =
+ "AA" + (function() { return "A"; })();
+ externalizeString(twoByteExternalWithAsciiData, true /* force two-byte */);
+ assertFalse(isAsciiString(twoByteExternalWithAsciiData));
+
+ var realTwoByteExternalString =
+ "\u1234\u1234" + (function() { return "\u1234"; })();
+ externalizeString(realTwoByteExternalString);
+ assertFalse(isAsciiString(realTwoByteExternalString));
+
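+  // join() produces a flat string; since the external string's data is
+  // all ascii, the flat copy can use the ascii representation.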
+ assertTrue(isAsciiString(["a", twoByteExternalWithAsciiData].join("")));
+
+ // Appending a two-byte string that contains only ascii chars should
+ // still produce an ascii cons.
+ var str1 = str + twoByteExternalWithAsciiData;
+ assertTrue(isAsciiString(str1));
+
+ // Force flattening of the string.
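+  // (reading characters by index makes V8 flatten the cons tree)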
+ var old_length = str1.length - twoByteExternalWithAsciiData.length;
+ for (var i = 0; i < old_length; i++) {
+ assertEquals(String.fromCharCode(i & 0x7f), str1[i]);
+ }
+ for (var i = old_length; i < str1.length; i++) {
+ assertEquals("A", str1[i]);
+ }
+
+ // Flattened string should still be ascii.
+ assertTrue(isAsciiString(str1));
+
+ // Lower-casing an ascii string should produce ascii.
+ assertTrue(isAsciiString(str1.toLowerCase()));
+
+ assertFalse(isAsciiString(["a", realTwoByteExternalString].join("")));
+
+ // Appending a real two-byte string should produce a two-byte cons.
+ var str2 = str + realTwoByteExternalString;
+ assertFalse(isAsciiString(str2));
+
+ // Force flattening of the string.
+ old_length = str2.length - realTwoByteExternalString.length;
+ for (var i = 0; i < old_length; i++) {
+ assertEquals(String.fromCharCode(i & 0x7f), str2[i]);
+ }
+  for (var i = old_length; i < str2.length; i++) {
+ assertEquals("\u1234", str2[i]);
+ }
+
+ // Flattened string should still be two-byte.
+ assertFalse(isAsciiString(str2));
+}
+
+// Run the test many times to make sure the inline caches (ICs) on the
+// keyed character loads don't break things.
+for (var i = 0; i < 10; i++) {
+ test();
+}