path: root/src/arm/code-stubs-arm.cc
author    Ben Murdoch <benm@google.com>  2012-03-05 11:04:45 +0000
committer Ben Murdoch <benm@google.com>  2012-04-11 15:39:56 +0100
commit    592a9fc1d8ea420377a2e7efd0600e20b058be2b (patch)
tree      23fe22995f4f9056a96266d169d49426a5e745d7 /src/arm/code-stubs-arm.cc
parent    e25ed7434cc3a061dd965ad25b923bca153aed94 (diff)
Merge V8 at 3.7.12.28
Bug: 5688872
Change-Id: Iddb40cae44d51a2b449f2858951e0472771f5981
Diffstat (limited to 'src/arm/code-stubs-arm.cc')
-rw-r--r--  src/arm/code-stubs-arm.cc  1086
1 file changed, 847 insertions, 239 deletions
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 36450c94..f2c0f992 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -98,9 +98,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -189,6 +189,121 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: function.
+ // [sp + kPointerSize]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ r0, r1, r2, &gc, TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0));
+
+ // Load the serialized scope info from the stack.
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+ // If this block context is nested in the global context we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(r3, &after_sentinel);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmp(r3, Operand::Zero());
+ __ Assert(eq, message);
+ }
+ __ ldr(r3, GlobalObjectOperand());
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Setup the fixed slots.
+ __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
+ __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
+
+ // Copy the global object from the previous context.
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, r0);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
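
The size passed to AllocateInNewSpace in this new stub is just the FixedArray size for slots_ + Context::MIN_CONTEXT_SLOTS entries. As a minimal sketch, here is that arithmetic in plain C++; the constants (4-byte pointers, a two-word map + length header, four fixed context slots) are assumptions chosen for illustration, not values taken from this patch.

#include <cstdio>

namespace {
const int kPointerSize = 4;                          // assumed: 32-bit ARM
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // assumed: map + length words
const int kMinContextSlots = 4;                      // assumed: closure, previous,
                                                     // extension, global

// Rough equivalent of FixedArray::SizeFor(slots_ + Context::MIN_CONTEXT_SLOTS)
// as used by FastNewBlockContextStub::Generate above.
int BlockContextAllocationSize(int scope_slots) {
  return kFixedArrayHeaderSize + (scope_slots + kMinContextSlots) * kPointerSize;
}
}  // namespace

int main() {
  // A block scope with 3 slots needs 2 header words + 7 slot words = 36 bytes.
  std::printf("%d\n", BlockContextAllocationSize(3));
  return 0;
}
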
+
+
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ //
+ // r3: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = 0;
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+ int size = JSArray::kSize + elements_size;
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ r0,
+ r1,
+ r2,
+ fail,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ ASSERT((elements_size % kPointerSize) == 0);
+ __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+ }
+}
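
GenerateFastCloneShallowArrayCommon clones the boilerplate with a single allocation that covers both the JSArray header and its elements, copies the header word by word, and then rewrites the elements pointer so it refers to the fresh copy. A loose C++ analogue of that pattern, using a made-up three-word header as a stand-in for JSArray (the layout is an assumption, not V8's real object layout):

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

struct FakeArrayHeader {   // hypothetical stand-in for the JSArray header
  uintptr_t map;
  uintptr_t elements;      // points at the elements store
  uintptr_t length;
};

// One allocation for header + elements, then the same elements-pointer fix-up
// the stub performs with r2/r3 after copying the header.
FakeArrayHeader* CloneShallow(const FakeArrayHeader* boilerplate,
                              std::size_t elements_size) {
  char* copy =
      static_cast<char*>(std::malloc(sizeof(FakeArrayHeader) + elements_size));
  std::memcpy(copy, boilerplate, sizeof(FakeArrayHeader));
  FakeArrayHeader* result = reinterpret_cast<FakeArrayHeader*>(copy);
  if (elements_size > 0) {
    std::memcpy(copy + sizeof(FakeArrayHeader),
                reinterpret_cast<const void*>(boilerplate->elements),
                elements_size);
    result->elements =
        reinterpret_cast<uintptr_t>(copy + sizeof(FakeArrayHeader));
  }
  return result;
}
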
+
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -196,10 +311,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [sp + kPointerSize]: literal index.
// [sp + (2 * kPointerSize)]: literals array.
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
// Load boilerplate object into r3 and check if we need to create a
// boilerplate.
Label slow_case;
@@ -207,64 +318,111 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case);
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &check_fast_elements);
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&check_fast_elements);
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &double_elements);
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
+
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
__ push(r3);
__ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, expected_map_index);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, expected_map_index);
__ Assert(eq, message);
__ pop(r3);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- &slow_case,
- TAG_OBJECT);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
- // Copy the elements array.
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: object literal flags.
+ // [sp + kPointerSize]: constant properties.
+ // [sp + (2 * kPointerSize)]: literal index.
+ // [sp + (3 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into r3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+ __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+ __ cmp(r0, Operand(size >> kPointerSizeLog2));
+ __ b(ne, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
}
// Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
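
The cmp against Operand(size >> kPointerSizeLog2) works because the map records the instance size as a count of pointer-sized words, while size here is computed in bytes. A quick worked example, assuming 4-byte pointers (kPointerSizeLog2 == 2) and a three-word JSObject header:

  size = JSObject::kHeaderSize + length_ * kPointerSize
       = 12 + 5 * 4 = 32 bytes          (for length_ == 5)
  size >> kPointerSizeLog2 = 32 >> 2 = 8 words

so the stub falls back to slow_case whenever the boilerplate's recorded instance size in words differs from this statically computed word count.
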
@@ -838,9 +996,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 0, 2);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@@ -857,6 +1017,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+ // These variants are compiled ahead of time. See next method.
+ if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+ return true;
+ }
+ if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+ return true;
+ }
+ // Other register combinations are generated as and when they are needed,
+ // so it is unsafe to call them from stubs (we can't generate a stub while
+ // we are generating a stub).
+ return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+ WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+ WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+ stub1.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1197,6 +1380,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
+
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@@ -1214,7 +1399,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1791,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@@ -1713,6 +1900,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ stm(db_w, sp, kCallerSaved | lr.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = r1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ }
+ __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
+}
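
For reference, the stack space reserved for the optional VFP state above is kDoubleSize * DwVfpRegister::kNumRegisters; assuming 8-byte doubles and the 16 d-registers d0..d15 of the VFP3 register file, that is 8 * 16 = 128 bytes, with each register d<i> saved at sp + i * 8 and reloaded from the same slot before the frame is popped.
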
+
+
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@@ -1866,12 +2088,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ }
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +2135,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ }
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@@ -2028,6 +2252,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -3086,6 +3314,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmp(r3, r5);
__ b(ne, &calculate);
// Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into r0.
__ pop();
@@ -3098,6 +3329,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
} // if (CpuFeatures::IsSupported(VFP3))
__ bind(&calculate);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
ExternalReference runtime_function =
@@ -3133,10 +3367,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@@ -3149,14 +3384,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
}
@@ -3173,6 +3409,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
+ AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3182,6 +3419,10 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
__ CallCFunction(ExternalReference::math_cos_double_function(isolate),
0, 1);
break;
+ case TranscendentalCache::TAN:
+ __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+ 0, 1);
+ break;
case TranscendentalCache::LOG:
__ CallCFunction(ExternalReference::math_log_double_function(isolate),
0, 1);
@@ -3199,6 +3440,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -3268,11 +3510,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3298,11 +3543,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3319,6 +3567,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@@ -3430,8 +3709,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r3, MemOperand(ip));
+ __ mov(r3, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@@ -3469,6 +3747,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@@ -3527,7 +3806,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// [sp+0]: argv
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
@@ -3590,31 +3869,33 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&cont);
__ push(ip);
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
// Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r5, MemOperand(ip));
+ __ mov(r5, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@@ -3708,7 +3989,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register inline_site = r9;
const Register scratch = r2;
- const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+ const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
Label slow, loop, is_instance, is_not_instance, not_js_object;
@@ -3738,7 +4019,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@@ -3759,7 +4040,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
- __ str(map, MemOperand(scratch));
+ __ ldr(scratch, MemOperand(scratch));
+ __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -3851,10 +4133,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- __ EnterInternalFrame();
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4250,10 +4533,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4375,25 +4654,39 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label seq_string;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string.
- __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
+ // First check for flat string. None of the following string type tests will
+ // succeed if subject is not a string or a short external string.
+ __ and_(r1,
+ r0,
+ Operand(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask),
+ SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
+ // r1: whether subject is a string and if yes, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
__ b(lt, &cons_string);
- __ b(eq, &runtime);
+ __ b(eq, &external_string);
+
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
// String is sliced.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4404,8 +4697,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// String is a cons string, check whether it is flat.
__ bind(&cons_string);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
- __ cmp(r0, r1);
+ __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
// Is first part of cons or parent of slice a flat string?
@@ -4414,7 +4706,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &runtime);
+ __ b(ne, &external_string);
+
__ bind(&seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -4480,8 +4773,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4824,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r1, MemOperand(r1, 0));
+ __ mov(r1, Operand(isolate->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4866,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+ __ mov(r2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ r2,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -4615,6 +4915,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
+ // External string. Short external strings have already been ruled out.
+ // r0: scratch
+ __ bind(&external_string);
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ tst(r0, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found");
+ }
+ __ ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(subject,
+ subject,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ jmp(&seq_string);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4712,7 +5032,24 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // r1 : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -4727,16 +5064,12 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
__ JumpIfSmi(r1, &non_function);
@@ -4774,7 +5107,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_FUNCTION);
+ __ SetCallKind(r5, CALL_AS_METHOD);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -4855,100 +5188,41 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
+ __ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
+ __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
- // Handle non-flat strings.
- __ and_(result_, result_, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(result_, Operand(kExternalStringTag));
- __ b(gt, &sliced_string);
- __ b(eq, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ add(scratch_, scratch_, result_);
- __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
- // Assure that we are dealing with a sequential string. Go to runtime if not.
- __ bind(&assure_seq_string);
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // Check that parent is not an external string. Go to runtime otherwise.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(ne, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(result_, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
__ mov(result_, Operand(result_, LSL, kSmiTagSize));
__ bind(&exit_);
}
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- scratch_,
+ result_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ Push(object_, index_);
+ __ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4959,15 +5233,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
+ __ Move(index_, r0);
__ pop(object_);
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4976,6 +5249,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
+ __ mov(index_, Operand(index_, LSL, kSmiTagSize));
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
__ Move(result_, r0);
@@ -5012,7 +5286,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5037,7 +5312,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -5321,11 +5597,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ cmp(undefined, candidate);
__ b(eq, not_found);
- // Must be null (deleted entry).
+ // Must be the hole (deleted entry).
if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or null");
+ __ Assert(eq, "oddball in symbol table is not undefined or the hole");
}
__ jmp(&next_probe[i]);
@@ -5363,11 +5639,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
// hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
+ __ add(hash, character, Operand(character, LSL, 10));
// hash ^= hash >> 6;
__ eor(hash, hash, Operand(hash, LSR, 6));
}
@@ -5392,12 +5664,13 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// hash ^= hash >> 11;
__ eor(hash, hash, Operand(hash, LSR, 11));
// hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15));
+ __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
- __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
+ uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+ __ and_(hash, hash, Operand(kHashShiftCutOffMask));
// if (hash == 0) hash = 27;
- __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
+ __ mov(hash, Operand(27), LeaveCC, eq);
}
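
Together with the unchanged middle steps, the commented lines in these two hash helpers describe a Jenkins-style one-at-a-time string hash. The following plain C++ sketch mirrors those comments; the per-character step, the hash += hash << 3 line outside this hunk's context, and the value of String::kHashShift are assumptions filled in for illustration, so treat it as a model of the algorithm rather than a drop-in reimplementation.

#include <cstdint>
#include <cstdio>

namespace {
const int kHashShift = 2;   // assumed placeholder for String::kHashShift

uint32_t HashInit(uint32_t c) {
  uint32_t hash = c + (c << 10);   // hash = character + (character << 10)
  hash ^= hash >> 6;               // hash ^= hash >> 6
  return hash;
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
  // Middle step, not part of the hunks above; included for completeness.
  hash += c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;                        // assumed preceding step
  hash ^= hash >> 11;                       // hash ^= hash >> 11
  hash += hash << 15;                       // hash += hash << 15
  hash &= (1u << (32 - kHashShift)) - 1;    // keep only the usable hash bits
  if (hash == 0) hash = 27;                 // if (hash == 0) hash = 27
  return hash;
}
}  // namespace

int main() {
  uint32_t h = HashInit('f');
  h = HashAddCharacter(h, 'o');
  h = HashAddCharacter(h, 'o');
  std::printf("%u\n", HashGetHash(h));
  return 0;
}
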
@@ -5612,15 +5885,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r3: from index (untagged smi)
// r6 (a.k.a. to): to (smi)
// r7 (a.k.a. from): from offset (smi)
- Label allocate_slice, sliced_string, seq_string;
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kStringRepresentationMask));
- __ b(eq, &seq_string);
+ Label allocate_slice, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ tst(r1, Operand(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ b(eq, &runtime);
+ __ b(eq, &seq_or_external_string);
__ tst(r1, Operand(kSlicedNotConsMask));
__ b(ne, &sliced_string);
@@ -5639,8 +5909,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
__ jmp(&allocate_slice);
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the correct register.
__ mov(r5, r0);
__ bind(&allocate_slice);
@@ -6366,12 +6636,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ push(ip);
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6410,14 +6681,13 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -6475,14 +6745,12 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
__ tst(r0, Operand(r0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
__ b(ne, miss);
- return result;
}
@@ -6497,6 +6765,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
+ ASSERT(!elements.is(scratch1));
+ ASSERT(!elements.is(scratch2));
+ ASSERT(!name.is(scratch1));
+ ASSERT(!name.is(scratch2));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -6540,8 +6813,14 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
~(scratch1.bit() | scratch2.bit());
__ stm(db_w, sp, spill_mask);
- __ Move(r0, elements);
- __ Move(r1, name);
+ if (name.is(r0)) {
+ ASSERT(!elements.is(r1));
+ __ Move(r1, name);
+ __ Move(r0, elements);
+ } else {
+ __ Move(r0, elements);
+ __ Move(r1, name);
+ }
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
__ tst(r0, Operand(r0));
@@ -6554,6 +6833,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@@ -6643,6 +6924,333 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { r6, r4, r7, EMIT_REMEMBERED_SET },
+ { r6, r2, r7, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+ // Also used in KeyedStoreIC::GenerateGeneric.
+ { r3, r4, r5, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { r4, r1, r2, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r1, r2, r3, EMIT_REMEMBERED_SET },
+ { r3, r2, r1, EMIT_REMEMBERED_SET },
+ // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r2, r1, r3, EMIT_REMEMBERED_SET },
+ { r3, r1, r2, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { r4, r2, r3, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { r2, r3, r9, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { r6, r2, r0, EMIT_REMEMBERED_SET },
+ { r2, r6, r9, EMIT_REMEMBERED_SET },
+ // StoreArrayLiteralElementStub::Generate
+ { r5, r0, r6, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+ return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_ value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a compare instructions (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
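
The two leading branches act as a three-way mode switch: while incremental marking is off they are patched into nops so the stub only updates the remembered set, and when (compacting) incremental marking starts one of them is patched back into a real branch. A conceptual C++ model of that dispatch follows; every name in it is invented for illustration, since the real switching is done by patching the first two machine instructions in place rather than by testing a flag at run time.

#include <cstdio>

namespace {
// Invented stand-ins; the real stub encodes the mode in its first two
// instructions (see RecordWriteStub::Patch).
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

void UpdateRememberedSet() { std::puts("remembered set updated"); }
void InformIncrementalMarker(bool compacting) {
  std::puts(compacting ? "marker informed (compacting)" : "marker informed");
}

// Roughly what one invocation of the generated stub amounts to, per mode.
void RecordWrite(Mode mode, bool emit_remembered_set) {
  if (mode != STORE_BUFFER_ONLY) {
    InformIncrementalMarker(mode == INCREMENTAL_COMPACTION);
  }
  if (emit_remembered_set) {
    UpdateRememberedSet();
  }
}
}  // namespace

int main() {
  RecordWrite(STORE_BUFFER_ONLY, true);
  RecordWrite(INCREMENTAL, true);
  return 0;
}
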
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(r0));
+ __ Move(address, regs_.address());
+ __ Move(r0, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(r1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ ldr(r1, MemOperand(address, 0));
+ }
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : element value to store
+ // -- r1 : array literal
+ // -- r2 : map of array literal
+ // -- r3 : element index as smi
+ // -- r4 : array literal index in function as smi
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ __ CheckFastElements(r2, r5, &double_elements);
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(r0, &smi_element);
+ __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+
+ // Store into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ bind(&slow_elements);
+ // call.
+ __ Push(r1, r3, r0);
+ __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
+ __ Push(r5, r4);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r0, MemOperand(r6, 0));
+ // Update the write barrier for the array store.
+ __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10,
+ &slow_elements);
+ __ Ret();
+}
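
StoreArrayLiteralElementStub is essentially a four-way dispatch on the literal's ElementsKind and on whether the value being stored is a smi. A compact C++ rendering of that decision tree, with invented enum values and printouts used purely to make the control flow explicit (the real checks read the map in r2 and fall back to the runtime for anything unexpected):

#include <cstdio>

namespace {
enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

void StoreArrayLiteralElement(ElementsKind kind, bool value_is_smi) {
  if (kind == FAST_DOUBLE_ELEMENTS) {
    std::puts("store as unboxed double (StoreNumberToDoubleElements)");
  } else if (value_is_smi) {
    std::puts("raw store; a smi never needs a write barrier");
  } else if (kind == FAST_ELEMENTS) {
    std::puts("store tagged pointer + RecordWrite barrier");
  } else {  // FAST_SMI_ONLY_ELEMENTS receiving a non-smi value
    std::puts("elements transition needed: tail-call the runtime");
  }
}
}  // namespace

int main() {
  StoreArrayLiteralElement(FAST_SMI_ONLY_ELEMENTS, false);
  StoreArrayLiteralElement(FAST_ELEMENTS, true);
  return 0;
}
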
+
#undef __
} } // namespace v8::internal