Diffstat (limited to 'src/ia32')
-rw-r--r--  src/ia32/assembler-ia32-inl.h    |   24
-rw-r--r--  src/ia32/assembler-ia32.cc       |   15
-rw-r--r--  src/ia32/assembler-ia32.h        |    1
-rw-r--r--  src/ia32/builtins-ia32.cc        |   22
-rw-r--r--  src/ia32/codegen-ia32.cc         |  772
-rw-r--r--  src/ia32/codegen-ia32.h          |   33
-rw-r--r--  src/ia32/debug-ia32.cc           |   44
-rw-r--r--  src/ia32/disasm-ia32.cc          |    1
-rw-r--r--  src/ia32/fast-codegen-ia32.cc    |  954
-rw-r--r--  src/ia32/fast-codegen-ia32.h     |  155
-rw-r--r--  src/ia32/full-codegen-ia32.cc    |  187
-rw-r--r--  src/ia32/ic-ia32.cc              |   27
-rw-r--r--  src/ia32/macro-assembler-ia32.cc |   81
-rw-r--r--  src/ia32/macro-assembler-ia32.h  |   35
-rw-r--r--  src/ia32/stub-cache-ia32.cc      |   47
-rw-r--r--  src/ia32/virtual-frame-ia32.h    |   16
16 files changed, 936 insertions, 1478 deletions
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h index 7fa151e9..ecbdfdcf 100644 --- a/src/ia32/assembler-ia32-inl.h +++ b/src/ia32/assembler-ia32-inl.h @@ -183,6 +183,30 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { } +template<typename StaticVisitor> +void RelocInfo::Visit() { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + StaticVisitor::VisitPointer(target_object_address()); + } else if (RelocInfo::IsCodeTarget(mode)) { + StaticVisitor::VisitCodeTarget(this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + StaticVisitor::VisitExternalReference(target_reference_address()); +#ifdef ENABLE_DEBUGGER_SUPPORT + } else if (Debug::has_break_points() && + ((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence()))) { + StaticVisitor::VisitDebugTarget(this); +#endif + } else if (mode == RelocInfo::RUNTIME_ENTRY) { + StaticVisitor::VisitRuntimeEntry(this); + } +} + + + Immediate::Immediate(int x) { x_ = x; rmode_ = RelocInfo::NONE; diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc index 6c830cba..2565acb5 100644 --- a/src/ia32/assembler-ia32.cc +++ b/src/ia32/assembler-ia32.cc @@ -1142,6 +1142,21 @@ void Assembler::rcl(Register dst, uint8_t imm8) { } +void Assembler::rcr(Register dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + ASSERT(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + EMIT(0xD8 | dst.code()); + } else { + EMIT(0xC1); + EMIT(0xD8 | dst.code()); + EMIT(imm8); + } +} + + void Assembler::sar(Register dst, uint8_t imm8) { EnsureSpace ensure_space(this); last_pc_ = pc_; diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h index c76c55cf..8a5a4c5f 100644 --- a/src/ia32/assembler-ia32.h +++ b/src/ia32/assembler-ia32.h @@ -625,6 +625,7 @@ class Assembler : public Malloced { void or_(const Operand& dst, const Immediate& x); void rcl(Register dst, uint8_t imm8); + void rcr(Register dst, uint8_t imm8); void sar(Register dst, uint8_t imm8); void sar_cl(Register dst); diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc index 3adb014b..35a90a4a 100644 --- a/src/ia32/builtins-ia32.cc +++ b/src/ia32/builtins-ia32.cc @@ -429,6 +429,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { } +void Builtins::Generate_LazyCompile(MacroAssembler* masm) { + // Enter an internal frame. + __ EnterInternalFrame(); + + // Push a copy of the function onto the stack. + __ push(edi); + + __ push(edi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyCompile, 1); + __ pop(edi); + + // Tear down temporary frame. + __ LeaveInternalFrame(); + + // Do a tail-call of the compiled function. + __ lea(ecx, FieldOperand(eax, Code::kHeaderSize)); + __ jmp(Operand(ecx)); +} + + void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. Make sure we have at least one argument. 
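The Generate_LazyCompile builtin above is the trampoline half of a lazy-compilation scheme: a fresh closure's code field starts out pointing at this builtin, and the first call compiles the function and tail-calls the result, so later calls dispatch straight to the compiled body. A minimal C++ sketch of that flow, with illustrative stand-in names (JSFunction, Entry, CompiledBody are not V8's types):

#include <cstdio>

struct JSFunction;
typedef void (*Entry)(JSFunction*);
struct JSFunction { Entry code; };

void CompiledBody(JSFunction*) { std::puts("compiled body"); }

// Stands in for Generate_LazyCompile: compile once, install, tail-call.
void LazyCompileTrampoline(JSFunction* f) {
  f->code = CompiledBody;  // Runtime::kLazyCompile installs the real code.
  f->code(f);              // Tail-call of the compiled function.
}

// First call compiles and runs; subsequent calls skip the trampoline:
//   JSFunction f = { LazyCompileTrampoline }; f.code(&f); f.code(&f);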
{ Label done; @@ -548,7 +568,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); __ SmiUntag(ebx); - __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); + __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset)); __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); __ cmp(eax, Operand(ebx)); __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline))); diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc index ba7785b2..02a03fc7 100644 --- a/src/ia32/codegen-ia32.cc +++ b/src/ia32/codegen-ia32.cc @@ -202,105 +202,92 @@ void CodeGenerator::Generate(CompilationInfo* info) { // esi: callee's context allocator_->Initialize(); - if (info->mode() == CompilationInfo::PRIMARY) { - frame_->Enter(); - - // Allocate space for locals and initialize them. - frame_->AllocateStackSlots(); - - // Allocate the local context if needed. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - if (heap_slots > 0) { - Comment cmnt(masm_, "[ allocate local context"); - // Allocate local context. - // Get outer context and create a new context based on it. - frame_->PushFunction(); - Result context; - if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); - context = frame_->CallStub(&stub, 1); - } else { - context = frame_->CallRuntime(Runtime::kNewContext, 1); - } + frame_->Enter(); + + // Allocate space for locals and initialize them. + frame_->AllocateStackSlots(); + + // Allocate the local context if needed. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment cmnt(masm_, "[ allocate local context"); + // Allocate local context. + // Get outer context and create a new context based on it. + frame_->PushFunction(); + Result context; + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + context = frame_->CallStub(&stub, 1); + } else { + context = frame_->CallRuntime(Runtime::kNewContext, 1); + } - // Update context local. - frame_->SaveContextRegister(); + // Update context local. + frame_->SaveContextRegister(); - // Verify that the runtime call result and esi agree. - if (FLAG_debug_code) { - __ cmp(context.reg(), Operand(esi)); - __ Assert(equal, "Runtime::NewContext should end up in esi"); - } + // Verify that the runtime call result and esi agree. + if (FLAG_debug_code) { + __ cmp(context.reg(), Operand(esi)); + __ Assert(equal, "Runtime::NewContext should end up in esi"); } + } - // TODO(1241774): Improve this code: - // 1) only needed if we have a context - // 2) no need to recompute context ptr every single time - // 3) don't copy parameter operand code from SlotOperand! - { - Comment cmnt2(masm_, "[ copy context parameters into .context"); - // Note that iteration order is relevant here! If we have the same - // parameter twice (e.g., function (x, y, x)), and that parameter - // needs to be copied into the context, it must be the last argument - // passed to the parameter that needs to be copied. This is a rare - // case so we don't check for it, instead we rely on the copying - // order: such a parameter is copied repeatedly into the same - // context location and thus the last value is what is seen inside - // the function. 
- for (int i = 0; i < scope()->num_parameters(); i++) { - Variable* par = scope()->parameter(i); - Slot* slot = par->slot(); - if (slot != NULL && slot->type() == Slot::CONTEXT) { - // The use of SlotOperand below is safe in unspilled code - // because the slot is guaranteed to be a context slot. - // - // There are no parameters in the global scope. - ASSERT(!scope()->is_global_scope()); - frame_->PushParameterAt(i); - Result value = frame_->Pop(); - value.ToRegister(); - - // SlotOperand loads context.reg() with the context object - // stored to, used below in RecordWrite. - Result context = allocator_->Allocate(); - ASSERT(context.is_valid()); - __ mov(SlotOperand(slot, context.reg()), value.reg()); - int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; - Result scratch = allocator_->Allocate(); - ASSERT(scratch.is_valid()); - frame_->Spill(context.reg()); - frame_->Spill(value.reg()); - __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg()); - } + // TODO(1241774): Improve this code: + // 1) only needed if we have a context + // 2) no need to recompute context ptr every single time + // 3) don't copy parameter operand code from SlotOperand! + { + Comment cmnt2(masm_, "[ copy context parameters into .context"); + // Note that iteration order is relevant here! If we have the same + // parameter twice (e.g., function (x, y, x)), and that parameter + // needs to be copied into the context, it must be the last argument + // passed to the parameter that needs to be copied. This is a rare + // case so we don't check for it, instead we rely on the copying + // order: such a parameter is copied repeatedly into the same + // context location and thus the last value is what is seen inside + // the function. + for (int i = 0; i < scope()->num_parameters(); i++) { + Variable* par = scope()->parameter(i); + Slot* slot = par->slot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + // The use of SlotOperand below is safe in unspilled code + // because the slot is guaranteed to be a context slot. + // + // There are no parameters in the global scope. + ASSERT(!scope()->is_global_scope()); + frame_->PushParameterAt(i); + Result value = frame_->Pop(); + value.ToRegister(); + + // SlotOperand loads context.reg() with the context object + // stored to, used below in RecordWrite. + Result context = allocator_->Allocate(); + ASSERT(context.is_valid()); + __ mov(SlotOperand(slot, context.reg()), value.reg()); + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + Result scratch = allocator_->Allocate(); + ASSERT(scratch.is_valid()); + frame_->Spill(context.reg()); + frame_->Spill(value.reg()); + __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg()); } } + } - // Store the arguments object. This must happen after context - // initialization because the arguments object may be stored in - // the context. - if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) { - StoreArgumentsObject(true); - } - - // Initialize ThisFunction reference if present. - if (scope()->is_function_scope() && scope()->function() != NULL) { - frame_->Push(Factory::the_hole_value()); - StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT); - } - } else { - // When used as the secondary compiler for splitting, ebp, esi, - // and edi have been pushed on the stack. Adjust the virtual - // frame to match this state. - frame_->Adjust(3); - allocator_->Unuse(edi); + // Store the arguments object. 
This must happen after context + // initialization because the arguments object may be stored in + // the context. + if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) { + StoreArgumentsObject(true); + } - // Bind all the bailout labels to the beginning of the function. - List<CompilationInfo::Bailout*>* bailouts = info->bailouts(); - for (int i = 0; i < bailouts->length(); i++) { - __ bind(bailouts->at(i)->label()); - } + // Initialize ThisFunction reference if present. + if (scope()->is_function_scope() && scope()->function() != NULL) { + frame_->Push(Factory::the_hole_value()); + StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT); } + // Initialize the function return target after the locals are set // up, because it needs the expected frame height from the frame. function_return_.set_direction(JumpTarget::BIDIRECTIONAL); @@ -1038,7 +1025,11 @@ const char* GenericBinaryOpStub::GetName() { } -// Call the specialized stub for a binary operation. +// Perform or call the specialized stub for a binary operation. Requires the +// three registers left, right and dst to be distinct and spilled. This +// deferred operation has up to three entry points: The main one calls the +// runtime system. The second is for when the result is a non-Smi. The +// third is for when at least one of the inputs is non-Smi and we have SSE2. class DeferredInlineBinaryOperation: public DeferredCode { public: DeferredInlineBinaryOperation(Token::Value op, @@ -1051,11 +1042,23 @@ class DeferredInlineBinaryOperation: public DeferredCode { : op_(op), dst_(dst), left_(left), right_(right), left_info_(left_info), right_info_(right_info), mode_(mode) { set_comment("[ DeferredInlineBinaryOperation"); + ASSERT(!left.is(right)); } virtual void Generate(); + // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and + // Exit(). + virtual bool AutoSaveAndRestore() { return false; } + + void JumpToAnswerOutOfRange(Condition cond); + void JumpToConstantRhs(Condition cond, Smi* smi_value); + Label* NonSmiInputLabel(); + private: + void GenerateAnswerOutOfRange(); + void GenerateNonSmiInput(); + Token::Value op_; Register dst_; Register left_; @@ -1063,15 +1066,42 @@ class DeferredInlineBinaryOperation: public DeferredCode { TypeInfo left_info_; TypeInfo right_info_; OverwriteMode mode_; + Label answer_out_of_range_; + Label non_smi_input_; + Label constant_rhs_; + Smi* smi_value_; }; +Label* DeferredInlineBinaryOperation::NonSmiInputLabel() { + if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) { + return &non_smi_input_; + } else { + return entry_label(); + } +} + + +void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) { + __ j(cond, &answer_out_of_range_); +} + + +void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond, + Smi* smi_value) { + smi_value_ = smi_value; + __ j(cond, &constant_rhs_); +} + + void DeferredInlineBinaryOperation::Generate() { - Label done; - if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) || - (op_ ==Token::SUB) || - (op_ == Token::MUL) || - (op_ == Token::DIV))) { + // Registers are not saved implicitly for this stub, so we should not + // tread on the registers that were not passed to us. 
+ if (CpuFeatures::IsSupported(SSE2) && + ((op_ == Token::ADD) || + (op_ == Token::SUB) || + (op_ == Token::MUL) || + (op_ == Token::DIV))) { CpuFeatures::Scope use_sse2(SSE2); Label call_runtime, after_alloc_failure; Label left_smi, right_smi, load_right, do_op; @@ -1131,7 +1161,6 @@ void DeferredInlineBinaryOperation::Generate() { __ cvtsi2sd(xmm1, Operand(right_)); __ SmiTag(right_); if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { - Label alloc_failure; __ push(left_); __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure); __ pop(left_); @@ -1146,19 +1175,200 @@ void DeferredInlineBinaryOperation::Generate() { default: UNREACHABLE(); } __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); - __ jmp(&done); + Exit(); + __ bind(&after_alloc_failure); __ pop(left_); __ bind(&call_runtime); } + // Register spilling is not done implicitly for this stub. + // We can't postpone it any more now though. + SaveRegisters(); + GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB, TypeInfo::Combine(left_info_, right_info_)); stub.GenerateCall(masm_, left_, right_); if (!dst_.is(eax)) __ mov(dst_, eax); - __ bind(&done); + RestoreRegisters(); + Exit(); + + if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) { + GenerateNonSmiInput(); + } + if (answer_out_of_range_.is_linked()) { + GenerateAnswerOutOfRange(); + } +} + + +void DeferredInlineBinaryOperation::GenerateNonSmiInput() { + // We know at least one of the inputs was not a Smi. + // This is a third entry point into the deferred code. + // We may not overwrite left_ because we want to be able + // to call the handling code for non-smi answer and it + // might want to overwrite the heap number in left_. + ASSERT(!right_.is(dst_)); + ASSERT(!left_.is(dst_)); + ASSERT(!left_.is(right_)); + // This entry point is used for bit ops where the right hand side + // is a constant Smi and the left hand side is a heap object. It + // is also used for bit ops where both sides are unknown, but where + // at least one of them is a heap object. + bool rhs_is_constant = constant_rhs_.is_linked(); + // We can't generate code for both cases. + ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked()); + + if (FLAG_debug_code) { + __ int3(); // We don't fall through into this code. + } + + __ bind(&non_smi_input_); + + if (rhs_is_constant) { + __ bind(&constant_rhs_); + // In this case the input is a heap object and it is in the dst_ register. + // The left_ and right_ registers have not been initialized yet. + __ mov(right_, Immediate(smi_value_)); + __ mov(left_, Operand(dst_)); + if (!CpuFeatures::IsSupported(SSE2)) { + __ jmp(entry_label()); + return; + } else { + CpuFeatures::Scope use_sse2(SSE2); + __ JumpIfNotNumber(dst_, left_info_, entry_label()); + __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label()); + __ SmiUntag(right_); + } + } else { + // We know we have SSE2 here because otherwise the label is not linked (see + // NonSmiInputLabel). + CpuFeatures::Scope use_sse2(SSE2); + // Handle the non-constant right hand side situation: + if (left_info_.IsSmi()) { + // Right is a heap object. + __ JumpIfNotNumber(right_, right_info_, entry_label()); + __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label()); + __ mov(dst_, Operand(left_)); + __ SmiUntag(dst_); + } else if (right_info_.IsSmi()) { + // Left is a heap object. 
+ __ JumpIfNotNumber(left_, left_info_, entry_label()); + __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label()); + __ SmiUntag(right_); + } else { + // Here we don't know if it's one or both that is a heap object. + Label only_right_is_heap_object, got_both; + __ mov(dst_, Operand(left_)); + __ SmiUntag(dst_, &only_right_is_heap_object); + // Left was a heap object. + __ JumpIfNotNumber(left_, left_info_, entry_label()); + __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label()); + __ SmiUntag(right_, &got_both); + // Both were heap objects. + __ rcl(right_, 1); // Put tag back. + __ JumpIfNotNumber(right_, right_info_, entry_label()); + __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label()); + __ jmp(&got_both); + __ bind(&only_right_is_heap_object); + __ JumpIfNotNumber(right_, right_info_, entry_label()); + __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label()); + __ bind(&got_both); + } + } + ASSERT(op_ == Token::BIT_AND || + op_ == Token::BIT_OR || + op_ == Token::BIT_XOR || + right_.is(ecx)); + switch (op_) { + case Token::BIT_AND: __ and_(dst_, Operand(right_)); break; + case Token::BIT_OR: __ or_(dst_, Operand(right_)); break; + case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break; + case Token::SHR: __ shr_cl(dst_); break; + case Token::SAR: __ sar_cl(dst_); break; + case Token::SHL: __ shl_cl(dst_); break; + default: UNREACHABLE(); + } + if (op_ == Token::SHR) { + // Check that the *unsigned* result fits in a smi. Neither of + // the two high-order bits can be set: + // * 0x80000000: high bit would be lost when smi tagging. + // * 0x40000000: this number would convert to negative when smi + // tagging. + __ test(dst_, Immediate(0xc0000000)); + __ j(not_zero, &answer_out_of_range_); + } else { + // Check that the *signed* result fits in a smi. + __ cmp(dst_, 0xc0000000); + __ j(negative, &answer_out_of_range_); + } + __ SmiTag(dst_); + Exit(); +} + + +void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() { + Label after_alloc_failure2; + Label allocation_ok; + __ bind(&after_alloc_failure2); + // We have to allocate a number, causing a GC, while keeping hold of + // the answer in dst_. The answer is not a Smi. We can't just call the + // runtime shift function here because we already threw away the inputs. + __ xor_(left_, Operand(left_)); + __ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits. + __ rcr(left_, 1); // Rotate with carry. + __ push(dst_); // Smi tagged low 31 bits. + __ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases. + __ CallRuntime(Runtime::kNumberAlloc, 0); + if (!left_.is(eax)) { + __ mov(left_, eax); + } + __ pop(right_); // High bit. + __ pop(dst_); // Low 31 bits. + __ shr(dst_, 1); // Put 0 in top bit. + __ or_(dst_, Operand(right_)); + __ jmp(&allocation_ok); + + // This is the second entry point to the deferred code. It is used only by + // the bit operations. + // The dst_ register has the answer. It is not Smi tagged. If mode_ is + // OVERWRITE_LEFT then left_ must contain either an overwritable heap number + // or a Smi. + // Put a heap number pointer in left_. + __ bind(&answer_out_of_range_); + SaveRegisters(); + if (mode_ == OVERWRITE_LEFT) { + __ test(left_, Immediate(kSmiTagMask)); + __ j(not_zero, &allocation_ok); + } + // This trashes right_. 
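GenerateAnswerOutOfRange above keeps a raw 32-bit answer alive across the GC-ing Runtime::kNumberAlloc call by splitting it into two values that each carry a valid smi tag: shl smi-tags the low 31 bits and moves the top bit into the carry flag, and the newly added rcr rotates that carry into a zeroed register, yielding 0 or 0x80000000 (both smi-tagged, since the low bit is clear). A sketch of the split and rejoin, assuming 31-bit smis with a zero tag:

#include <cstdint>

struct SmiPair { uint32_t low_tagged; uint32_t top_bit; };

SmiPair Split(uint32_t answer) {
  SmiPair p;
  p.low_tagged = answer << 1;        // shl: smi-tag low 31 bits, top bit into carry.
  p.top_bit = answer & 0x80000000u;  // rcr into a zeroed register: 0 or 0x80000000.
  return p;                          // Both values have a clear tag bit, so GC is safe.
}

uint32_t Join(SmiPair p) {
  return (p.low_tagged >> 1) | p.top_bit;  // shr then or_, as in the code above.
}

// Round-trips: Join(Split(0xdeadbeefu)) == 0xdeadbeefu.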
+ __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2); + __ bind(&allocation_ok); + if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) { + CpuFeatures::Scope use_sse2(SSE2); + ASSERT(Token::IsBitOp(op_)); + // Signed conversion. + __ cvtsi2sd(xmm0, Operand(dst_)); + __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0); + } else { + if (op_ == Token::SHR) { + __ push(Immediate(0)); // High word of unsigned value. + __ push(dst_); + __ fild_d(Operand(esp, 0)); + __ Drop(2); + } else { + ASSERT(Token::IsBitOp(op_)); + __ push(dst_); + __ fild_s(Operand(esp, 0)); // Signed conversion. + __ pop(dst_); + } + __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset)); + } + __ mov(dst_, left_); + RestoreRegisters(); + Exit(); } @@ -1499,10 +1709,25 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, TypeInfo left_info, TypeInfo right_info, DeferredCode* deferred) { + JumpIfNotBothSmiUsingTypeInfo(left, + right, + scratch, + left_info, + right_info, + deferred->entry_label()); +} + + +void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, + Register right, + Register scratch, + TypeInfo left_info, + TypeInfo right_info, + Label* on_not_smi) { if (left.is(right)) { if (!left_info.IsSmi()) { __ test(left, Immediate(kSmiTagMask)); - deferred->Branch(not_zero); + __ j(not_zero, on_not_smi); } else { if (FLAG_debug_code) __ AbortIfNotSmi(left); } @@ -1511,17 +1736,17 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, __ mov(scratch, left); __ or_(scratch, Operand(right)); __ test(scratch, Immediate(kSmiTagMask)); - deferred->Branch(not_zero); + __ j(not_zero, on_not_smi); } else { __ test(left, Immediate(kSmiTagMask)); - deferred->Branch(not_zero); + __ j(not_zero, on_not_smi); if (FLAG_debug_code) __ AbortIfNotSmi(right); } } else { if (FLAG_debug_code) __ AbortIfNotSmi(left); if (!right_info.IsSmi()) { __ test(right, Immediate(kSmiTagMask)); - deferred->Branch(not_zero); + __ j(not_zero, on_not_smi); } else { if (FLAG_debug_code) __ AbortIfNotSmi(right); } @@ -1606,13 +1831,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, right->ToRegister(); frame_->Spill(eax); frame_->Spill(edx); + // DeferredInlineBinaryOperation requires all the registers that it is + // told about to be spilled and distinct. + Result distinct_right = frame_->MakeDistinctAndSpilled(left, right); // Check that left and right are smi tagged. DeferredInlineBinaryOperation* deferred = new DeferredInlineBinaryOperation(op, (op == Token::DIV) ? eax : edx, left->reg(), - right->reg(), + distinct_right.reg(), left_type_info, right_type_info, overwrite_mode); @@ -1695,15 +1923,24 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, left->ToRegister(); ASSERT(left->is_register() && !left->reg().is(ecx)); ASSERT(right->is_register() && right->reg().is(ecx)); + if (left_type_info.IsSmi()) { + if (FLAG_debug_code) __ AbortIfNotSmi(left->reg()); + } + if (right_type_info.IsSmi()) { + if (FLAG_debug_code) __ AbortIfNotSmi(right->reg()); + } // We will modify right, it must be spilled. frame_->Spill(ecx); + // DeferredInlineBinaryOperation requires all the registers that it is told + // about to be spilled and distinct. We know that right is ecx and left is + // not ecx. + frame_->Spill(left->reg()); // Use a fresh answer register to avoid spilling the left operand. answer = allocator_->Allocate(); ASSERT(answer.is_valid()); - // Check that both operands are smis using the answer register as a - // temporary. 
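The refactored JumpIfNotBothSmiUsingTypeInfo above (and its new label-taking overload) relies on the usual combined tag test: with kSmiTag == 0 and a one-bit tag, two words are both smis exactly when their bitwise OR has a clear tag bit, so one or_ plus one test replaces two separate checks. As a sketch:

#include <cstdint>

const uint32_t kSmiTagMask = 1;  // One-bit smi tag, tag value 0.

bool BothAreSmis(uint32_t left, uint32_t right) {
  // mov scratch, left / or_ scratch, right / test scratch, kSmiTagMask:
  // the OR preserves a set tag bit from either operand.
  return ((left | right) & kSmiTagMask) == 0;
}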
+ DeferredInlineBinaryOperation* deferred = new DeferredInlineBinaryOperation(op, answer.reg(), @@ -1712,55 +1949,28 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, left_type_info, right_type_info, overwrite_mode); + JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(), + left_type_info, right_type_info, + deferred->NonSmiInputLabel()); - Label do_op, left_nonsmi; - // If right is a smi we make a fast case if left is either a smi - // or a heapnumber. - if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) { - CpuFeatures::Scope use_sse2(SSE2); - __ mov(answer.reg(), left->reg()); - // Fast case - both are actually smis. - if (!left_type_info.IsSmi()) { - __ test(answer.reg(), Immediate(kSmiTagMask)); - __ j(not_zero, &left_nonsmi); - } else { - if (FLAG_debug_code) __ AbortIfNotSmi(left->reg()); - } - if (FLAG_debug_code) __ AbortIfNotSmi(right->reg()); - __ SmiUntag(answer.reg()); - __ jmp(&do_op); + // Untag both operands. + __ mov(answer.reg(), left->reg()); + __ SmiUntag(answer.reg()); + __ SmiUntag(right->reg()); // Right is ecx. - __ bind(&left_nonsmi); - // Branch if not a heapnumber. - __ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset), - Factory::heap_number_map()); - deferred->Branch(not_equal); - - // Load integer value into answer register using truncation. - __ cvttsd2si(answer.reg(), - FieldOperand(answer.reg(), HeapNumber::kValueOffset)); - // Branch if we do not fit in a smi. - __ cmp(answer.reg(), 0xc0000000); - deferred->Branch(negative); - } else { - JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(), - left_type_info, right_type_info, deferred); - - // Untag both operands. - __ mov(answer.reg(), left->reg()); - __ SmiUntag(answer.reg()); - } - - __ bind(&do_op); - __ SmiUntag(ecx); // Perform the operation. + ASSERT(right->reg().is(ecx)); switch (op) { - case Token::SAR: + case Token::SAR: { __ sar_cl(answer.reg()); - // No checks of result necessary + if (!left_type_info.IsSmi()) { + // Check that the *signed* result fits in a smi. + __ cmp(answer.reg(), 0xc0000000); + deferred->JumpToAnswerOutOfRange(negative); + } break; + } case Token::SHR: { - Label result_ok; __ shr_cl(answer.reg()); // Check that the *unsigned* result fits in a smi. Neither of // the two high-order bits can be set: @@ -1773,21 +1983,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, // case. The low bit of the left argument may be lost, but only // in a case where it is dropped anyway. __ test(answer.reg(), Immediate(0xc0000000)); - __ j(zero, &result_ok); - __ SmiTag(ecx); - deferred->Jump(); - __ bind(&result_ok); + deferred->JumpToAnswerOutOfRange(not_zero); break; } case Token::SHL: { - Label result_ok; __ shl_cl(answer.reg()); // Check that the *signed* result fits in a smi. __ cmp(answer.reg(), 0xc0000000); - __ j(positive, &result_ok); - __ SmiTag(ecx); - deferred->Jump(); - __ bind(&result_ok); + deferred->JumpToAnswerOutOfRange(negative); break; } default: @@ -1805,6 +2008,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, // Handle the other binary operations. left->ToRegister(); right->ToRegister(); + // DeferredInlineBinaryOperation requires all the registers that it is told + // about to be spilled. + Result distinct_right = frame_->MakeDistinctAndSpilled(left, right); // A newly allocated register answer is used to hold the answer. The // registers containing left and right are not modified so they don't // need to be spilled in the fast case. 
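The shift cases above check the untagged answer against 0xc0000000 before re-tagging. A signed value fits in a 31-bit smi exactly when it lies in [-2^30, 2^30), which one cmp plus a sign-flag test decides; for SHR the result is unsigned, so neither of the two high bits may be set. A sketch of both predicates, assuming 31-bit smis:

#include <cstdint>

bool SignedFitsInSmi(int32_t x) {
  // cmp x, 0xc0000000 / j(negative, ...): x - 0xc0000000 equals
  // x + 0x40000000 (mod 2^32), whose sign bit is clear exactly when
  // -2^30 <= x < 2^30.
  return ((static_cast<uint32_t>(x) + 0x40000000u) >> 31) == 0;
}

bool UnsignedFitsInSmi(uint32_t x) {
  // test x, 0xc0000000: 0x80000000 would lose its bit when smi tagging,
  // and 0x40000000 would convert to a negative number.
  return (x & 0xc0000000u) == 0;
}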
@@ -1816,12 +2022,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, new DeferredInlineBinaryOperation(op, answer.reg(), left->reg(), - right->reg(), + distinct_right.reg(), left_type_info, right_type_info, overwrite_mode); - JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(), - left_type_info, right_type_info, deferred); + Label non_smi_bit_op; + if (op != Token::BIT_OR) { + JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(), + left_type_info, right_type_info, + deferred->NonSmiInputLabel()); + } __ mov(answer.reg(), left->reg()); switch (op) { @@ -1864,6 +2074,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, case Token::BIT_OR: __ or_(answer.reg(), Operand(right->reg())); + __ test(answer.reg(), Immediate(kSmiTagMask)); + __ j(not_zero, deferred->NonSmiInputLabel()); break; case Token::BIT_AND: @@ -1878,6 +2090,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, UNREACHABLE(); break; } + deferred->BindExit(); left->Unuse(); right->Unuse(); @@ -2363,27 +2576,25 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, case Token::BIT_XOR: case Token::BIT_AND: { operand->ToRegister(); + // DeferredInlineBinaryOperation requires all the registers that it is + // told about to be spilled. frame_->Spill(operand->reg()); - DeferredCode* deferred = NULL; - if (reversed) { - deferred = - new DeferredInlineSmiOperationReversed(op, - operand->reg(), - smi_value, - operand->reg(), - operand->type_info(), - overwrite_mode); - } else { - deferred = new DeferredInlineSmiOperation(op, - operand->reg(), - operand->reg(), - operand->type_info(), - smi_value, - overwrite_mode); - } + DeferredInlineBinaryOperation* deferred = NULL; if (!operand->type_info().IsSmi()) { + Result left = allocator()->Allocate(); + ASSERT(left.is_valid()); + Result right = allocator()->Allocate(); + ASSERT(right.is_valid()); + deferred = new DeferredInlineBinaryOperation( + op, + operand->reg(), + left.reg(), + right.reg(), + operand->type_info(), + TypeInfo::Smi(), + overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT); __ test(operand->reg(), Immediate(kSmiTagMask)); - deferred->Branch(not_zero); + deferred->JumpToConstantRhs(not_zero, smi_value); } else if (FLAG_debug_code) { __ AbortIfNotSmi(operand->reg()); } @@ -2399,7 +2610,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, __ or_(Operand(operand->reg()), Immediate(value)); } } - deferred->BindExit(); + if (deferred != NULL) deferred->BindExit(); answer = *operand; break; } @@ -3212,10 +3423,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand, __ j(zero, &build_args); __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx); __ j(not_equal, &build_args); - __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset)); Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply)); - __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset), - Immediate(apply_code)); + __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code)); __ j(not_equal, &build_args); // Check that applicand is a function. @@ -4389,7 +4598,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { __ mov(ebx, Operand(eax)); // If the property has been removed while iterating, we just skip it. 
- __ cmp(ebx, Factory::null_value()); + __ test(ebx, Operand(ebx)); node->continue_target()->Branch(equal); end_del_check.Bind(); @@ -4397,10 +4606,11 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { // loop. edx: i'th entry of the enum cache (or string there of) frame_->EmitPush(ebx); { Reference each(this, node->each()); - // Loading a reference may leave the frame in an unspilled state. - frame_->SpillAll(); if (!each.is_illegal()) { if (each.size() > 0) { + // Loading a reference may leave the frame in an unspilled state. + frame_->SpillAll(); + // Get the value (under the reference on the stack) from memory. frame_->EmitPush(frame_->ElementAt(each.size())); each.SetValue(NOT_CONST_INIT); frame_->Drop(2); @@ -5502,12 +5712,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { frame_->Push(node->constant_elements()); int length = node->values()->length(); Result clone; - if (node->depth() > 1) { + if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + clone = frame_->CallStub(&stub, 3); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1); + } else if (node->depth() > 1) { clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumLength) { + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - FastCloneShallowArrayStub stub(length); + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); clone = frame_->CallStub(&stub, 3); } frame_->Push(&clone); @@ -5517,12 +5733,9 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { for (int i = 0; i < length; i++) { Expression* value = node->values()->at(i); - // If value is a literal the property value is already set in the - // boilerplate object. - if (value->AsLiteral() != NULL) continue; - // If value is a materialized literal the property value is already set - // in the boilerplate object if it is simple. - if (CompileTimeValue::IsCompileTimeValue(value)) continue; + if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) { + continue; + } // The property must be set by generated code. Load(value); @@ -6539,7 +6752,7 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { } - void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) { +void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) { // This generates a fast version of: // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' || // typeof(arg) == function). @@ -6560,6 +6773,143 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { } +// Deferred code to check whether the String JavaScript object is safe for using +// default value of. This code is called after the bit caching this information +// in the map has been checked with the map for the object in the map_result_ +// register. On return the register map_result_ contains 1 for true and 0 for +// false. +class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { + public: + DeferredIsStringWrapperSafeForDefaultValueOf(Register object, + Register map_result, + Register scratch1, + Register scratch2) + : object_(object), + map_result_(map_result), + scratch1_(scratch1), + scratch2_(scratch2) { } + + virtual void Generate() { + Label false_result; + + // Check that map is loaded as expected. 
+ if (FLAG_debug_code) { + __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ Assert(equal, "Map not in expected register"); + } + + // Check for fast case object. Generate false result for slow case object. + __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset)); + __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset)); + __ cmp(scratch1_, Factory::hash_table_map()); + __ j(equal, &false_result); + + // Look for valueOf symbol in the descriptor array, and indicate false if + // found. The type is not checked, so if it is a transition it is a false + // negative. + __ mov(map_result_, + FieldOperand(map_result_, Map::kInstanceDescriptorsOffset)); + __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset)); + // map_result_: descriptor array + // scratch1_: length of descriptor array + // Calculate the end of the descriptor array. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kPointerSize == 4); + __ lea(scratch1_, + Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize)); + // Calculate location of the first key name. + __ add(Operand(map_result_), + Immediate(FixedArray::kHeaderSize + + DescriptorArray::kFirstIndex * kPointerSize)); + // Loop through all the keys in the descriptor array. If one of these is the + // symbol valueOf the result is false. + Label entry, loop; + __ jmp(&entry); + __ bind(&loop); + __ mov(scratch2_, FieldOperand(map_result_, 0)); + __ cmp(scratch2_, Factory::value_of_symbol()); + __ j(equal, &false_result); + __ add(Operand(map_result_), Immediate(kPointerSize)); + __ bind(&entry); + __ cmp(map_result_, Operand(scratch1_)); + __ j(not_equal, &loop); + + // Reload map as register map_result_ was used as temporary above. + __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset)); + + // If a valueOf property is not found on the object check that it's + // prototype is the un-modified String prototype. If not result is false. + __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset)); + __ test(scratch1_, Immediate(kSmiTagMask)); + __ j(zero, &false_result); + __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset)); + __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(scratch2_, + FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset)); + __ cmp(scratch1_, + CodeGenerator::ContextOperand( + scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); + __ j(not_equal, &false_result); + // Set the bit in the map to indicate that it has been checked safe for + // default valueOf and set true result. + __ or_(FieldOperand(map_result_, Map::kBitField2Offset), + Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ Set(map_result_, Immediate(1)); + __ jmp(exit_label()); + __ bind(&false_result); + // Set false result. + __ Set(map_result_, Immediate(0)); + } + + private: + Register object_; + Register map_result_; + Register scratch1_; + Register scratch2_; +}; + + +void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf( + ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Result obj = frame_->Pop(); // Pop the string wrapper. + obj.ToRegister(); + ASSERT(obj.is_valid()); + if (FLAG_debug_code) { + __ AbortIfSmi(obj.reg()); + } + + // Check whether this map has already been checked to be safe for default + // valueOf. 
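The deferred code above performs the expensive descriptor-array scan at most once per map and then memoizes the verdict in a map bitfield, so the common case is the single test_b at the start of GenerateIsStringWrapperSafeForDefaultValueOf. A sketch of that memoization pattern (the bit position and names here are illustrative, not V8's actual layout):

#include <cstdint>

struct Map { uint8_t bit_field2; };
const uint8_t kSafeForDefaultValueOfBit = 1 << 3;  // Assumed bit position.

bool IsStringWrapperSafe(Map* map, bool (*slow_scan)(const Map*)) {
  if (map->bit_field2 & kSafeForDefaultValueOfBit) return true;  // test_b fast path.
  if (!slow_scan(map)) return false;  // Deferred scan; a false verdict is not cached.
  map->bit_field2 |= kSafeForDefaultValueOfBit;  // or_ into Map::kBitField2Offset.
  return true;
}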
+ Result map_result = allocator()->Allocate(); + ASSERT(map_result.is_valid()); + __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset)); + __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset), + 1 << Map::kStringWrapperSafeForDefaultValueOf); + destination()->true_target()->Branch(not_zero); + + // We need an additional two scratch registers for the deferred code. + Result temp1 = allocator()->Allocate(); + ASSERT(temp1.is_valid()); + Result temp2 = allocator()->Allocate(); + ASSERT(temp2.is_valid()); + + DeferredIsStringWrapperSafeForDefaultValueOf* deferred = + new DeferredIsStringWrapperSafeForDefaultValueOf( + obj.reg(), map_result.reg(), temp1.reg(), temp2.reg()); + deferred->Branch(zero); + deferred->BindExit(); + __ test(map_result.reg(), Operand(map_result.reg())); + obj.Unuse(); + map_result.Unuse(); + temp1.Unuse(); + temp2.Unuse(); + destination()->Split(not_equal); +} + + void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { // This generates a fast version of: // (%_ClassOf(arg) === 'Function') @@ -7254,7 +7604,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) { KeyedLoadIC::kSlowCaseBitFieldMask); deferred->Branch(not_zero); - // Check the object's elements are in fast case. + // Check the object's elements are in fast case and writable. __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset)); __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset), Immediate(Factory::fixed_array_map())); @@ -9149,15 +9499,10 @@ Result CodeGenerator::EmitKeyedLoad() { if (FLAG_debug_code) __ AbortIfNotSmi(key.reg()); } - // Get the elements array from the receiver and check that it - // is not a dictionary. + // Get the elements array from the receiver. __ mov(elements.reg(), FieldOperand(receiver.reg(), JSObject::kElementsOffset)); - if (FLAG_debug_code) { - __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), - Immediate(Factory::fixed_array_map())); - __ Assert(equal, "JSObject with fast elements map has slow elements"); - } + __ AssertFastElements(elements.reg()); // Check that the key is within bounds. __ cmp(key.reg(), @@ -9467,6 +9812,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); + __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx); + // Return and remove the on-stack parameter. __ ret(1 * kPointerSize); @@ -9549,6 +9899,24 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ cmp(ecx, Factory::undefined_value()); __ j(equal, &slow_case); + if (FLAG_debug_code) { + const char* message; + Handle<Map> expected_map; + if (mode_ == CLONE_ELEMENTS) { + message = "Expected (writable) fixed array"; + expected_map = Factory::fixed_array_map(); + } else { + ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); + message = "Expected copy-on-write fixed array"; + expected_map = Factory::fixed_cow_array_map(); + } + __ push(ecx); + __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); + __ Assert(equal, message); + __ pop(ecx); + } + // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. 
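The new COPY_ON_WRITE_ELEMENTS path in VisitArrayLiteral and FastCloneShallowArrayStub above avoids copying element backing stores for flat, shallow literals: when the boilerplate's elements carry the fixed_cow_array_map, a clone can share the elements array by pointer and defer the real copy until a write occurs. A rough sketch of the dispatch, with illustrative names:

enum Mode { CLONE_ELEMENTS, COPY_ON_WRITE_ELEMENTS };

struct Map {};
struct FixedArrayBase { const Map* map; };

// Hypothetical helper mirroring the map comparison in VisitArrayLiteral:
// COW elements are identified purely by their map.
Mode CloneModeFor(const FixedArrayBase* elements, const Map* cow_map) {
  return elements->map == cow_map ? COPY_ON_WRITE_ELEMENTS : CLONE_ELEMENTS;
}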
__ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h index 2368b23c..37b70110 100644 --- a/src/ia32/codegen-ia32.h +++ b/src/ia32/codegen-ia32.h @@ -358,6 +358,10 @@ class CodeGenerator: public AstVisitor { return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); } + static Operand ContextOperand(Register context, int index) { + return Operand(context, Context::SlotOffset(index)); + } + private: // Construction/Destruction explicit CodeGenerator(MacroAssembler* masm); @@ -430,10 +434,6 @@ class CodeGenerator: public AstVisitor { // The following are used by class Reference. void LoadReference(Reference* ref); - static Operand ContextOperand(Register context, int index) { - return Operand(context, Context::SlotOffset(index)); - } - Operand SlotOperand(Slot* slot, Register tmp); Operand ContextSlotOperandCheckExtensions(Slot* slot, @@ -530,7 +530,7 @@ class CodeGenerator: public AstVisitor { // Emits code sequence that jumps to deferred code if the inputs // are not both smis. Cannot be in MacroAssembler because it takes - // advantage of TypeInfo to skip unneeded checks. + // a deferred code object. void JumpIfNotBothSmiUsingTypeInfo(Register left, Register right, Register scratch, @@ -538,6 +538,15 @@ class CodeGenerator: public AstVisitor { TypeInfo right_info, DeferredCode* deferred); + // Emits code sequence that jumps to the label if the inputs + // are not both smis. + void JumpIfNotBothSmiUsingTypeInfo(Register left, + Register right, + Register scratch, + TypeInfo left_info, + TypeInfo right_info, + Label* on_non_smi); + // If possible, combine two constant smi values using op to produce // a smi result, and push it on the virtual frame, all at compile time. // Returns true if it succeeds. Otherwise it has no effect. @@ -644,6 +653,8 @@ class CodeGenerator: public AstVisitor { void GenerateIsSpecObject(ZoneList<Expression*>* args); void GenerateIsFunction(ZoneList<Expression*>* args); void GenerateIsUndetectableObject(ZoneList<Expression*>* args); + void GenerateIsStringWrapperSafeForDefaultValueOf( + ZoneList<Expression*>* args); // Support for construct call checks. void GenerateIsConstructCall(ZoneList<Expression*>* args); @@ -802,6 +813,18 @@ class TranscendentalCacheStub: public CodeStub { }; +class ToBooleanStub: public CodeStub { + public: + ToBooleanStub() { } + + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return ToBoolean; } + int MinorKey() { return 0; } +}; + + // Flag that indicates how to generate code for the stub GenericBinaryOpStub. enum GenericBinaryFlags { NO_GENERIC_BINARY_FLAGS = 0, diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc index dfa66342..b57cf3d0 100644 --- a/src/ia32/debug-ia32.cc +++ b/src/ia32/debug-ia32.cc @@ -254,32 +254,20 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { } -// FrameDropper is a code replacement for a JavaScript frame with possibly -// several frames above. -// There is no calling conventions here, because it never actually gets called, -// it only gets returned to. 
-// Frame structure (conforms InternalFrame structure): -// -- JSFunction -// -- code -// -- SMI maker -// -- context -// -- frame base void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { ExternalReference restarter_frame_function_slot = ExternalReference(Debug_Address::RestarterFrameFunctionPointer()); __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0)); // We do not know our frame height, but set esp based on ebp. - __ lea(esp, Operand(ebp, -4 * kPointerSize)); + __ lea(esp, Operand(ebp, -1 * kPointerSize)); - __ pop(edi); // function - - // Skip code self-reference and marker. - __ add(Operand(esp), Immediate(2 * kPointerSize)); - - __ pop(esi); // Context. + __ pop(edi); // Function. __ pop(ebp); + // Load context from the function. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + // Get function code. __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); @@ -289,27 +277,9 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { __ jmp(Operand(edx)); } -#undef __ - - -// TODO(LiveEdit): consider making it platform-independent. -// TODO(LiveEdit): use more named constants instead of numbers. -Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame, - Handle<Code> code) { - ASSERT(bottom_js_frame->is_java_script()); - - Address fp = bottom_js_frame->fp(); - Memory::Object_at(fp - 4 * kPointerSize) = - Memory::Object_at(fp - 2 * kPointerSize); // Move edi (function). - - Memory::Object_at(fp - 3 * kPointerSize) = *code; - Memory::Object_at(fp - 2 * kPointerSize) = Smi::FromInt(StackFrame::INTERNAL); - - return reinterpret_cast<Object**>(&Memory::Object_at(fp - 4 * kPointerSize)); -} - -const int Debug::kFrameDropperFrameSize = 5; +const bool Debug::kFrameDropperSupported = true; +#undef __ #endif // ENABLE_DEBUGGER_SUPPORT diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc index dc4c27e8..64305ef6 100644 --- a/src/ia32/disasm-ia32.cc +++ b/src/ia32/disasm-ia32.cc @@ -560,6 +560,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) { case kROL: mnem = "rol"; break; case kROR: mnem = "ror"; break; case kRCL: mnem = "rcl"; break; + case kRCR: mnem = "rcr"; break; case kSHL: mnem = "shl"; break; case KSHR: mnem = "shr"; break; case kSAR: mnem = "sar"; break; diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc deleted file mode 100644 index b749e594..00000000 --- a/src/ia32/fast-codegen-ia32.cc +++ /dev/null @@ -1,954 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#if defined(V8_TARGET_ARCH_IA32) - -#include "codegen-inl.h" -#include "fast-codegen.h" -#include "data-flow.h" -#include "scopes.h" - -namespace v8 { -namespace internal { - -#define BAILOUT(reason) \ - do { \ - if (FLAG_trace_bailout) { \ - PrintF("%s\n", reason); \ - } \ - has_supported_syntax_ = false; \ - return; \ - } while (false) - - -#define CHECK_BAILOUT \ - do { \ - if (!has_supported_syntax_) return; \ - } while (false) - - -void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) { - info_ = info; - - // We do not specialize if we do not have a receiver or if it is not a - // JS object with fast mode properties. - if (!info->has_receiver()) BAILOUT("No receiver"); - if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object"); - Handle<JSObject> object = Handle<JSObject>::cast(info->receiver()); - if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode"); - - // We do not support stack or heap slots (both of which require - // allocation). - Scope* scope = info->scope(); - if (scope->num_stack_slots() > 0) { - BAILOUT("Function has stack-allocated locals"); - } - if (scope->num_heap_slots() > 0) { - BAILOUT("Function has context-allocated locals"); - } - - VisitDeclarations(scope->declarations()); - CHECK_BAILOUT; - - // We do not support empty function bodies. - if (info->function()->body()->is_empty()) { - BAILOUT("Function has an empty body"); - } - VisitStatements(info->function()->body()); -} - - -void FastCodeGenSyntaxChecker::VisitDeclarations( - ZoneList<Declaration*>* decls) { - if (!decls->is_empty()) BAILOUT("Function has declarations"); -} - - -void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) { - if (stmts->length() != 1) { - BAILOUT("Function body is not a singleton statement."); - } - Visit(stmts->at(0)); -} - - -void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); -} - - -void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) { - VisitStatements(stmt->statements()); -} - - -void FastCodeGenSyntaxChecker::VisitExpressionStatement( - ExpressionStatement* stmt) { - Visit(stmt->expression()); -} - - -void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) { - // Supported. 
-} - - -void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) { - BAILOUT("IfStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) { - BAILOUT("Continuestatement"); -} - - -void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) { - BAILOUT("BreakStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) { - BAILOUT("ReturnStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitWithEnterStatement( - WithEnterStatement* stmt) { - BAILOUT("WithEnterStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) { - BAILOUT("WithExitStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) { - BAILOUT("SwitchStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) { - BAILOUT("DoWhileStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) { - BAILOUT("WhileStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) { - BAILOUT("ForStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) { - BAILOUT("ForInStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) { - BAILOUT("TryCatchStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitTryFinallyStatement( - TryFinallyStatement* stmt) { - BAILOUT("TryFinallyStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitDebuggerStatement( - DebuggerStatement* stmt) { - BAILOUT("DebuggerStatement"); -} - - -void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) { - BAILOUT("FunctionLiteral"); -} - - -void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - BAILOUT("SharedFunctionInfoLiteral"); -} - - -void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) { - BAILOUT("Conditional"); -} - - -void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) { - // Only global variable references are supported. - Variable* var = expr->var(); - if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable"); - - // Check if the global variable is existing and non-deletable. - if (info()->has_global_object()) { - LookupResult lookup; - info()->global_object()->Lookup(*expr->name(), &lookup); - if (!lookup.IsProperty()) { - BAILOUT("Non-existing global variable"); - } - // We do not handle global variables with accessors or interceptors. - if (lookup.type() != NORMAL) { - BAILOUT("Global variable with accessors or interceptors."); - } - // We do not handle deletable global variables. 
- if (!lookup.IsDontDelete()) { - BAILOUT("Deletable global variable"); - } - } -} - - -void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) { - BAILOUT("Literal"); -} - - -void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) { - BAILOUT("RegExpLiteral"); -} - - -void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) { - BAILOUT("ObjectLiteral"); -} - - -void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) { - BAILOUT("ArrayLiteral"); -} - - -void FastCodeGenSyntaxChecker::VisitCatchExtensionObject( - CatchExtensionObject* expr) { - BAILOUT("CatchExtensionObject"); -} - - -void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) { - // Simple assignments to (named) this properties are supported. - if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment"); - - Property* prop = expr->target()->AsProperty(); - if (prop == NULL) BAILOUT("Non-property assignment"); - VariableProxy* proxy = prop->obj()->AsVariableProxy(); - if (proxy == NULL || !proxy->var()->is_this()) { - BAILOUT("Non-this-property assignment"); - } - if (!prop->key()->IsPropertyName()) { - BAILOUT("Non-named-property assignment"); - } - - // We will only specialize for fields on the object itself. - // Expression::IsPropertyName implies that the name is a literal - // symbol but we do not assume that. - Literal* key = prop->key()->AsLiteral(); - if (key != NULL && key->handle()->IsString()) { - Handle<Object> receiver = info()->receiver(); - Handle<String> name = Handle<String>::cast(key->handle()); - LookupResult lookup; - receiver->Lookup(*name, &lookup); - if (!lookup.IsProperty()) { - BAILOUT("Assigned property not found at compile time"); - } - if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment"); - if (!lookup.type() == FIELD) BAILOUT("Non-field property assignment"); - } else { - UNREACHABLE(); - BAILOUT("Unexpected non-string-literal property key"); - } - - Visit(expr->value()); -} - - -void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) { - BAILOUT("Throw"); -} - - -void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) { - // We support named this property references. - VariableProxy* proxy = expr->obj()->AsVariableProxy(); - if (proxy == NULL || !proxy->var()->is_this()) { - BAILOUT("Non-this-property reference"); - } - if (!expr->key()->IsPropertyName()) { - BAILOUT("Non-named-property reference"); - } - - // We will only specialize for fields on the object itself. - // Expression::IsPropertyName implies that the name is a literal - // symbol but we do not assume that. 
- Literal* key = expr->key()->AsLiteral(); - if (key != NULL && key->handle()->IsString()) { - Handle<Object> receiver = info()->receiver(); - Handle<String> name = Handle<String>::cast(key->handle()); - LookupResult lookup; - receiver->Lookup(*name, &lookup); - if (!lookup.IsProperty()) { - BAILOUT("Referenced property not found at compile time"); - } - if (lookup.holder() != *receiver) BAILOUT("Non-own property reference"); - if (!lookup.type() == FIELD) BAILOUT("Non-field property reference"); - } else { - UNREACHABLE(); - BAILOUT("Unexpected non-string-literal property key"); - } -} - - -void FastCodeGenSyntaxChecker::VisitCall(Call* expr) { - BAILOUT("Call"); -} - - -void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) { - BAILOUT("CallNew"); -} - - -void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) { - BAILOUT("CallRuntime"); -} - - -void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) { - BAILOUT("UnaryOperation"); -} - - -void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) { - BAILOUT("CountOperation"); -} - - -void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) { - // We support bitwise OR. - switch (expr->op()) { - case Token::COMMA: - BAILOUT("BinaryOperation COMMA"); - case Token::OR: - BAILOUT("BinaryOperation OR"); - case Token::AND: - BAILOUT("BinaryOperation AND"); - - case Token::BIT_OR: - // We support expressions nested on the left because they only require - // a pair of registers to keep all intermediate values in registers - // (i.e., the expression stack has height no more than two). - if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right"); - - // We do not allow subexpressions with side effects because we - // (currently) bail out to the beginning of the full function. The - // only expressions with side effects that we would otherwise handle - // are assignments. - if (expr->left()->AsAssignment() != NULL || - expr->right()->AsAssignment() != NULL) { - BAILOUT("subexpression of binary operation has side effects"); - } - - Visit(expr->left()); - CHECK_BAILOUT; - Visit(expr->right()); - break; - - case Token::BIT_XOR: - BAILOUT("BinaryOperation BIT_XOR"); - case Token::BIT_AND: - BAILOUT("BinaryOperation BIT_AND"); - case Token::SHL: - BAILOUT("BinaryOperation SHL"); - case Token::SAR: - BAILOUT("BinaryOperation SAR"); - case Token::SHR: - BAILOUT("BinaryOperation SHR"); - case Token::ADD: - BAILOUT("BinaryOperation ADD"); - case Token::SUB: - BAILOUT("BinaryOperation SUB"); - case Token::MUL: - BAILOUT("BinaryOperation MUL"); - case Token::DIV: - BAILOUT("BinaryOperation DIV"); - case Token::MOD: - BAILOUT("BinaryOperation MOD"); - default: - UNREACHABLE(); - } -} - - -void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) { - BAILOUT("CompareOperation"); -} - - -void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) { - BAILOUT("ThisFunction"); -} - -#undef BAILOUT -#undef CHECK_BAILOUT - - -#define __ ACCESS_MASM(masm()) - -Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) { - // Label the AST before calling MakeCodePrologue, so AST node numbers are - // printed with the AST. - AstLabeler labeler; - labeler.Label(info); - - CodeGenerator::MakeCodePrologue(info); - - const int kInitialBufferSize = 4 * KB; - MacroAssembler masm(NULL, kInitialBufferSize); - - // Generate the fast-path code. 
-  FastCodeGenerator fast_cgen(&masm);
-  fast_cgen.Generate(info);
-  if (fast_cgen.HasStackOverflow()) {
-    ASSERT(!Top::has_pending_exception());
-    return Handle<Code>::null();
-  }
-
-  // Generate the full code for the function in bailout mode, using the same
-  // macro assembler.
-  CodeGenerator cgen(&masm);
-  CodeGeneratorScope scope(&cgen);
-  info->set_mode(CompilationInfo::SECONDARY);
-  cgen.Generate(info);
-  if (cgen.HasStackOverflow()) {
-    ASSERT(!Top::has_pending_exception());
-    return Handle<Code>::null();
-  }
-
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-}
-
-
-Register FastCodeGenerator::accumulator0() { return eax; }
-Register FastCodeGenerator::accumulator1() { return edx; }
-Register FastCodeGenerator::scratch0() { return ecx; }
-Register FastCodeGenerator::scratch1() { return edi; }
-Register FastCodeGenerator::receiver_reg() { return ebx; }
-Register FastCodeGenerator::context_reg() { return esi; }
-
-
-void FastCodeGenerator::EmitLoadReceiver() {
-  // Offset 2 is due to return address and saved frame pointer.
-  int index = 2 + function()->scope()->num_parameters();
-  __ mov(receiver_reg(), Operand(ebp, index * kPointerSize));
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
-  ASSERT(!destination().is(no_reg));
-  ASSERT(cell->IsJSGlobalPropertyCell());
-
-  __ mov(destination(), Immediate(cell));
-  __ mov(destination(),
-         FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
-  if (FLAG_debug_code) {
-    __ cmp(destination(), Factory::the_hole_value());
-    __ Check(not_equal, "DontDelete cells can't contain the hole");
-  }
-
-  // The loaded value is not known to be a smi.
-  clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
-  LookupResult lookup;
-  info()->receiver()->Lookup(*name, &lookup);
-
-  ASSERT(lookup.holder() == *info()->receiver());
-  ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
-  int index = lookup.GetFieldIndex() - map->inobject_properties();
-  int offset = index * kPointerSize;
-
-  // We will emit the write barrier unless the stored value is statically
-  // known to be a smi.
-  bool needs_write_barrier = !is_smi(accumulator0());
-
-  // Perform the store.  Negative offsets are inobject properties.
-  if (offset < 0) {
-    offset += map->instance_size();
-    __ mov(FieldOperand(receiver_reg(), offset), accumulator0());
-    if (needs_write_barrier) {
-      // Preserve receiver from write barrier.
-      __ mov(scratch0(), receiver_reg());
-    }
-  } else {
-    offset += FixedArray::kHeaderSize;
-    __ mov(scratch0(),
-           FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
-    __ mov(FieldOperand(scratch0(), offset), accumulator0());
-  }
-
-  if (needs_write_barrier) {
-    if (destination().is(no_reg)) {
-      // After RecordWrite accumulator0 is only accidentally a smi, but it
-      // is already marked as not known to be one.
-      __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
-    } else {
-      // Copy the value to the other accumulator to preserve a copy from the
-      // write barrier.  One of the accumulators is available as a scratch
-      // register.  Neither is a smi.
-      __ mov(accumulator1(), accumulator0());
-      clear_as_smi(accumulator1());
-      Register value_scratch = other_accumulator(destination());
-      __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
-    }
-  } else if (destination().is(accumulator1())) {
-    __ mov(accumulator1(), accumulator0());
-    // Known to be a smi because we did not need the write barrier.
-    set_as_smi(accumulator1());
-  }
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
-  ASSERT(!destination().is(no_reg));
-  LookupResult lookup;
-  info()->receiver()->Lookup(*name, &lookup);
-
-  ASSERT(lookup.holder() == *info()->receiver());
-  ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
-  int index = lookup.GetFieldIndex() - map->inobject_properties();
-  int offset = index * kPointerSize;
-
-  // Perform the load.  Negative offsets are inobject properties.
-  if (offset < 0) {
-    offset += map->instance_size();
-    __ mov(destination(), FieldOperand(receiver_reg(), offset));
-  } else {
-    offset += FixedArray::kHeaderSize;
-    __ mov(scratch0(),
-           FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
-    __ mov(destination(), FieldOperand(scratch0(), offset));
-  }
-
-  // The loaded value is not known to be a smi.
-  clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
-  if (is_smi(accumulator0()) && is_smi(accumulator1())) {
-    // If both operands are known to be smis then there is no need to check
-    // the operands or result.  There is no need to perform the operation in
-    // an effect context.
-    if (!destination().is(no_reg)) {
-      // Leave the result in the destination register.  Bitwise or is
-      // commutative.
-      __ or_(destination(), Operand(other_accumulator(destination())));
-    }
-  } else {
-    // Left is in accumulator1, right in accumulator0.
-    Label* bailout = NULL;
-    if (destination().is(accumulator0())) {
-      __ mov(scratch0(), accumulator0());
-      __ or_(destination(), Operand(accumulator1()));  // Or is commutative.
-      __ test(destination(), Immediate(kSmiTagMask));
-      bailout = info()->AddBailout(accumulator1(), scratch0());  // Left, right.
-    } else if (destination().is(accumulator1())) {
-      __ mov(scratch0(), accumulator1());
-      __ or_(destination(), Operand(accumulator0()));
-      __ test(destination(), Immediate(kSmiTagMask));
-      bailout = info()->AddBailout(scratch0(), accumulator0());
-    } else {
-      ASSERT(destination().is(no_reg));
-      __ mov(scratch0(), accumulator1());
-      __ or_(scratch0(), Operand(accumulator0()));
-      __ test(scratch0(), Immediate(kSmiTagMask));
-      bailout = info()->AddBailout(accumulator1(), accumulator0());
-    }
-    __ j(not_zero, bailout, not_taken);
-  }
-
-  // If we did not bail out, the result (in fact, both inputs too) is known
-  // to be a smi.
-  set_as_smi(accumulator0());
-  set_as_smi(accumulator1());
-}
-
-
-void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
-  ASSERT(info_ == NULL);
-  info_ = compilation_info;
-  Comment cmnt(masm_, "[ function compiled by fast code generator");
-
-  // Save the caller's frame pointer and set up our own.
-  Comment prologue_cmnt(masm(), ";; Prologue");
-  __ push(ebp);
-  __ mov(ebp, esp);
-  __ push(esi);  // Context.
-  __ push(edi);  // Closure.
-  // Note that we keep a live register reference to esi (context) at this
-  // point.
-
-  Label* bailout_to_beginning = info()->AddBailout();
-  // Receiver (this) is allocated to a fixed register.
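// The map check emitted below compiles, roughly, to a single compare and a
// conditional jump (CheckMap is a thin wrapper over this pattern):
//
//   cmp [receiver_reg + HeapObject::kMapOffset - kHeapObjectTag], <map>
//   jne bailout_to_beginning
//
// Everything after it may assume the receiver's layout, which is what lets
// EmitThisPropertyLoad and EmitThisPropertyStore use fixed field offsets
// with no further checks.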
- if (info()->has_this_properties()) { - Comment cmnt(masm(), ";; MapCheck(this)"); - if (FLAG_print_ir) { - PrintF("#: MapCheck(this)\n"); - } - ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject()); - Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver()); - Handle<Map> map(object->map()); - EmitLoadReceiver(); - __ CheckMap(receiver_reg(), map, bailout_to_beginning, false); - } - - // If there is a global variable access check if the global object is the - // same as at lazy-compilation time. - if (info()->has_globals()) { - Comment cmnt(masm(), ";; MapCheck(GLOBAL)"); - if (FLAG_print_ir) { - PrintF("#: MapCheck(GLOBAL)\n"); - } - ASSERT(info()->has_global_object()); - Handle<Map> map(info()->global_object()->map()); - __ mov(scratch0(), CodeGenerator::GlobalObject()); - __ CheckMap(scratch0(), map, bailout_to_beginning, true); - } - - VisitStatements(function()->body()); - - Comment return_cmnt(masm(), ";; Return(<undefined>)"); - if (FLAG_print_ir) { - PrintF("#: Return(<undefined>)\n"); - } - __ mov(eax, Factory::undefined_value()); - __ mov(esp, ebp); - __ pop(ebp); - __ ret((scope()->num_parameters() + 1) * kPointerSize); -} - - -void FastCodeGenerator::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitBlock(Block* stmt) { - VisitStatements(stmt->statements()); -} - - -void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) { - Visit(stmt->expression()); -} - - -void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) { - // Nothing to do. -} - - -void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitForStatement(ForStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitConditional(Conditional* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { - ASSERT(expr->var()->is_global() && !expr->var()->is_this()); - // Check if we can compile a global variable load directly from the cell. 
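// For a DontDelete global, the value lives in a JSGlobalPropertyCell whose
// address is stable, so EmitGlobalVariableLoad above needs only two moves,
// roughly:
//
//   mov dst, <cell handle>                                ; embedded constant
//   mov dst, [dst + JSGlobalPropertyCell::kValueOffset - kHeapObjectTag]
//
// The lookup below merely validates, at compile time, that this is safe.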
- ASSERT(info()->has_global_object()); - LookupResult lookup; - info()->global_object()->Lookup(*expr->name(), &lookup); - // We only support normal (non-accessor/interceptor) DontDelete properties - // for now. - ASSERT(lookup.IsProperty()); - ASSERT_EQ(NORMAL, lookup.type()); - ASSERT(lookup.IsDontDelete()); - Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup)); - - // Global variable lookups do not have side effects, so we do not need to - // emit code if we are in an effect context. - if (!destination().is(no_reg)) { - Comment cmnt(masm(), ";; Global"); - if (FLAG_print_ir) { - SmartPointer<char> name = expr->name()->ToCString(); - PrintF("%d: t%d = Global(%s)\n", expr->num(), - expr->num(), *name); - } - EmitGlobalVariableLoad(cell); - } -} - - -void FastCodeGenerator::VisitLiteral(Literal* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitAssignment(Assignment* expr) { - // Known to be a simple this property assignment. Effectively a unary - // operation. - { Register my_destination = destination(); - set_destination(accumulator0()); - Visit(expr->value()); - set_destination(my_destination); - } - - Property* prop = expr->target()->AsProperty(); - ASSERT_NOT_NULL(prop); - ASSERT_NOT_NULL(prop->obj()->AsVariableProxy()); - ASSERT(prop->obj()->AsVariableProxy()->var()->is_this()); - ASSERT(prop->key()->IsPropertyName()); - Handle<String> name = - Handle<String>::cast(prop->key()->AsLiteral()->handle()); - - Comment cmnt(masm(), ";; Store to this"); - if (FLAG_print_ir) { - SmartPointer<char> name_string = name->ToCString(); - PrintF("%d: ", expr->num()); - if (!destination().is(no_reg)) PrintF("t%d = ", expr->num()); - PrintF("Store(this, \"%s\", t%d)\n", *name_string, - expr->value()->num()); - } - - EmitThisPropertyStore(name); -} - - -void FastCodeGenerator::VisitThrow(Throw* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitProperty(Property* expr) { - ASSERT_NOT_NULL(expr->obj()->AsVariableProxy()); - ASSERT(expr->obj()->AsVariableProxy()->var()->is_this()); - ASSERT(expr->key()->IsPropertyName()); - if (!destination().is(no_reg)) { - Handle<String> name = - Handle<String>::cast(expr->key()->AsLiteral()->handle()); - - Comment cmnt(masm(), ";; Load from this"); - if (FLAG_print_ir) { - SmartPointer<char> name_string = name->ToCString(); - PrintF("%d: t%d = Load(this, \"%s\")\n", - expr->num(), expr->num(), *name_string); - } - EmitThisPropertyLoad(name); - } -} - - -void FastCodeGenerator::VisitCall(Call* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitCallNew(CallNew* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitCountOperation(CountOperation* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { - // We support limited binary operations: bitwise OR only allowed to be - // nested on the left. 
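// Why OR is the easy binary operation on tagged values: smis carry tag bit
// 0 and heap objects tag bit 1, so or-ing two words keeps tag bit 0 exactly
// when both inputs were smis.  EmitBitOr therefore needs only, in sketch:
//
//   or   dst, src               ; the tagged OR is already the tagged result
//   test dst, kSmiTagMask       ; one test covers both operands at once
//   jnz  bailout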
- ASSERT(expr->op() == Token::BIT_OR); - ASSERT(expr->right()->IsLeaf()); - - { Register my_destination = destination(); - set_destination(accumulator1()); - Visit(expr->left()); - set_destination(accumulator0()); - Visit(expr->right()); - set_destination(my_destination); - } - - Comment cmnt(masm(), ";; BIT_OR"); - if (FLAG_print_ir) { - PrintF("%d: ", expr->num()); - if (!destination().is(no_reg)) PrintF("t%d = ", expr->num()); - PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num()); - } - EmitBitOr(); -} - - -void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) { - UNREACHABLE(); -} - -#undef __ - - -} } // namespace v8::internal - -#endif // V8_TARGET_ARCH_IA32 diff --git a/src/ia32/fast-codegen-ia32.h b/src/ia32/fast-codegen-ia32.h deleted file mode 100644 index e0851afe..00000000 --- a/src/ia32/fast-codegen-ia32.h +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_FAST_CODEGEN_IA32_H_ -#define V8_FAST_CODEGEN_IA32_H_ - -#include "v8.h" - -#include "ast.h" -#include "compiler.h" -#include "list.h" - -namespace v8 { -namespace internal { - -class FastCodeGenSyntaxChecker: public AstVisitor { - public: - explicit FastCodeGenSyntaxChecker() - : info_(NULL), has_supported_syntax_(true) { - } - - void Check(CompilationInfo* info); - - CompilationInfo* info() { return info_; } - bool has_supported_syntax() { return has_supported_syntax_; } - - private: - void VisitDeclarations(ZoneList<Declaration*>* decls); - void VisitStatements(ZoneList<Statement*>* stmts); - - // AST node visit functions. 
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - CompilationInfo* info_; - bool has_supported_syntax_; - - DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker); -}; - - -class FastCodeGenerator: public AstVisitor { - public: - explicit FastCodeGenerator(MacroAssembler* masm) - : masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) { - } - - static Handle<Code> MakeCode(CompilationInfo* info); - - void Generate(CompilationInfo* compilation_info); - - private: - MacroAssembler* masm() { return masm_; } - CompilationInfo* info() { return info_; } - - Register destination() { return destination_; } - void set_destination(Register reg) { destination_ = reg; } - - FunctionLiteral* function() { return info_->function(); } - Scope* scope() { return info_->scope(); } - - // Platform-specific fixed registers, all guaranteed distinct. - Register accumulator0(); - Register accumulator1(); - Register scratch0(); - Register scratch1(); - Register receiver_reg(); - Register context_reg(); - - Register other_accumulator(Register reg) { - ASSERT(reg.is(accumulator0()) || reg.is(accumulator1())); - return (reg.is(accumulator0())) ? accumulator1() : accumulator0(); - } - - // Flags are true if the respective register is statically known to hold a - // smi. We do not track every register, only the accumulator registers. - bool is_smi(Register reg) { - ASSERT(!reg.is(no_reg)); - return (smi_bits_ & reg.bit()) != 0; - } - void set_as_smi(Register reg) { - ASSERT(!reg.is(no_reg)); - smi_bits_ = smi_bits_ | reg.bit(); - } - void clear_as_smi(Register reg) { - ASSERT(!reg.is(no_reg)); - smi_bits_ = smi_bits_ & ~reg.bit(); - } - - // AST node visit functions. -#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - // Emit code to load the receiver from the stack into receiver_reg. - void EmitLoadReceiver(); - - // Emit code to load a global variable directly from a global property - // cell into the destination register. - void EmitGlobalVariableLoad(Handle<Object> cell); - - // Emit a store to an own property of this. The stored value is expected - // in accumulator0 and the receiver in receiver_reg. The receiver - // register is preserved and the result (the stored value) is left in the - // destination register. - void EmitThisPropertyStore(Handle<String> name); - - // Emit a load from an own property of this. The receiver is expected in - // receiver_reg. The receiver register is preserved and the result is - // left in the destination register. - void EmitThisPropertyLoad(Handle<String> name); - - // Emit a bitwise or operation. The left operand is in accumulator1 and - // the right is in accumulator0. The result should be left in the - // destination register. - void EmitBitOr(); - - MacroAssembler* masm_; - CompilationInfo* info_; - - Register destination_; - uint32_t smi_bits_; - - DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator); -}; - - -} } // namespace v8::internal - -#endif // V8_FAST_CODEGEN_IA32_H_ diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index eb944e6f..68a0a960 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -54,97 +54,95 @@ namespace internal { // // The function builds a JS frame. Please see JavaScriptFrameConstants in // frames-ia32.h for its layout. 
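// Sketch of the frame the prologue below builds, with n = number of
// parameters (offsets reconstructed from the pushes that follow;
// frames-ia32.h is authoritative):
//
//   ebp + 4 * (n + 2) : receiver
//   ebp + 4 * (i + 2) : parameter n - 1 - i
//   ebp + 4           : return address
//   ebp               : saved caller ebp
//   ebp - 4           : context (esi)
//   ebp - 8           : JS function (edi)
//   below ebp - 8     : stack locals, initialized to undefined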
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void FullCodeGenerator::Generate(CompilationInfo* info) {
   ASSERT(info_ == NULL);
   info_ = info;
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
-  if (mode == PRIMARY) {
-    __ push(ebp);  // Caller's frame pointer.
-    __ mov(ebp, esp);
-    __ push(esi);  // Callee's context.
-    __ push(edi);  // Callee's JS Function.
-
-    { Comment cmnt(masm_, "[ Allocate locals");
-      int locals_count = scope()->num_stack_slots();
-      if (locals_count == 1) {
-        __ push(Immediate(Factory::undefined_value()));
-      } else if (locals_count > 1) {
-        __ mov(eax, Immediate(Factory::undefined_value()));
-        for (int i = 0; i < locals_count; i++) {
-          __ push(eax);
-        }
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = scope()->num_stack_slots();
+    if (locals_count == 1) {
+      __ push(Immediate(Factory::undefined_value()));
+    } else if (locals_count > 1) {
+      __ mov(eax, Immediate(Factory::undefined_value()));
+      for (int i = 0; i < locals_count; i++) {
+        __ push(eax);
       }
     }
+  }
 
-    bool function_in_register = true;
+  bool function_in_register = true;
 
-    // Possibly allocate a local context.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      Comment cmnt(masm_, "[ Allocate local context");
-      // Argument to NewContext is the function, which is still in edi.
-      __ push(edi);
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        __ CallStub(&stub);
-      } else {
-        __ CallRuntime(Runtime::kNewContext, 1);
-      }
-      function_in_register = false;
-      // Context is returned in both eax and esi.  It replaces the context
-      // passed to us.  It's saved in the stack and kept live in esi.
-      __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
-      // Copy parameters into context if necessary.
-      int num_parameters = scope()->num_parameters();
-      for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = scope()->parameter(i)->slot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          int parameter_offset = StandardFrameConstants::kCallerSPOffset +
-                                     (num_parameters - 1 - i) * kPointerSize;
-          // Load parameter from stack.
-          __ mov(eax, Operand(ebp, parameter_offset));
-          // Store it in the context.
-          int context_offset = Context::SlotOffset(slot->index());
-          __ mov(Operand(esi, context_offset), eax);
-          // Update the write barrier.  This clobbers all involved
-          // registers, so we have to use a third register to avoid
-          // clobbering esi.
-          __ mov(ecx, esi);
-          __ RecordWrite(ecx, context_offset, eax, ebx);
-        }
+  // Possibly allocate a local context.
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment cmnt(masm_, "[ Allocate local context");
+    // Argument to NewContext is the function, which is still in edi.
+    __ push(edi);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewContext, 1);
+    }
+    function_in_register = false;
+    // Context is returned in both eax and esi.  It replaces the context
+    // passed to us.  It's saved in the stack and kept live in esi.
+    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+    // Copy parameters into context if necessary.
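// (A parameter has a Slot::CONTEXT slot when a nested closure captures it;
//  such parameters must live in the heap-allocated context rather than in
//  the caller-pushed stack slots, so the loop below copies each one over
//  and runs the write barrier, since the context is a heap object.)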
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Slot* slot = scope()->parameter(i)->slot();
+      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                                   (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ mov(eax, Operand(ebp, parameter_offset));
+        // Store it in the context.
+        int context_offset = Context::SlotOffset(slot->index());
+        __ mov(Operand(esi, context_offset), eax);
+        // Update the write barrier.  This clobbers all involved
+        // registers, so we have to use a third register to avoid
+        // clobbering esi.
+        __ mov(ecx, esi);
+        __ RecordWrite(ecx, context_offset, eax, ebx);
       }
     }
+  }
 
-  Variable* arguments = scope()->arguments()->AsVariable();
-  if (arguments != NULL) {
-    // Function uses arguments object.
-    Comment cmnt(masm_, "[ Allocate arguments object");
-    if (function_in_register) {
-      __ push(edi);
-    } else {
-      __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    // Receiver is just before the parameters on the caller's stack.
-    int offset = scope()->num_parameters() * kPointerSize;
-    __ lea(edx,
-           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-    __ push(edx);
-    __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
-    // Arguments to ArgumentsAccessStub:
-    //   function, receiver address, parameter count.
-    // The stub will rewrite receiver and parameter count if the previous
-    // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
-    __ CallStub(&stub);
-    __ mov(ecx, eax);  // Duplicate result.
-    Move(arguments->slot(), eax, ebx, edx);
-    Slot* dot_arguments_slot =
-        scope()->arguments_shadow()->AsVariable()->slot();
-    Move(dot_arguments_slot, ecx, ebx, edx);
+  Variable* arguments = scope()->arguments()->AsVariable();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (function_in_register) {
+      __ push(edi);
+    } else {
+      __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     }
+    // Receiver is just before the parameters on the caller's stack.
+    int offset = scope()->num_parameters() * kPointerSize;
+    __ lea(edx,
+           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+    __ push(edx);
+    __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    __ CallStub(&stub);
+    __ mov(ecx, eax);  // Duplicate result.
+    Move(arguments->slot(), eax, ebx, edx);
+    Slot* dot_arguments_slot =
+        scope()->arguments_shadow()->AsVariable()->slot();
+    Move(dot_arguments_slot, ecx, ebx, edx);
   }
 
   { Comment cmnt(masm_, "[ Declarations");
@@ -1048,7 +1046,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
     __ push(ecx);  // Enumerable.
     __ push(ebx);  // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); - __ cmp(eax, Factory::null_value()); + __ test(eax, Operand(eax)); __ j(equal, loop_statement.continue_target()); __ mov(ebx, Operand(eax)); @@ -1328,12 +1326,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(expr->constant_elements())); - if (expr->depth() > 1) { + if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + __ CallStub(&stub); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1); + } else if (expr->depth() > 1) { __ CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumLength) { + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - FastCloneShallowArrayStub stub(length); + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); __ CallStub(&stub); } @@ -2054,6 +2058,25 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) { } +void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( + ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only + // used in a few functions in runtime.js which should not normally be hit by + // this compiler. + __ jmp(if_false); + Apply(context_, if_true, if_false); +} + + void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc index 2cd41a15..283ae4dc 100644 --- a/src/ia32/ic-ia32.cc +++ b/src/ia32/ic-ia32.cc @@ -452,6 +452,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, // Loads an indexed element from a fast case array. +// If not_fast_array is NULL, doesn't perform the elements map check. static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -468,8 +469,12 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // we fall through. __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode (not dictionary). - __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true); + if (not_fast_array != NULL) { + // Check that the object is in fast mode and writable. + __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true); + } else { + __ AssertFastElements(scratch); + } // Check that the key (index) is within bounds. __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset)); __ j(above_equal, out_of_range); @@ -558,12 +563,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateKeyedLoadReceiverCheck( masm, edx, ecx, Map::kHasIndexedInterceptor, &slow); + // Check the "has fast elements" bit in the receiver's map which is + // now in ecx. 
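// (Map::kHasFastElements is a bit index into the map's bit_field2 byte;
//  test_b ANDs that byte against the mask without storing, so the jz below
//  routes receivers without fast elements to the pixel-array check.  When
//  the bit is set, the elements map check inside GenerateFastArrayLoad is
//  skipped -- the NULL not_fast_array argument -- which also lets reads
//  from copy-on-write arrays take the fast path, since loads never mutate
//  the elements.)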
+  __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
+            1 << Map::kHasFastElements);
+  __ j(zero, &check_pixel_array, not_taken);
+
   GenerateFastArrayLoad(masm,
                         edx,
                         eax,
                         ecx,
                         eax,
-                        &check_pixel_array,
+                        NULL,
                         &slow);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
   __ ret(0);
@@ -572,7 +583,7 @@
   // Check whether the elements is a pixel array.
   // edx: receiver
   // eax: key
-  // ecx: elements
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
   __ mov(ebx, eax);
   __ SmiUntag(ebx);
   __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
@@ -967,7 +978,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // edx: JSObject
   // ecx: key (a smi)
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
+  // Check that the object is in fast mode and writable.
   __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
   __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
   __ j(below, &fast, taken);
@@ -1023,8 +1034,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
-  // array.  Check that the array is in fast mode; if it is, the
-  // length is always a smi.
+  // array.  Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
   __ bind(&array);
   // eax: value
   // edx: receiver, a JSArray
@@ -1872,6 +1883,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   __ j(not_equal, &miss, not_taken);
 
   // Check that elements are FixedArray.
+  // We rely on StoreIC_ArrayLength below to deal with all types of
+  // fast elements (including COW).
   __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
   __ j(not_equal, &miss, not_taken);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d0eeb773..658caf1e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -373,7 +373,13 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
 
 void MacroAssembler::AbortIfNotSmi(Register object) {
   test(object, Immediate(kSmiTagMask));
-  Assert(equal, "Operand not a smi");
+  Assert(equal, "Operand is not a smi");
+}
+
+
+void MacroAssembler::AbortIfSmi(Register object) {
+  test(object, Immediate(kSmiTagMask));
+  Assert(not_equal, "Operand is a smi");
 }
 
 
@@ -1292,7 +1298,7 @@ void MacroAssembler::InvokeFunction(Register fun,
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   SmiUntag(ebx);
-  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
   lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   ParameterCount expected(ebx);
@@ -1344,8 +1350,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   // Make sure the code objects in the builtins object and in the
   // builtin function are the same.
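// (The check below now loads the code object straight from the JSFunction's
//  kCodeOffset field instead of going through the SharedFunctionInfo, in
//  line with the InvokeFunction change above; sketched:
//
//      push target                              ; entry from builtins object
//      mov  target, [edi + JSFunction::kCodeOffset - kHeapObjectTag]
//      cmp  target, [esp]
// )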
   push(target);
-  mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+  mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
   cmp(target, Operand(esp, 0));
   Assert(equal, "Builtin code object changed");
   pop(target);
@@ -1459,6 +1464,21 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
 }
 
 
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (FLAG_debug_code) {
+    Label ok;
+    cmp(FieldOperand(elements, HeapObject::kMapOffset),
+        Immediate(Factory::fixed_array_map()));
+    j(equal, &ok);
+    cmp(FieldOperand(elements, HeapObject::kMapOffset),
+        Immediate(Factory::fixed_cow_array_map()));
+    j(equal, &ok);
+    Abort("JSObject with fast elements map has slow elements");
+    bind(&ok);
+  }
+}
+
+
 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
   j(cc, &L, taken);
@@ -1510,6 +1530,59 @@ void MacroAssembler::Abort(const char* msg) {
 }
 
 
+void MacroAssembler::JumpIfNotNumber(Register reg,
+                                     TypeInfo info,
+                                     Label* on_not_number) {
+  if (FLAG_debug_code) AbortIfSmi(reg);
+  if (!info.IsNumber()) {
+    cmp(FieldOperand(reg, HeapObject::kMapOffset),
+        Factory::heap_number_map());
+    j(not_equal, on_not_number);
+  }
+}
+
+
+void MacroAssembler::ConvertToInt32(Register dst,
+                                    Register source,
+                                    Register scratch,
+                                    TypeInfo info,
+                                    Label* on_not_int32) {
+  if (FLAG_debug_code) {
+    AbortIfSmi(source);
+    AbortIfNotNumber(source);
+  }
+  if (info.IsInteger32()) {
+    cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
+  } else {
+    Label done;
+    bool push_pop = (scratch.is(no_reg) && dst.is(source));
+    ASSERT(!scratch.is(source));
+    if (push_pop) {
+      push(dst);
+      scratch = dst;
+    }
+    if (scratch.is(no_reg)) scratch = dst;
+    cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
+    cmp(scratch, 0x80000000u);
+    if (push_pop) {
+      j(not_equal, &done);
+      pop(dst);
+      jmp(on_not_int32);
+    } else {
+      j(equal, on_not_int32);
+    }
+
+    bind(&done);
+    if (push_pop) {
+      add(Operand(esp), Immediate(kPointerSize));  // Pop.
+    }
+    if (!scratch.is(dst)) {
+      mov(dst, scratch);
+    }
+  }
+}
+
+
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
     Register instance_type,
     Register scratch,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a17a2b4c..c23f6873 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -225,12 +226,44 @@ class MacroAssembler: public Assembler {
     sar(reg, kSmiTagSize);
   }
 
+  // Modifies the register even if it does not contain a Smi!
+  void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
+    ASSERT(kSmiTagSize == 1);
+    sar(reg, kSmiTagSize);
+    if (info.IsSmi()) {
+      ASSERT(kSmiTag == 0);
+      j(carry, non_smi);
+    }
+  }
+
+  // Modifies the register even if it does not contain a Smi!
+  void SmiUntag(Register reg, Label* is_smi) {
+    ASSERT(kSmiTagSize == 1);
+    sar(reg, kSmiTagSize);
+    ASSERT(kSmiTag == 0);
+    j(not_carry, is_smi);
+  }
+
+  // Assumes input is a heap object.
+  void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
+
+  // Assumes input is a heap number.  Jumps to on_not_int32 for values out
+  // of int32 range, and also for the minimum negative int32.  Ignores
+  // fractional parts.
+  void ConvertToInt32(Register dst,
+                      Register src,      // Can be the same as dst.
+                      Register scratch,  // Can be no_reg or dst, but not src.
+ TypeInfo info, + Label* on_not_int32); + // Abort execution if argument is not a number. Used in debug code. void AbortIfNotNumber(Register object); // Abort execution if argument is not a smi. Used in debug code. void AbortIfNotSmi(Register object); + // Abort execution if argument is a smi. Used in debug code. + void AbortIfSmi(Register object); + // --------------------------------------------------------------------------- // Exception handling @@ -475,6 +508,8 @@ class MacroAssembler: public Assembler { // Use --debug_code to enable. void Assert(Condition cc, const char* msg); + void AssertFastElements(Register elements); + // Like Assert(), but always enabled. void Check(Condition cc, const char* msg); diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc index c21dd4f0..c6c65f07 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ia32/stub-cache-ia32.cc @@ -1255,30 +1255,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, } -// TODO(1241006): Avoid having lazy compile stubs specialized by the -// number of arguments. It is not needed anymore. -Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { - // Enter an internal frame. - __ EnterInternalFrame(); - - // Push a copy of the function onto the stack. - __ push(edi); - - __ push(edi); // function is also the parameter to the runtime call - __ CallRuntime(Runtime::kLazyCompile, 1); - __ pop(edi); - - // Tear down temporary frame. - __ LeaveInternalFrame(); - - // Do a tail-call of the compiled function. - __ lea(ecx, FieldOperand(eax, Code::kHeaderSize)); - __ jmp(Operand(ecx)); - - return GetCodeWithFlags(flags, "LazyCompileStub"); -} - - void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { __ cmp(Operand(ecx), Immediate(Handle<String>(name))); @@ -1390,16 +1366,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); __ ret((argc + 1) * kPointerSize); } else { + Label call_builtin; + // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset)); - // Check that the elements are in fast mode (not dictionary). + // Check that the elements are in fast mode and writable. __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), Immediate(Factory::fixed_array_map())); - __ j(not_equal, &miss); + __ j(not_equal, &call_builtin); if (argc == 1) { // Otherwise fall through to call builtin. - Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements; + Label exit, with_write_barrier, attempt_to_grow_elements; // Get the array's length into eax and calculate new length. __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); @@ -1480,10 +1458,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, // Elements are in new space, so write barrier is not required. __ ret((argc + 1) * kPointerSize); - - __ bind(&call_builtin); } + __ bind(&call_builtin); __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush), argc + 1, 1); @@ -1535,10 +1512,10 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset)); - // Check that the elements are in fast mode (not dictionary). + // Check that the elements are in fast mode and writable. 
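// (A copy-on-write array carries fixed_cow_array_map rather than
//  fixed_array_map, so it fails the comparison below; with the miss target
//  changed to &call_builtin, such arrays now fall back to the generic
//  ArrayPop builtin instead of being mutated in place by this stub.)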
   __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
          Immediate(Factory::fixed_array_map()));
-  __ j(not_equal, &miss);
+  __ j(not_equal, &call_builtin);
 
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1595,6 +1572,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString()) return Heap::undefined_value();
+
   const int argc = arguments().immediate();
 
   Label miss;
@@ -1605,6 +1585,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             eax);
+  ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                   ebx, edx, edi, name, &miss);
 
@@ -1659,6 +1640,9 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString()) return Heap::undefined_value();
+
   const int argc = arguments().immediate();
 
   Label miss;
@@ -1670,6 +1654,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             eax);
+  ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                   ebx, edx, edi, name, &miss);
 
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index e00626b7..b9faa461 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -139,6 +139,22 @@ class VirtualFrame: public ZoneObject {
     if (is_used(reg)) SpillElementAt(register_location(reg));
   }
 
+  // Make the two registers distinct and spill them.  Returns the second
+  // register.  If the registers were not distinct then it returns the new
+  // second register.
+  Result MakeDistinctAndSpilled(Result* left, Result* right) {
+    Spill(left->reg());
+    Spill(right->reg());
+    if (left->reg().is(right->reg())) {
+      RegisterAllocator* allocator = cgen()->allocator();
+      Result fresh = allocator->Allocate();
+      ASSERT(fresh.is_valid());
+      masm()->mov(fresh.reg(), right->reg());
+      return fresh;
+    }
+    return *right;
+  }
+
   // Spill all occurrences of an arbitrary register if possible.  Return the
   // register spilled or no_reg if it was not possible to free any register
   // (i.e., they all have frame-external references).
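A note on the smi-untagging trick the new SmiUntag overloads above rely on
(a minimal sketch, assuming the ia32 tagging scheme the asserts spell out:
kSmiTag == 0, kSmiTagSize == 1):

    ; tagged word layout: smi = value << 1 (low bit 0),
    ;                     heap object = pointer | 1 (low bit 1)
    sar reg, 1        ; reg = untagged value, CF = shifted-out tag bit
    jc  non_smi       ; tag bit was 1, so the operand was a heap object

A single arithmetic shift both recovers the integer and classifies the
operand via the carry flag, with no separate test instruction.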