author    Kristian Monsen <kristianm@google.com>  2010-06-28 14:14:28 +0100
committer Kristian Monsen <kristianm@google.com>  2010-07-02 09:44:56 +0100
commit    9dcf7e2f83591d471e88bf7d230651900b8e424b (patch)
tree      0a26792d5c298ecf46ab9be2252662fee5628f66 /src/arm
parent    a94adf74b8a91ff002b9cade1736e5c4a50d52fb (diff)
Update V8 to r4924 as required by WebKit r61871
Change-Id: Ic819dad0c1c9e035b8ffd306c96656ba87c5e85a
Diffstat (limited to 'src/arm')
-rw-r--r--  src/arm/assembler-arm-inl.h    |   5
-rw-r--r--  src/arm/assembler-arm.cc       | 122
-rw-r--r--  src/arm/assembler-arm.h        |  44
-rw-r--r--  src/arm/builtins-arm.cc        |  20
-rw-r--r--  src/arm/codegen-arm.cc         | 415
-rw-r--r--  src/arm/codegen-arm.h          |  10
-rw-r--r--  src/arm/constants-arm.h        |   3
-rw-r--r--  src/arm/disasm-arm.cc          |  22
-rw-r--r--  src/arm/fast-codegen-arm.cc    |   3
-rw-r--r--  src/arm/full-codegen-arm.cc    |  76
-rw-r--r--  src/arm/ic-arm.cc              | 465
-rw-r--r--  src/arm/macro-assembler-arm.cc | 125
-rw-r--r--  src/arm/macro-assembler-arm.h  |  23
-rw-r--r--  src/arm/simulator-arm.cc       |   9
-rw-r--r--  src/arm/stub-cache-arm.cc      |  28
-rw-r--r--  src/arm/virtual-frame-arm.cc   |   1
-rw-r--r--  src/arm/virtual-frame-arm.h    |   5
17 files changed, 985 insertions(+), 391 deletions(-)
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 8ca91265..114ec234 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -45,11 +45,6 @@
namespace v8 {
namespace internal {
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 025f28e5..f8d98db9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -279,6 +279,25 @@ const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
+const Instr kMovMvnPattern = 0xd * B21;
+const Instr kMovMvnFlip = B22;
+const Instr kMovLeaveCCMask = 0xdff * B16;
+const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwMask = 0xff * B20;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovwLeaveCCFlip = 0x5 * B21;
+const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
+const Instr kCmpCmnPattern = 0x15 * B20;
+const Instr kCmpCmnFlip = B21;
+const Instr kALUMask = 0x6f * B21;
+const Instr kAddPattern = 0x4 * B21;
+const Instr kSubPattern = 0x2 * B21;
+const Instr kBicPattern = 0xe * B21;
+const Instr kAndPattern = 0x0 * B21;
+const Instr kAddSubFlip = 0x6 * B21;
+const Instr kAndBicFlip = 0xe * B21;
+
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
@@ -375,6 +394,12 @@ void Assembler::Align(int m) {
}
+void Assembler::CodeTargetAlign() {
+ // Preferred alignment of jump targets on some ARM chips.
+ Align(8);
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
@@ -626,7 +651,16 @@ void Assembler::next(Label* L) {
}
+static Instr EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
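For reference, the movw encoding splits its 16-bit immediate into a 4-bit imm4 field (instruction bits 19:16) and a 12-bit imm12 field (bits 11:0), which is exactly the shuffle EncodeMovwImmediate performs. A minimal standalone sketch, independent of V8's headers:

    #include <cstdint>
    // Bits 15:12 of the value move to instruction bits 19:16; bits 11:0 stay.
    uint32_t EncodeMovw(uint32_t imm16) {
      return ((imm16 & 0xf000) << 4) | (imm16 & 0xfff);
    }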
+
+
// Low-level code emission routines depending on the addressing mode.
+// If this returns true then you have to use the rotate_imm and immed_8
+// that it returns, because it may have already changed the instruction
+// to match them!
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
@@ -640,11 +674,43 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
- // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
- if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= 0x2*B21;
- return true;
+ // If the opcode is one with a complementary version and the complementary
+ // immediate fits, change the opcode.
+ if (instr != NULL) {
+ if ((*instr & kMovMvnMask) == kMovMvnPattern) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kMovMvnFlip;
+ return true;
+ } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ if (imm32 < 0x10000) {
+ *instr ^= kMovwLeaveCCFlip;
+ *instr |= EncodeMovwImmediate(imm32);
+ *rotate_imm = *immed_8 = 0; // Not used for movw.
+ return true;
+ }
+ }
+ }
+ } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
+ if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kCmpCmnFlip;
+ return true;
+ }
+ } else {
+ Instr alu_insn = (*instr & kALUMask);
+ if (alu_insn == kAddPattern ||
+ alu_insn == kSubPattern) {
+ if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kAddSubFlip;
+ return true;
+ }
+ } else if (alu_insn == kAndPattern ||
+ alu_insn == kBicPattern) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kAndBicFlip;
+ return true;
+ }
+ }
}
}
return false;
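For readers unfamiliar with ARM's shifter operands: the plain case that fits_shifter tries first accepts any value expressible as an 8-bit constant rotated right by an even amount; the fallbacks above then try the complement (~imm for mov/mvn and and/bic) or the negation (-imm for cmp/cmn and add/sub) with the opcode flipped. A standalone sketch of the plain test, assuming nothing from V8:

    #include <cstdint>
    // True if imm32 equals an 8-bit value rotated right by 2*rotate_imm.
    bool FitsRotatedImmediate(uint32_t imm32,
                              uint32_t* rotate_imm, uint32_t* immed_8) {
      for (uint32_t rot = 0; rot < 32; rot += 2) {
        // Rotating left by 'rot' undoes a rotate-right-by-'rot' encoding.
        uint32_t v = (imm32 << rot) | (imm32 >> ((32 - rot) & 31));
        if (v <= 0xff) {
          *rotate_imm = rot / 2;  // Stored as a 4-bit field, in units of 2.
          *immed_8 = v;
          return true;
        }
      }
      return false;
    }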
@@ -655,7 +721,7 @@ static bool fits_shifter(uint32_t imm32,
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
+static bool MustUseConstantPool(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
@@ -670,6 +736,14 @@ static bool MustUseIp(RelocInfo::Mode rmode) {
}
+bool Operand::is_single_instruction() const {
+ if (rm_.is_valid()) return true;
+ if (MustUseConstantPool(rmode_)) return false;
+ uint32_t dummy1, dummy2;
+ return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+}
+
+
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@@ -680,19 +754,34 @@ void Assembler::addrmod1(Instr instr,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(x.rmode_) ||
+ if (MustUseConstantPool(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
- RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
if ((instr & ~CondMask) == 13*B21) { // mov, S not set
- ldr(rd, MemOperand(pc, 0), cond);
+ if (MustUseConstantPool(x.rmode_) ||
+ !CpuFeatures::IsSupported(ARMv7)) {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ // Will probably use movw, will certainly not use constant pool.
+ mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
+ movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ }
} else {
- ldr(ip, MemOperand(pc, 0), cond);
+ // If this is not a mov or mvn instruction we may still be able to avoid
+ // a constant pool entry by using mvn or movw.
+ if (!MustUseConstantPool(x.rmode_) &&
+ (instr & kMovMvnMask) != kMovMvnPattern) {
+ mov(ip, x, LeaveCC, cond);
+ } else {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ }
addrmod1(instr, rn, rd, Operand(ip));
}
return;
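The movw/movt pair above assembles a full 32-bit constant from two halves without touching the constant pool; the split is trivial, as this sketch (hypothetical helper name) shows:

    #include <cstdint>
    void SplitForMovwMovt(uint32_t imm32, uint16_t* low, uint16_t* high) {
      *low = imm32 & 0xffff;                       // movw rd, #low
      *high = static_cast<uint16_t>(imm32 >> 16);  // movt rd, #high
    }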
@@ -1003,6 +1092,17 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
+void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
+ ASSERT(immediate < 0x10000);
+ mov(reg, Operand(immediate), LeaveCC, cond);
+}
+
+
+void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+ emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+}
+
+
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 14*B21 | s, src1, dst, src2);
@@ -1183,7 +1283,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(src.rmode_) ||
+ if (MustUseConstantPool(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e5d42f9a..869227a7 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -279,7 +279,10 @@ enum Condition {
// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
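The XOR works because ARM condition codes come in complementary pairs that differ only in bit 28 of the instruction, and ne is exactly that bit; al (always) has no usable complement, hence the ASSERT. A sketch with the architectural encodings (values assumed from the ARM ARM, not taken from this header):

    #include <cstdint>
    enum Cond : uint32_t { kEq = 0u << 28, kNe = 1u << 28,
                           kCs = 2u << 28, kCc = 3u << 28 };
    // Flipping bit 28 negates a condition: eq<->ne, cs<->cc, mi<->pl, ...
    constexpr Cond Negate(Cond c) { return static_cast<Cond>(c ^ kNe); }
    static_assert(Negate(kEq) == kNe && Negate(kCs) == kCc, "pairs flip");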
// Corresponds to transposing the operands of a comparison.
@@ -418,6 +421,15 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
+ // Return true if this operand fits in one instruction so that no
+ // 2-instruction solution with a load into the ip register is necessary.
+ bool is_single_instruction() const;
+
+ inline int32_t immediate() const {
+ ASSERT(!rm_.is_valid());
+ return imm32_;
+ }
+
Register rm() const { return rm_; }
private:
@@ -532,6 +544,27 @@ extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
+extern const Instr kMovMvnMask;
+extern const Instr kMovMvnPattern;
+extern const Instr kMovMvnFlip;
+
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+
+extern const Instr kCmpCmnMask;
+extern const Instr kCmpCmnPattern;
+extern const Instr kCmpCmnFlip;
+
+extern const Instr kALUMask;
+extern const Instr kAddPattern;
+extern const Instr kSubPattern;
+extern const Instr kAndPattern;
+extern const Instr kBicPattern;
+extern const Instr kAddSubFlip;
+extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:
@@ -670,6 +703,8 @@ class Assembler : public Malloced {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
// Branch instructions
void b(int branch_offset, Condition cond = al);
@@ -748,6 +783,13 @@ class Assembler : public Malloced {
mov(dst, Operand(src), s, cond);
}
+ // ARMv7 instructions for loading a 32-bit immediate in two instructions.
+ // This may actually emit a different mov instruction, but on an ARMv7 it
+ // is guaranteed to only emit one instruction.
+ void movw(Register reg, uint32_t immediate, Condition cond = al);
+ // The constant for movt should be in the range 0-0xffff.
+ void movt(Register reg, uint32_t immediate, Condition cond = al);
+
void bic(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ddbb9777..b1f29ba3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -136,7 +136,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+ ASSERT(kSmiTag == 0);
+ __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
@@ -240,9 +241,10 @@ static void AllocateJSArray(MacroAssembler* masm,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- __ and_(elements_array_storage,
- elements_array_storage,
- Operand(~kHeapObjectTagMask));
+ ASSERT(kSmiTag == 0);
+ __ sub(elements_array_storage,
+ elements_array_storage,
+ Operand(kHeapObjectTag));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
@@ -617,12 +619,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ and_(r6,
- r0,
- Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
- __ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8));
- __ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
- __ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC);
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
+ __ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
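The Ubfx calls above replace an and+shift pair with a single unsigned bit-field extract. A plain C++ sketch of its semantics (valid for width < 32):

    #include <cstdint>
    // Extract 'width' bits starting at bit 'lsb', zero-extended.
    uint32_t Ubfx(uint32_t value, int lsb, int width) {
      return (value >> lsb) & ((1u << width) - 1);
    }
    // E.g. Ubfx(sizes, Map::kPreAllocatedPropertyFieldsByte * 8, 8) isolates
    // one byte of the packed instance-sizes word, as the code above does.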
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 1ca236d1..8e87614c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -268,8 +268,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Load the offset into r3.
int slot_offset =
FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ __ RecordWrite(r2, Operand(slot_offset), r3, r1);
}
}
}
@@ -342,56 +341,27 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
}
- // Generate the return sequence if necessary.
- if (has_valid_frame() || function_return_.is_linked()) {
- if (!function_return_.is_linked()) {
- CodeForReturnPosition(info->function());
- }
- // exit
- // r0: result
- // sp: stack pointer
- // fp: frame pointer
- // cp: callee's context
+ // Handle the return from the function.
+ if (has_valid_frame()) {
+ // If there is a valid frame, control flow can fall off the end of
+ // the body. In that case there is an implicit return statement.
+ ASSERT(!function_return_is_shadowed_);
+ frame_->PrepareForReturn();
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-
+ if (function_return_.is_bound()) {
+ function_return_.Jump();
+ } else {
+ function_return_.Bind();
+ GenerateReturnSequence();
+ }
+ } else if (function_return_.is_linked()) {
+ // If the return target has dangling jumps to it, then we have not
+ // yet generated the return sequence. This can happen when (a)
+ // control does not flow off the end of the body so we did not
+ // compile an artificial return statement just above, and (b) there
+ // are return statements in the body but (c) they are all shadowed.
function_return_.Bind();
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns the parameter as it is.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Tear down the frame which will restore the caller's frame pointer and
- // the link register.
- frame_->Exit();
-
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
-
-#ifdef DEBUG
- // Check that the size of the code used for returning matches what is
- // expected by the debugger. If the sp_delts above cannot be encoded in
- // the add instruction the add will generate two instructions.
- int return_sequence_length =
- masm_->InstructionsGeneratedSince(&check_exit_codesize);
- CHECK(return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions ||
- return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions + 1);
-#endif
- }
+ GenerateReturnSequence();
}
// Adjust for function-level loop nesting.
@@ -1203,7 +1173,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
frame_->EmitPush(tos, TypeInfo::Smi());
@@ -1215,7 +1185,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
deferred->BindExit();
@@ -1958,8 +1928,56 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
// returning thus making it easier to merge.
frame_->EmitPop(r0);
frame_->PrepareForReturn();
+ if (function_return_.is_bound()) {
+ // If the function return label is already bound we reuse the
+ // code by jumping to the return site.
+ function_return_.Jump();
+ } else {
+ function_return_.Bind();
+ GenerateReturnSequence();
+ }
+ }
+}
- function_return_.Jump();
+
+void CodeGenerator::GenerateReturnSequence() {
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns the parameter as it is.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ // Tear down the frame which will restore the caller's frame pointer and
+ // the link register.
+ frame_->Exit();
+
+ // Here we use masm_-> instead of the __ macro to prevent the code coverage
+ // tool from instrumenting, as we rely on the code size here.
+ int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ masm_->add(sp, sp, Operand(sp_delta));
+ masm_->Jump(lr);
+ DeleteFrame();
+
+#ifdef DEBUG
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. If the sp_delta above cannot be encoded in
+ // the add instruction the add will generate two instructions.
+ int return_sequence_length =
+ masm_->InstructionsGeneratedSince(&check_exit_codesize);
+ CHECK(return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions ||
+ return_sequence_length ==
+ Assembler::kJSReturnSequenceInstructions + 1);
+#endif
}
}
@@ -3090,9 +3108,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(offset));
// r1 could be identical with tos, but that doesn't matter.
- __ RecordWrite(scratch, r3, r1);
+ __ RecordWrite(scratch, Operand(offset), r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
@@ -3445,8 +3462,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
__ str(r0, FieldMemOperand(r1, offset));
// Update the write barrier for the array address.
- __ mov(r3, Operand(offset));
- __ RecordWrite(r1, r3, r2);
+ __ RecordWrite(r1, Operand(offset), r3, r2);
}
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -4069,28 +4085,34 @@ void CodeGenerator::VisitCall(Call* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Load(property->obj());
- if (!property->is_synthetic()) {
- // Duplicate receiver for later use.
- __ ldr(r0, MemOperand(sp, 0));
- frame_->EmitPush(r0);
- }
- Load(property->key());
- EmitKeyedLoad();
- // Put the function below the receiver.
if (property->is_synthetic()) {
+ Load(property->key());
+ EmitKeyedLoad();
+ // Put the function below the receiver.
// Use the global receiver.
frame_->EmitPush(r0); // Function.
LoadGlobalReceiver(r0);
+ // Call the function.
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
+ frame_->EmitPush(r0);
} else {
- // Switch receiver and function.
- frame_->EmitPop(r1); // Receiver.
- frame_->EmitPush(r0); // Function.
- frame_->EmitPush(r1); // Receiver.
- }
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- frame_->EmitPush(r0);
+ // Set the name register and call the IC initialization code.
+ Load(property->key());
+ frame_->EmitPop(r2); // Function name.
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
+ CodeForSourcePosition(node->position());
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+ __ ldr(cp, frame_->Context());
+ frame_->EmitPush(r0);
+ }
}
} else {
@@ -4254,8 +4276,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
// Leave.
leave.Bind();
frame_->EmitPush(r0);
@@ -4685,7 +4706,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -6628,8 +6650,12 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+ // We use mantissa as a scratch register here. Use a fudge factor to
+ // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
+ // that fit in the ARM's constant field.
+ int fudge = 0x400;
+ __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
+ __ add(mantissa, mantissa, Operand(fudge));
__ orr(exponent,
exponent,
Operand(mantissa, LSL, HeapNumber::kExponentShift));
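The fudge factor works because 0x41d spans too many bits for ARM's rotated 8-bit immediate, while 0x400 (one set bit) and 0x1d (five low bits) are each encodable, so the constant is materialized as an rsb followed by an add. A quick arithmetic check, assuming only the IEEE-754 double bias:

    #include <cassert>
    int main() {
      const int kExponentBias = 1023;              // 0x3ff for doubles.
      const int fudge = 0x400;
      assert(31 + kExponentBias == 0x41d);         // Not encodable in one go.
      assert(31 + kExponentBias - fudge == 0x1d);  // Both halves encodable.
      return 0;
    }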
@@ -6702,15 +6728,12 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
- Register exp_mask_reg = r5;
__ cmp(r0, r1);
__ b(ne, &not_identical);
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
@@ -6771,8 +6794,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Read top bits of double representation (second word of value).
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
- __ and_(r3, r2, Operand(exp_mask_reg));
- __ cmp(r3, Operand(exp_mask_reg));
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
__ b(ne, &return_equal);
// Shift out flag and all exponent bits, retaining only mantissa.
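The Sbfx/cmp pair tests for NaN by sign-extending the 11-bit exponent field: only an all-ones exponent (NaN or infinity) extends to -1, and the mantissa check that follows separates the two. A C++ sketch of the extraction, assuming the standard double layout (exponent in bits 30:20 of the high word):

    #include <cstdint>
    int32_t SignExtendedExponent(uint32_t high_word) {
      // Shift the exponent to the top, then arithmetic-shift back down
      // (arithmetic on the compilers V8 targets), mirroring sbfx.
      return static_cast<int32_t>(high_word << 1) >> 21;
    }
    // SignExtendedExponent(0x7ff00000) == -1 (Inf/NaN high word);
    // SignExtendedExponent(0x3ff00000) == 1023 (the value 1.0).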
@@ -6893,14 +6917,14 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Register rhs_mantissa = exp_first ? r1 : r0;
Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = r5;
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
+ __ Sbfx(r4,
+ lhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, lhs_not_nan);
__ mov(r4,
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
@@ -6909,10 +6933,12 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
__ b(ne, &one_is_nan);
__ bind(lhs_not_nan);
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
+ __ Sbfx(r4,
+ rhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
__ b(ne, &neither_is_nan);
__ mov(r4,
Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
@@ -7178,7 +7204,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ RecordWriteHelper(object_, offset_, scratch_);
+ __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
__ Ret();
}
@@ -7338,12 +7364,16 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+ Register heap_number_map = r6;
if (ShouldGenerateSmiCode()) {
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+ // The new heap number is in r5. r3 and r7 are scratch.
+ __ AllocateHeapNumber(
+ r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@@ -7356,14 +7386,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
- // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
+ // Write Smi from rhs to r3 and r2 in double format. r3 is scratch.
__ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ ConvertToDoubleStub stub1(r3, r2, r7, r9);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
+ // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
__ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ ConvertToDoubleStub stub2(r1, r0, r7, r9);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
@@ -7372,6 +7402,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// After this point we have the left hand side in r1 and the right hand side
// in r0.
@@ -7394,18 +7425,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
default:
break;
}
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
@@ -7423,7 +7458,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7435,7 +7470,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7448,6 +7483,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm);
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&r1_is_smi);
}
@@ -7457,7 +7494,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
- __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
@@ -7475,7 +7514,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r1_is_smi);
if (mode_ == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7487,7 +7526,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7548,13 +7587,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
}
}
-
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
__ Swap(r0, r1, ip);
}
+ heap_number_map = no_reg; // Don't use this any more from here on.
+
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
@@ -7633,7 +7673,10 @@ static void GetInt32(MacroAssembler* masm,
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
- __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ __ Ubfx(scratch2,
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
@@ -7641,9 +7684,14 @@ static void GetInt32(MacroAssembler* masm,
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Operand(non_smi_exponent));
+ const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+ // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+ // split it up to avoid a constant pool entry. You can't do that in general
+ // for cmp because of the overflow flag, but we know the exponent is in the
+ // range 0-2047 so there is no overflow.
+ int fudge_factor = 0x400;
+ __ sub(scratch2, scratch2, Operand(fudge_factor));
+ __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
@@ -7653,17 +7701,14 @@ static void GetInt32(MacroAssembler* masm,
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+ const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+ __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
if (!CpuFeatures::IsSupported(VFP3)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
+ // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
+ // get how much to shift down.
+ __ rsb(dest, scratch2, Operand(30));
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(VFP3)) {
@@ -7715,9 +7760,13 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
Label rhs_is_smi, lhs_is_smi;
Label done_checking_rhs, done_checking_lhs;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
__ tst(lhs, Operand(kSmiTagMask));
__ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, lhs, r3, r5, r4, &slow);
__ jmp(&done_checking_lhs);
@@ -7727,7 +7776,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, rhs, r2, r5, r4, &slow);
__ jmp(&done_checking_rhs);
@@ -7787,8 +7837,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
break;
}
case NO_OVERWRITE: {
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
default: break;
}
@@ -7807,8 +7857,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
__ jmp(&got_a_heap_number);
}
@@ -7934,10 +7984,11 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s",
+ "GenericBinaryOpStub_%s_%s%s_%s",
op_name,
overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : 0);
+ specialized_on_rhs_ ? "_ConstantRhs" : "",
+ BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
@@ -8130,6 +8181,28 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
__ Ret();
__ bind(&smi_is_unsuitable);
+ } else if (op_ == Token::MOD &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS) {
+ // Do generate a bit of smi code for modulus even though the default for
+ // modulus is not to do it; since the ARM processor has no coprocessor
+ // support for modulus, checking for smis makes sense.
+ Label slow;
+ ASSERT(!ShouldGenerateSmiCode());
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ // Check for two positive smis.
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that rhs is a power of two and not zero.
+ __ sub(scratch, rhs, Operand(1), SetCC);
+ __ b(mi, &slow);
+ __ tst(rhs, scratch);
+ __ b(ne, &slow);
+ // Calculate power of two modulus.
+ __ and_(result, lhs, Operand(scratch));
+ __ Ret();
+ __ bind(&slow);
}
HandleBinaryOpSlowCases(
masm,
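The fast path above works because x % m == x & (m - 1) whenever m is a power of two and x is non-negative, and m is a nonzero power of two exactly when m & (m - 1) == 0, which is what the sub/tst pair establishes. A sketch:

    #include <cstdint>
    // Returns true and stores lhs % rhs when rhs is a nonzero power of two;
    // mirrors the positive-smi fast path above, falling back otherwise.
    bool PowerOfTwoMod(uint32_t lhs, uint32_t rhs, uint32_t* result) {
      if (rhs == 0 || (rhs & (rhs - 1)) != 0) return false;
      *result = lhs & (rhs - 1);
      return true;
    }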
@@ -8276,20 +8349,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
// r2 = low 32 bits of double value
// r3 = high 32 bits of double value
- // Compute hash:
+ // Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, LSR, 16));
- __ eor(r1, r1, Operand(r1, LSR, 8));
+ __ eor(r1, r1, Operand(r1, ASR, 16));
+ __ eor(r1, r1, Operand(r1, ASR, 8));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- if (CpuFeatures::IsSupported(ARMv7)) {
- const int kTranscendentalCacheSizeBits = 9;
- ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
- TranscendentalCache::kCacheSize);
- __ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
- } else {
- __ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
- }
+ __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
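A sketch of the hash computed above, presumably switched from LSR to ASR so the generated code agrees bit-for-bit with the runtime's C++ side of the cache, which hashes with signed 32-bit values:

    #include <cstdint>
    int32_t CacheHash(int32_t low, int32_t high, int cache_size) {
      int32_t h = low ^ high;
      h ^= h >> 16;  // Arithmetic shifts on the compilers V8 targets.
      h ^= h >> 8;
      return h & (cache_size - 1);  // cache_size is a power of two.
    }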
@@ -8364,6 +8430,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
if (op_ == Token::SUB) {
// Check whether the value is a smi.
Label try_float;
@@ -8384,7 +8453,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(&done);
__ bind(&try_float);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_) {
@@ -8392,7 +8463,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
- __ AllocateHeapNumber(r1, r2, r3, &slow);
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -8402,7 +8473,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
} else if (op_ == Token::BIT_NOT) {
// Check if the operand is a heap number.
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// Convert the heap number is r0 to an untagged integer in r1.
@@ -8422,7 +8495,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ mov(r0, Operand(r2));
}
@@ -9248,15 +9321,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
Label seq_string;
- const int kStringRepresentationEncodingMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
- // First check for sequential string.
- ASSERT_EQ(0, kStringTag);
- ASSERT_EQ(0, kSeqStringTag);
- __ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
+ // First check for flat string.
+ __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ ASSERT_EQ(0, kStringTag | kSeqStringTag);
__ b(eq, &seq_string);
// subject: Subject string
@@ -9266,8 +9335,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- __ and_(r0, r0, Operand(kStringRepresentationMask));
- __ cmp(r0, Operand(kConsStringTag));
+ ASSERT(kExternalStringTag != 0);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
__ b(ne, &runtime);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ LoadRoot(r1, Heap::kEmptyStringRootIndex);
@@ -9276,25 +9346,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ // Is the first part a flat string?
ASSERT_EQ(0, kSeqStringTag);
__ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime);
- __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
__ bind(&seq_string);
- // r1: suject string type & kStringRepresentationEncodingMask
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
- // Check that the irregexp code has been generated for an ascii string. If
- // it has, the field contains a code object otherwise it contains the hole.
-#ifdef DEBUG
- const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
- const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
- CHECK_EQ(4, kSeqAsciiString);
- CHECK_EQ(0, kSeqTwoByteString);
-#endif
+ // r0: Instance type of subject string
+ ASSERT_EQ(4, kAsciiStringTag);
+ ASSERT_EQ(0, kTwoByteStringTag);
// Find the code object based on the assumptions above.
- __ mov(r3, Operand(r1, ASR, 2), SetCC);
+ __ and_(r0, r0, Operand(kStringEncodingMask));
+ __ mov(r3, Operand(r0, ASR, 2), SetCC);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
@@ -9412,17 +9477,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ mov(r3, last_match_info_elements);
- __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -10524,13 +10587,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
ASSERT_EQ(0, kTwoByteStringTag);
__ tst(r4, Operand(kStringEncodingMask));
__ tst(r5, Operand(kStringEncodingMask), ne);
__ b(eq, &non_ascii);
// Allocate an ASCII cons string.
+ __ bind(&ascii_data);
__ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
@@ -10542,6 +10606,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // r4: first instance type.
+ // r5: second instance type.
+ __ tst(r4, Operand(kAsciiDataHintMask));
+ __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
+
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
__ jmp(&allocated);
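A sketch of the two ascii-path conditions above, with placeholder tag values (kAsciiStringTag is 4 per the assert earlier in this diff; the hint tag value is assumed): take the ascii path if both strings carry the ascii-data hint, or if one is ascii-tagged while the other carries the hint, which is what the XOR test detects.

    const int kAsciiStringTag = 4;
    const int kAsciiDataHintTag = 8;  // Assumed value, for illustration only.
    bool UseAsciiCons(int type1, int type2) {
      if ((type1 & kAsciiDataHintTag) != 0 &&
          (type2 & kAsciiDataHintTag) != 0) return true;
      int diff = (type1 ^ type2) & (kAsciiStringTag | kAsciiDataHintTag);
      return diff == (kAsciiStringTag | kAsciiDataHintTag);
    }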
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 91adff0f..be4d5561 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -315,6 +315,12 @@ class CodeGenerator: public AstVisitor {
// Main code generation function
void Generate(CompilationInfo* info);
+ // Generate the return sequence code. Should be called no more than
+ // once per compiled function, immediately after binding the return
+ // target (which can not be done more than once). The return value should
+ // be in r0.
+ void GenerateReturnSequence();
+
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
@@ -663,7 +669,9 @@ class GenericBinaryOpStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e36f595c..fa9adbd7 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -284,6 +284,9 @@ class Instr {
// with immediate
inline int RotateField() const { return Bits(11, 8); }
inline int Immed8Field() const { return Bits(7, 0); }
+ inline int Immed4Field() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtField() const {
+ return Immed4Field() << 12 | Offset12Field(); }
// Fields used in Load/Store instructions
inline int PUField() const { return Bits(24, 23); }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 1c05bc3a..40053699 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -101,6 +101,7 @@ class Decoder {
void PrintSRegister(int reg);
void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format);
+ void PrintMovwMovt(Instr* instr);
int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
@@ -375,6 +376,16 @@ int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
}
+// Print the movw or movt instruction.
+void Decoder::PrintMovwMovt(Instr* instr) {
+ int imm = instr->ImmedMovwMovtField();
+ int rd = instr->RdField();
+ PrintRegister(rd);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", imm);
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -430,7 +441,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 1;
}
case 'm': {
- if (format[1] == 'e') { // 'memop: load/store instructions
+ if (format[1] == 'w') {
+ // 'mw: movt/movw instructions.
+ PrintMovwMovt(instr);
+ return 2;
+ }
+ if (format[1] == 'e') { // 'memop: load/store instructions.
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
@@ -776,7 +792,7 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "tst'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movw'cond 'mw");
}
break;
}
@@ -794,7 +810,7 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "cmp'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movt'cond 'mw");
}
break;
}
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 48eaf46a..36ac2aa3 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -102,8 +102,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
}
if (needs_write_barrier) {
- __ mov(scratch1(), Operand(offset));
- __ RecordWrite(scratch0(), scratch1(), scratch2());
+ __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index e6196639..67328738 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -110,10 +110,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
__ mov(r1, Operand(Context::SlotOffset(slot->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
+ // registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
- __ RecordWrite(r2, r1, r0);
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
}
@@ -666,8 +666,10 @@ void FullCodeGenerator::Move(Slot* dst,
__ str(src, location);
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
- __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
- __ RecordWrite(scratch1, scratch2, src);
+ __ RecordWrite(scratch1,
+ Operand(Context::SlotOffset(dst->index())),
+ scratch2,
+ src);
}
}
@@ -715,10 +717,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ str(result_register(),
CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
__ mov(r1, Operand(cp));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
break;
@@ -1252,8 +1253,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Update the write barrier for the array store with r0 as the scratch
// register.
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
if (result_saved) {
@@ -1493,8 +1493,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(offset), r2, r3);
break;
}
@@ -1648,6 +1647,30 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ VisitForValue(key, kAccumulator);
+ __ mov(r2, r0);
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
+ in_loop);
+ __ Call(ic, mode);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Apply(context_, r0);
+}
+
+
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@@ -1743,35 +1766,28 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
+ // Call to a keyed property.
+ // For a synthetic property use keyed load IC followed by function call,
+ // for a regular property use keyed CallIC.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
if (prop->is_synthetic()) {
+ VisitForValue(prop->key(), kAccumulator);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
__ pop(r1); // We do not need to keep the receiver.
- } else {
- __ ldr(r1, MemOperand(sp, 0)); // Keep receiver, to call function on.
- }
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (prop->is_synthetic()) {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
// Push result (function).
__ push(r0);
// Push Global receiver.
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ push(r1);
+ EmitCallWithStub(expr);
} else {
- // Pop receiver.
- __ pop(r1);
- // Push result (function).
- __ push(r0);
- __ push(r1);
+ EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
- EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous
@@ -2140,7 +2156,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -2259,8 +2276,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
Apply(context_, r0);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index d0a32e81..c6de4d8e 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -167,16 +167,22 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
+ Register result,
Register t0,
Register t1,
Register t2) {
// Register use:
//
- // elements - holds the slow-case elements of the receiver and is unchanged.
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
//
- // key - holds the smi key on entry and is unchanged if a branch is
- // performed to the miss label.
- // Holds the result on exit if the load succeeded.
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'elements' or 'key'.
+ // Unchanged on bailout so 'elements' and 'key' can be used
+ // in further computation.
//
// Scratch registers:
//
@@ -248,7 +254,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(key, FieldMemOperand(t2, kValueOffset));
+ __ ldr(result, FieldMemOperand(t2, kValueOffset));
}
@@ -298,22 +304,159 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
}
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+ // Check that the object isn't a smi.
+ __ BranchOnSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ b(ne, slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+ // objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+ __ b(lt, slow);
+}
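// A minimal C++ sketch of the receiver check above; the mask and type values
// are assumed stand-ins for KeyedLoadIC::kSlowCaseBitFieldMask and
// JS_OBJECT_TYPE, and the struct is a simplified map, not the real V8 one.
#include <stdint.h>
struct MapSketch { uint8_t bit_field; uint8_t instance_type; };
static bool ReceiverIsFastCase(bool is_smi, const MapSketch& map,
                               uint8_t slow_case_mask, uint8_t js_object_type) {
  if (is_smi) return false;                          // smis take the slow path
  if (map.bit_field & slow_case_mask) return false;  // interceptor/access check
  // JSValue wrappers sort below JS_OBJECT_TYPE and go slow, so indexing into
  // string objects is handled by the runtime.
  return map.instance_type >= js_object_type;
}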
+
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ b(ne, not_fast_array);
+ // Check that the key (index) is within bounds.
+ __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch1));
+ __ b(hs, out_of_range);
+ // Fast case: Do the load.
+ __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ ldr(scratch2,
+ MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, ip);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ b(eq, out_of_range);
+ __ mov(result, scratch2);
+}
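// A hedged sketch of the fast path above, over a simplified elements layout
// (map word, smi-tagged length, then pointer-size slots); 'hole' stands in
// for the-hole value and kSmiTagSize is taken as 1, as in the code above.
#include <stdint.h>
struct ElementsSketch { uint32_t map; uint32_t length; const uint32_t* slots; };
static bool FastArrayLoadSketch(const ElementsSketch* elements,
                                uint32_t fixed_array_map, uint32_t hole,
                                uint32_t smi_key, uint32_t* result) {
  if (elements->map != fixed_array_map) return false;  // not fast mode
  if (smi_key >= elements->length) return false;       // smi vs. smi compare
  uint32_t value = elements->slots[smi_key >> 1];      // scaled smi index
  if (value == hole) return false;  // prototype chain must be searched
  *result = value;                  // only written on success ('result' rule)
  return true;
}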
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if the key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // The key is not a smi.
+ // Is it a string?
+ __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
+ __ b(ge, not_symbol);
+
+ // Is the string an array index, with cached numeric value?
+ __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ b(eq, index_string);
+
+ // Is the string a symbol?
+ // map: key map
+ __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ tst(hash, Operand(kIsSymbolMask));
+ __ b(eq, not_symbol);
+}
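// The classification above as plain C++; the masks and the non-string type
// boundary are assumed parameters rather than the real String constants.
#include <stdint.h>
enum KeyKind { kIndexString, kSymbol, kNeitherKind };
static KeyKind ClassifyKey(uint32_t instance_type, uint32_t hash_field,
                           uint32_t first_nonstring_type,
                           uint32_t contains_index_mask,
                           uint32_t is_symbol_mask) {
  if (instance_type >= first_nonstring_type) return kNeitherKind;
  // A zero field here means the string caches a numeric array index.
  if ((hash_field & contains_index_mask) == 0) return kIndexString;
  if ((instance_type & is_symbol_mask) == 0) return kNeitherKind;
  return kSymbol;  // the generated code falls through for symbols
}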
+
+
+// Picks out an array index from the hash field.
+static void GenerateIndexFromHash(MacroAssembler* masm,
+ Register key,
+ Register hash) {
+ // Register use:
+ // key - holds the smi-tagged array index on exit (the key is overwritten).
+ // hash - holds the key's hash. Clobbered.
+
+ // If the hash field contains an array index, pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it do not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ // Here we actually clobber the key which will be used if calling into the
+ // runtime later. However, as the new key is the numeric value of the string
+ // key, there is no difference in using either key.
+ __ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ __ mov(key, Operand(hash, LSL, kSmiTagSize));
+}
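// A worked sketch of the extraction above with assumed values for the hash
// field constants (the real ones live in objects.h): shift right to drop
// the hash flags, mask to the index bits, then smi-tag by shifting left once.
#include <stdint.h>
static const int kHashShiftSketch = 2;        // assumed; >= kSmiTagSize
static const int kIndexValueBitsSketch = 24;  // assumed
static uint32_t SmiTaggedIndexFromHash(uint32_t hash_field) {
  uint32_t index = (hash_field >> kHashShiftSketch) &
                   ((1u << kIndexValueBitsSketch) - 1);  // the Ubfx above
  return index << 1;  // kSmiTagSize == 1: smi-tag the numeric index
}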
+
+
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind) {
// ----------- S t a t e -------------
+ // -- r1 : receiver
// -- r2 : name
- // -- lr : return address
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// If the stub cache probing failed, the receiver might be a value.
@@ -355,9 +498,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
- // Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
}
@@ -390,7 +531,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -443,13 +584,11 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ CheckAccessGlobalProxy(r1, r0, &miss);
__ b(&invoke);
- // Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -465,7 +604,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
__ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id))));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -496,18 +635,165 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into r1.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc);
+}
+
+
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into r1.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ BranchOnNotSmi(r2, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call);
+
+ GenerateFastArrayLoad(
+ masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3);
+
+ __ bind(&do_call);
+ // The receiver in r1 is not used after this point.
+ // r2: key
+ // r1: function
+
+ // Check that the value in r1 is a JSFunction.
+ __ BranchOnSmi(r1, &slow_call);
+ __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
+ __ b(ne, &slow_call);
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+ __ bind(&check_number_dictionary);
+ // r2: key
+ // r3: elements map
+ // r4: elements
+ // Check whether the elements array is a number dictionary.
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &slow_load);
+ __ mov(r0, Operand(r2, ASR, kSmiTagSize));
+ // r0: untagged index
+ GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3);
+ __ jmp(&do_call);
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3);
+ __ EnterInternalFrame();
+ __ push(r2); // save the key
+ __ Push(r1, r2); // pass the receiver and the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(r2); // restore the key
+ __ LeaveInternalFrame();
+ __ mov(r1, r0);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache);
+
+ __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &lookup_monomorphic_cache);
+
+ GenerateDictionaryLoad(
+ masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or an access check,
+ // - the key is neither a smi nor a symbol,
+ // - the value loaded is not a function, or
+ // - there is hope that the runtime will create a monomorphic call stub
+ //   that will be fetched next time.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ GenerateIndexFromHash(masm, r2, r3);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
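// A hedged sketch of the dispatch order implemented above; the predicates
// are stand-ins for the generated checks, not real V8 calls.
enum KeyedCallPath { kFastElements, kNumberDict, kSlowLoad,
                     kPropertyDict, kCacheProbe, kMissPath };
static KeyedCallPath ClassifyKeyedCall(bool key_is_smi, bool fast_elements,
                                       bool number_dict, bool index_string,
                                       bool is_symbol, bool slow_properties) {
  if (key_is_smi || index_string) {   // index strings rejoin the smi path
    if (fast_elements) return kFastElements;
    if (number_dict) return kNumberDict;
    return kSlowLoad;                 // Runtime::kKeyedGetProperty
  }
  if (!is_symbol) return kMissPath;   // neither smi nor symbol
  return slow_properties ? kPropertyDict : kCacheProbe;
}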
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNREACHABLE();
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc);
}
@@ -759,49 +1045,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register key = r0;
Register receiver = r1;
- // Check that the object isn't a smi.
- __ BranchOnSmi(receiver, &slow);
- // Get the map of the receiver.
- __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(kSlowCaseBitFieldMask));
- __ b(ne, &slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_OBJECT_TYPE));
- __ b(lt, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &check_pixel_array);
- // Check that the key (index) is within bounds.
- __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ cmp(key, Operand(r3));
- __ b(hs, &slow);
- // Fast case: Do the load.
- __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, &slow);
- __ mov(r0, r2);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
@@ -831,7 +1084,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
- GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
+ GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@@ -840,24 +1093,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
- // The key is not a smi.
- // Is it a string?
- // r0: key
- // r1: receiver
- __ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &slow);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r3, Operand(String::kContainsCachedArrayIndexMask));
- __ b(eq, &index_string);
-
- // Is the string a symbol?
- // r2: key map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- ASSERT(kSymbolTag != 0);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, &slow);
+ GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -873,7 +1109,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- __ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+ __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
@@ -918,25 +1154,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret();
- __ b(&slow);
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- // r0: key (string)
- // r1: receiver
- // r3: hash field
- // We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(r3, r3, Operand(String::kArrayIndexValueMask));
- // Here we actually clobber the key (r0) which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
+ GenerateIndexFromHash(masm, key, r3);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1120,7 +1339,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Use r0 for result as key is not needed any more.
- __ AllocateHeapNumber(r0, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, r3, r4, r6, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -1151,7 +1371,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
@@ -1188,7 +1409,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
- __ AllocateHeapNumber(r4, r5, r6, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -1204,7 +1426,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
@@ -1215,7 +1438,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r3, r4, r5, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -1473,7 +1697,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ Ret(eq);
// Update write barrier for the elements array address.
__ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, r4, r5);
+ __ RecordWrite(elements, Operand(r4), r5, r6);
__ Ret();
}
@@ -1665,32 +1889,29 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
+ // vldr requires the offset to be a multiple of 4, so we cannot
+ // fold -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
__ vcvt_f32_f64(s0, d0);
__ vmov(r5, s0);
__ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
- Label done;
-
// Need to perform float-to-int conversion.
- // Test for NaN.
- __ vcmp(d0, d0);
- // Move vector status bits to normal status bits.
- __ vmrs(v8::internal::pc);
- __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
- __ b(vs, &done);
-
- // Test whether exponent equal to 0x7FF (infinity or NaN).
- __ vmov(r6, r7, d0);
- __ mov(r5, Operand(0x7FF00000));
- __ and_(r6, r6, Operand(r5));
- __ teq(r6, Operand(r5));
- __ mov(r6, Operand(0), LeaveCC, eq);
+ // Test for NaN or infinity (both give zero).
+ __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
+
+ // Hoisted load. vldr requires the offset to be a multiple of 4, so we
+ // cannot fold -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+
+ __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs and infinities have all-ones exponents, so they sign-extend to -1.
+ __ cmp(r6, Operand(-1));
+ __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
@@ -1698,10 +1919,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
-
__ vmov(r5, s0, ne);
- __ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6292b581..29e168c5 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -216,6 +216,71 @@ void MacroAssembler::Move(Register dst, Register src) {
}
+void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
+ Condition cond) {
+ if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) {
+ and_(dst, src1, src2, LeaveCC, cond);
+ return;
+ }
+ int32_t immediate = src2.immediate();
+ if (immediate == 0) {
+ mov(dst, Operand(0), LeaveCC, cond);
+ return;
+ }
+ if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
+ ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond);
+ return;
+ }
+ and_(dst, src1, src2, LeaveCC, cond);
+}
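// Why the power-of-two test above works: and'ing with 2^n - 1 keeps the low
// n bits, which is exactly ubfx(dst, src, 0, n). A plain C++ restatement:
#include <stdint.h>
static bool IsLowBitMask(uint32_t imm) {
  // imm is odd and imm + 1 is a power of two  <=>  imm == 2^n - 1, n >= 1.
  return (imm & 1) != 0 && ((imm + 1) & imm) == 0;
}
// e.g. IsLowBitMask(0xff) is true (ubfx width 8); IsLowBitMask(0xf0) is not.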
+
+
+void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
+ Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
+ if (lsb != 0) {
+ mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
+ }
+ } else {
+ ubfx(dst, src1, lsb, width, cond);
+ }
+}
+
+
+void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
+ Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
+ int shift_up = 32 - lsb - width;
+ int shift_down = lsb + shift_up;
+ if (shift_up != 0) {
+ mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
+ }
+ if (shift_down != 0) {
+ mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
+ }
+ } else {
+ sbfx(dst, src1, lsb, width, cond);
+ }
+}
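// The pre-ARMv7 fallback above in plain C++ (assuming lsb + width < 32, as
// the mask expression requires): mask the field, shift it to the top, then
// arithmetic-shift back down to sign-extend.
#include <stdint.h>
static int32_t SbfxFallback(uint32_t src, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  int shift_up = 32 - lsb - width;
  int shift_down = lsb + shift_up;
  int32_t field = (int32_t)((src & mask) << shift_up);
  return field >> shift_down;  // arithmetic shift on two's-complement targets
}
// e.g. lsb = 20, width = 11 pulls out an IEEE-754 double's exponent as a
// signed value, the pattern the NaN/infinity test in ic-arm.cc relies on.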
+
+
+void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ } else {
+ bfc(dst, lsb, width, cond);
+ }
+}
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@@ -245,31 +310,32 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::RecordWriteHelper(Register object,
- Register offset,
- Register scratch) {
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
+ InNewSpace(object, scratch1, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
- mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
-
- // Calculate region number.
- add(offset, object, Operand(offset)); // Add offset into the object.
- and_(offset, offset, Operand(ip)); // Offset into page of the object.
- mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
+ // Add offset into the object.
+ add(scratch0, object, offset);
// Calculate page address.
- bic(object, object, Operand(ip));
+ Bfc(object, 0, kPageSizeBits);
+
+ // Calculate region number.
+ Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, offset));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
+ str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
}
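// The region-marking arithmetic above in plain C++; kPageSizeBits = 13 and
// kRegionSizeLog2 = 8 are assumed stand-ins (8K pages, 32 regions per page)
// chosen so the region index fits one dirty-flag word.
#include <stdint.h>
static const int kPageBitsSketch = 13;    // assumed
static const int kRegionLog2Sketch = 8;   // assumed
static uint32_t DirtyRegionBit(uint32_t object, int32_t offset) {
  uint32_t addr = object + offset;                        // slot being written
  // Bfc(object, 0, kPageSizeBits) would clear these bits to get the page:
  uint32_t page = object & ~((1u << kPageBitsSketch) - 1);
  (void)page;  // the real helper ORs the bit into the page's dirty word
  // Ubfx(addr, kRegionSizeLog2, kPageSizeBits - kRegionSizeLog2):
  uint32_t region = (addr >> kRegionLog2Sketch) &
                    ((1u << (kPageBitsSketch - kRegionLog2Sketch)) - 1);
  return 1u << region;  // at most 32 regions, so the shift stays in range
}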
@@ -287,21 +353,23 @@ void MacroAssembler::InNewSpace(Register object,
// Will clobber 4 registers: object, scratch0, scratch1, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
- Register scratch) {
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ InNewSpace(object, scratch0, eq, &done);
// Record the actual write.
- RecordWriteHelper(object, offset, scratch);
+ RecordWriteHelper(object, offset, scratch0, scratch1);
bind(&done);
@@ -309,8 +377,8 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(offset, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
@@ -1460,6 +1528,16 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (FLAG_debug_code) {
+ LoadRoot(ip, index);
+ cmp(reg, ip);
+ Check(eq, "Register did not match expected root");
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
@@ -1578,6 +1656,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
@@ -1588,9 +1667,9 @@ void MacroAssembler::AllocateHeapNumber(Register result,
gc_required,
TAG_OBJECT);
- // Get heap number map and store it in the allocated object.
- LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
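// A usage sketch matching the call sites in this patch: the caller now loads
// the heap-number map once and passes it in, so repeated allocations can
// share one LoadRoot (register choice here is illustrative):
//   __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
//   __ AllocateHeapNumber(r0, r3, r4, r6, &slow);  // map reused from r6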
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 87f7b5fe..e02a6c8a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -93,6 +93,15 @@ class MacroAssembler: public Assembler {
Register scratch = no_reg,
Condition cond = al);
+
+ void And(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+ void Ubfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ void Sbfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ void Bfc(Register dst, int lsb, int width, Condition cond = al);
+
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.
@@ -119,13 +128,19 @@ class MacroAssembler: public Assembler {
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object, Register offset, Register scratch);
+ void RecordWriteHelper(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- // The 'scratch' register is used in the implementation and all 3 registers
+ // The 'scratch' registers are used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
- void RecordWrite(Register object, Register offset, Register scratch);
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -364,6 +379,7 @@ class MacroAssembler: public Assembler {
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required);
// ---------------------------------------------------------------------------
@@ -543,6 +559,7 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 3bdca38e..77776c2b 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1859,7 +1859,9 @@ void Simulator::DecodeType01(Instr* instr) {
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movw'cond 'rd, 'imm").
+ alu_out = instr->ImmedMovwMovtField();
+ set_register(rd, alu_out);
}
break;
}
@@ -1888,7 +1890,10 @@ void Simulator::DecodeType01(Instr* instr) {
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movt'cond 'rd, 'imm").
+ alu_out = (get_register(rd) & 0xffff) |
+ (instr->ImmedMovwMovtField() << 16);
+ set_register(rd, alu_out);
}
break;
}
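// movw/movt build a 32-bit constant in two instructions: movw sets the low
// halfword and zeroes the high one, movt replaces the high halfword. The
// simulator arms above implement exactly this; in plain C++:
#include <stdint.h>
static uint32_t Movw(uint16_t imm16) { return imm16; }
static uint32_t Movt(uint32_t rd, uint16_t imm16) {
  return (rd & 0xffffu) | ((uint32_t)imm16 << 16);
}
// Movt(Movw(0xbeef), 0xdead) == 0xdeadbeefu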
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 3992d6c5..3e5ba112 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -336,9 +336,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ b(eq, &exit);
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(receiver_reg, name_reg, scratch);
+ // Pass the now unused name_reg as a scratch register.
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -352,8 +351,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(scratch, name_reg, receiver_reg);
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register r0).
@@ -1019,6 +1017,14 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, miss);
+ }
+}
+
+
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1035,6 +1041,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
const int argc = arguments().immediate();
// Get the receiver of the function from the stack into r0.
@@ -1078,6 +1086,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@@ -1127,6 +1137,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@@ -1198,6 +1210,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
+ GenerateNameCheck(name, &miss_in_smi_check);
+
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
@@ -1337,6 +1351,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1384,6 +1400,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 334ca35d..8b90f424 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -367,6 +367,7 @@ void VirtualFrame::CallCodeObject(Handle<Code> code,
int dropped_args) {
switch (code->kind()) {
case Code::CALL_IC:
+ case Code::KEYED_CALL_IC:
case Code::FUNCTION:
break;
case Code::KEYED_LOAD_IC:
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index e97ad496..d8dc5c6c 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -212,10 +212,9 @@ class VirtualFrame : public ZoneObject {
void Enter();
void Exit();
- // Prepare for returning from the frame by spilling locals and
- // dropping all non-locals elements in the virtual frame. This
+ // Prepare for returning from the frame by dropping all non-local elements
+ // in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
+ // shared return site. No spill code is emitted. The value to return should
+ // be in r0.
inline void PrepareForReturn();
// Number of local variables beyond which we use a loop for allocating.