Diffstat (limited to 'src/x64/macro-assembler-x64.cc')
-rw-r--r--  src/x64/macro-assembler-x64.cc | 1117
 1 file changed, 589 insertions(+), 528 deletions(-)
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 38ada92a..b2f69bb7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -38,16 +38,15 @@ namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
- unresolved_(0),
- generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ : Assembler(buffer, size),
+ unresolved_(0),
+ generating_stub_(false),
+ allow_stub_calls_(true),
+ code_object_(Heap::undefined_value()) {
}
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
movq(destination, Operand(r13, index << kPointerSizeLog2));
}
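
For reference, r13 is dedicated to the root array on x64, so LoadRoot is a
single indexed load. A minimal sketch of the offset arithmetic it encodes,
assuming 8-byte pointers (RootOffset is an illustrative name, not a V8
declaration):

    #include <cstdint>

    // Byte offset of root |index| from the root-array base kept in r13.
    inline intptr_t RootOffset(int index) {
      const int kPointerSizeLog2 = 3;  // log2(8): 8-byte pointers on x64
      return static_cast<intptr_t>(index) << kPointerSizeLog2;
    }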
@@ -57,14 +56,12 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
}
-void MacroAssembler::CompareRoot(Register with,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
cmpq(with, Operand(r13, index << kPointerSizeLog2));
}
-void MacroAssembler::CompareRoot(Operand with,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
}
@@ -144,9 +141,9 @@ class RecordWriteStub : public CodeStub {
// Minor key encoding in 12 bits of three registers (object, address and
// scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField<uint32_t, 0, 4> {};
- class AddressBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {};
+ class ScratchBits : public BitField<uint32_t, 0, 4> {};
+ class AddressBits : public BitField<uint32_t, 4, 4> {};
+ class ObjectBits : public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
@@ -167,33 +164,45 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
+// If offset is zero, then the smi_index register contains the array index into
+// the elements array represented as a smi. Otherwise it can be used as a
+// scratch register.
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
- Register scratch) {
+ Register smi_index) {
// First, check if a remembered set write is even needed. The tests below
// catch stores of smis and stores into the young generation (which does
// not have space for the remembered set bits).
Label done;
+ JumpIfSmi(value, &done);
+ RecordWriteNonSmi(object, offset, value, smi_index);
+ bind(&done);
+}
+
+
+void MacroAssembler::RecordWriteNonSmi(Register object,
+ int offset,
+ Register scratch,
+ Register smi_index) {
+ Label done;
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
- movq(value, object);
+ movq(scratch, object);
ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(value, kScratchRegister);
+ cmpq(scratch, kScratchRegister);
j(equal, &done);
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'scratch'.
- lea(value, Operand(object, offset));
+ lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
- and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
- shr(value, Immediate(kObjectAlignmentBits));
+ and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+ shr(scratch, Immediate(kObjectAlignmentBits));
// Compute the page address from the heap object pointer, leave it in
// 'object' (immediate value is sign extended).
@@ -203,24 +212,26 @@ void MacroAssembler::RecordWrite(Register object,
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- bts(Operand(object, Page::kRSetOffset), value);
+ bts(Operand(object, Page::kRSetOffset), scratch);
} else {
- Register dst = scratch;
+ Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an offset
- // into an array of pointers.
- lea(dst, Operand(object, dst, times_half_pointer_size,
+ // KeyedStoreIC::GenerateGeneric.
+ SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+ lea(dst, Operand(object,
+ index.reg,
+ index.scale,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
- RecordWriteHelper(this, object, dst, value);
+ RecordWriteHelper(this, object, dst, scratch);
} else {
- RecordWriteStub stub(object, dst, value);
+ RecordWriteStub stub(object, dst, scratch);
CallStub(&stub);
}
}
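
The fast path above turns a slot address into a bit index in the page's
remembered set: mask to the offset within the page, divide by the object
alignment, and let bts set that bit. A minimal sketch, with illustrative
constants standing in for Page::kPageAlignmentMask and kObjectAlignmentBits:

    #include <cstdint>

    // Bit index that bts sets in the page's remembered set, assuming
    // 8 KB pages and 8-byte object alignment (illustrative values).
    inline uint32_t RSetBitIndex(uintptr_t slot_address) {
      const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 13) - 1;
      const int kObjectAlignmentBits = 3;
      return static_cast<uint32_t>(
          (slot_address & kPageAlignmentMask) >> kObjectAlignmentBits);
    }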
@@ -348,8 +359,7 @@ void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
// Set the entry point and jump to the C entry runtime stub.
movq(rbx, ext);
CEntryStub ces(result_size);
- movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
- jmp(kScratchRegister);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -364,7 +374,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
unresolved_.Add(entry);
@@ -372,7 +381,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
-
Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
bool* resolved) {
// Move the builtin function into the temporary function slot by
@@ -386,7 +394,6 @@ Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
movq(rdi, FieldOperand(rdx, builtins_offset));
-
return Builtins::GetCode(id, resolved);
}
@@ -418,244 +425,185 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
}
-
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
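
This constant is the heart of the change: with kSmiTagSize == 1 and
kSmiShiftSize == 31 on x64, kSmiShift is 32, so a smi now keeps its 32-bit
payload in the upper half of the word while the low 32 bits (tag included)
are all zero. A minimal sketch of the encoding, assuming two's-complement
64-bit words:

    #include <cstdint>

    const int kSmiShift = 32;  // kSmiTagSize + kSmiShiftSize on x64

    inline int64_t TagSmi(int32_t value) {    // cf. Integer32ToSmi
      return static_cast<int64_t>(value) << kSmiShift;
    }
    inline int32_t UntagSmi(int64_t tagged) { // cf. SmiToInteger32/64
      return static_cast<int32_t>(tagged >> kSmiShift);
    }

This is also why the overflow checks disappear below: every 32-bit integer
is representable, so Integer32ToSmi can never fail.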
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
- ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
-#ifdef DEBUG
- cmpq(src, Immediate(0xC0000000u));
- Check(positive, "Smi conversion overflow");
-#endif
- if (dst.is(src)) {
- addl(dst, src);
- } else {
- lea(dst, Operand(src, src, times_1, 0));
+ if (!dst.is(src)) {
+ movl(dst, src);
}
+ shl(dst, Immediate(kSmiShift));
}
void MacroAssembler::Integer32ToSmi(Register dst,
Register src,
Label* on_overflow) {
- ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
+ // A 32-bit integer always fits in a long smi.
if (!dst.is(src)) {
movl(dst, src);
}
- addl(dst, src);
- j(overflow, on_overflow);
+ shl(dst, Immediate(kSmiShift));
}
-void MacroAssembler::Integer64AddToSmi(Register dst,
- Register src,
- int constant) {
-#ifdef DEBUG
- movl(kScratchRegister, src);
- addl(kScratchRegister, Immediate(constant));
- Check(no_overflow, "Add-and-smi-convert overflow");
- Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
- Check(valid, "Add-and-smi-convert overflow");
-#endif
- lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
+ Register src,
+ int constant) {
+ if (dst.is(src)) {
+ addq(dst, Immediate(constant));
+ } else {
+ lea(dst, Operand(src, constant));
+ }
+ shl(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
- ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
- movl(dst, src);
+ movq(dst, src);
}
- sarl(dst, Immediate(kSmiTagSize));
+ shr(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
- ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
- movsxlq(dst, src);
- sar(dst, Immediate(kSmiTagSize));
-}
-
-
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
- if (power == 0) {
- SmiToInteger64(dst, src);
- return;
+ if (!dst.is(src)) {
+ movq(dst, src);
}
- movsxlq(dst, src);
- shl(dst, Immediate(power - 1));
-}
-
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- testl(src, Immediate(kSmiTagMask));
- j(zero, on_smi);
+ sar(dst, Immediate(kSmiShift));
}
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
- Condition not_smi = CheckNotSmi(src);
- j(not_smi, on_not_smi);
+void MacroAssembler::SmiTest(Register src) {
+ testq(src, src);
}
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
- Label* on_not_positive_smi) {
- Condition not_positive_smi = CheckNotPositiveSmi(src);
- j(not_positive_smi, on_not_positive_smi);
+void MacroAssembler::SmiCompare(Register dst, Register src) {
+ cmpq(dst, src);
}
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- int constant,
- Label* on_equals) {
- if (Smi::IsValid(constant)) {
- Condition are_equal = CheckSmiEqualsConstant(src, constant);
- j(are_equal, on_equals);
+void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (src->value() == 0) {
+ testq(dst, dst);
+ } else {
+ Move(kScratchRegister, src);
+ cmpq(dst, kScratchRegister);
}
}
-void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
- int constant,
- Label* on_greater_equals) {
- if (Smi::IsValid(constant)) {
- Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
- j(are_greater_equal, on_greater_equals);
- } else if (constant < Smi::kMinValue) {
- jmp(on_greater_equals);
- }
+void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+ cmpq(dst, src);
}
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(ReverseCondition(is_valid), on_invalid);
+void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+ if (src->value() == 0) {
+ // Zero is the only long smi whose tagged value fits in 32 bits.
+ cmpq(dst, Immediate(0));
+ } else {
+ Move(kScratchRegister, src);
+ cmpq(dst, kScratchRegister);
+ }
}
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi) {
- Condition not_both_smi = CheckNotBothSmi(src1, src2);
- j(not_both_smi, on_not_both_smi);
-}
-
-Condition MacroAssembler::CheckSmi(Register src) {
- testb(src, Immediate(kSmiTagMask));
- return zero;
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+ Register src,
+ int power) {
+ ASSERT(power >= 0);
+ ASSERT(power < 64);
+ if (power == 0) {
+ SmiToInteger64(dst, src);
+ return;
+ }
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ if (power < kSmiShift) {
+ sar(dst, Immediate(kSmiShift - power));
+ } else if (power > kSmiShift) {
+ shl(dst, Immediate(power - kSmiShift));
+ }
}
-Condition MacroAssembler::CheckNotSmi(Register src) {
+Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
- return not_zero;
+ return zero;
}
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
- testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+ movq(kScratchRegister, src);
+ rol(kScratchRegister, Immediate(1));
+ testl(kScratchRegister, Immediate(0x03));
return zero;
}
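
The rotate trick above folds two tests into one: after rol by one, the sign
bit (bit 63) lands in bit 0 and the tag bit in bit 1, so a positive smi is
exactly a word whose rotated low two bits are clear. A minimal sketch:

    #include <cstdint>

    // Equivalent of CheckPositiveSmi's rol/test sequence.
    inline bool IsPositiveSmi(uint64_t word) {
      uint64_t rotated = (word << 1) | (word >> 63);  // rol word, 1
      return (rotated & 0x03) == 0;  // sign bit and tag bit both clear
    }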
-Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
- ASSERT_EQ(0, kSmiTag);
- testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
- return not_zero;
-}
-
-
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
}
movl(kScratchRegister, first);
orl(kScratchRegister, second);
- return CheckSmi(kScratchRegister);
-}
-
-
-Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
- ASSERT_EQ(0, kSmiTag);
- if (first.is(second)) {
- return CheckNotSmi(first);
- }
- movl(kScratchRegister, first);
- or_(kScratchRegister, second);
- return CheckNotSmi(kScratchRegister);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ return zero;
}
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- cmpl(src, Immediate(0x40000000));
+ movq(kScratchRegister, src);
+ rol(kScratchRegister, Immediate(1));
+ cmpq(kScratchRegister, Immediate(1));
return equal;
}
-Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
- if (constant == 0) {
- testl(src, src);
- return zero;
- }
- if (Smi::IsValid(constant)) {
- cmpl(src, Immediate(Smi::FromInt(constant)));
- return zero;
- }
- // Can't be equal.
- UNREACHABLE();
- return no_condition;
-}
-
-Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
- int constant) {
- if (constant == 0) {
- testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
- return positive;
- }
- if (Smi::IsValid(constant)) {
- cmpl(src, Immediate(Smi::FromInt(constant)));
- return greater_equal;
- }
- // Can't be equal.
- UNREACHABLE();
- return no_condition;
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
}
-Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can be converted to a smi if it is in the
- // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
- // representation have bits 30 and 31 be equal.
- cmpl(src, Immediate(0xC0000000u));
- return positive;
+Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
+ // An unsigned 32-bit integer value is valid as long as the high bit
+ // is not set.
+ testq(src, Immediate(0x80000000));
+ return zero;
}
-void MacroAssembler::SmiNeg(Register dst,
- Register src,
- Label* on_not_smi_result) {
- if (!dst.is(src)) {
- movl(dst, src);
+void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result);
}
- negl(dst);
- testl(dst, Immediate(0x7fffffff));
- // If the result is zero or 0x80000000, negation failed to create a smi.
- j(equal, on_not_smi_result);
}
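
The equality test above works because the two inputs whose negation is not a
smi are exactly the inputs that negate to themselves: zero (whose negation
must become the heap number -0.0) and Smi::kMinValue (whose negation
overflows). A minimal sketch, using unsigned arithmetic so the wrap-around
is well defined:

    #include <cstdint>

    // True when neg produces a valid smi result (cf. SmiNeg's cmpq).
    inline bool NegationYieldsSmi(uint64_t tagged) {
      uint64_t negated = 0 - tagged;  // two's-complement neg
      return negated != tagged;       // false for 0 and min-smi (1 << 63)
    }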
@@ -664,42 +612,39 @@ void MacroAssembler::SmiAdd(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movl(dst, src1);
- }
- addl(dst, src2);
- if (!dst.is(src1)) {
- j(overflow, on_not_smi_result);
- } else {
+ if (dst.is(src1)) {
+ addq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
- subl(src1, src2);
+ subq(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result);
}
}
-
void MacroAssembler::SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movl(dst, src1);
- }
- subl(dst, src2);
- if (!dst.is(src1)) {
- j(overflow, on_not_smi_result);
- } else {
+ if (dst.is(src1)) {
+ subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
- addl(src1, src2);
+ addq(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
}
}
@@ -709,80 +654,137 @@ void MacroAssembler::SmiMul(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(dst, src1);
+ Label failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
- imull(dst, src2);
- j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ xor_(dst, src2);
+ j(positive, &zero_correct_result); // Result was positive zero.
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case. The frame is unchanged
- // in this block, so local control flow can use a Label rather
- // than a JumpTarget.
- Label non_zero_result;
- testl(dst, dst);
- j(not_zero, &non_zero_result);
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
- // Test whether either operand is negative (the other must be zero).
- orl(kScratchRegister, src2);
- j(negative, on_not_smi_result);
- bind(&non_zero_result);
+ bind(&zero_correct_result);
+ xor_(dst, dst);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+ // One of src1 and src2 is zero; check whether the other one is
+ // negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result);
+ bind(&correct_result);
+ }
}
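
The zero-product path above relies on a sign trick: when the product is
zero, one factor is zero, so the xor of the two operands carries the sign of
the other factor, and a negative sign means the true result is -0.0.
A minimal sketch:

    #include <cstdint>

    // True when a zero product must go to the slow case (cf. SmiMul).
    inline bool ZeroProductNeedsSlowCase(int64_t a, int64_t b) {
      return (a ^ b) < 0;  // signs differ, so one factor was negative
    }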
void MacroAssembler::SmiTryAddConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result) {
// Does not assume that src is a smi.
- ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
ASSERT_EQ(0, kSmiTag);
- ASSERT(Smi::IsValid(constant));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
- Register tmp = (src.is(dst) ? kScratchRegister : dst);
- movl(tmp, src);
- addl(tmp, Immediate(Smi::FromInt(constant)));
- if (tmp.is(kScratchRegister)) {
- j(overflow, on_not_smi_result);
- testl(tmp, Immediate(kSmiTagMask));
- j(not_zero, on_not_smi_result);
- movl(dst, tmp);
+ JumpIfNotSmi(src, on_not_smi_result);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ Move(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ addq(dst, kScratchRegister);
} else {
- movl(kScratchRegister, Immediate(kSmiTagMask));
- cmovl(overflow, dst, kScratchRegister);
- testl(dst, kScratchRegister);
- j(not_zero, on_not_smi_result);
+ Move(dst, constant);
+ addq(dst, src);
}
}
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result) {
- ASSERT(Smi::IsValid(constant));
- if (on_not_smi_result == NULL) {
- if (dst.is(src)) {
- movl(dst, src);
- } else {
- lea(dst, Operand(src, constant << kSmiTagSize));
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
}
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ addq(dst, kScratchRegister);
+ Label result_ok;
+ j(no_overflow, &result_ok);
+ subq(dst, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&result_ok);
} else {
+ Move(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
+ if (constant->value() == 0) {
if (!dst.is(src)) {
- movl(dst, src);
+ movq(dst, src);
}
- addl(dst, Immediate(Smi::FromInt(constant)));
- if (!dst.is(src)) {
- j(overflow, on_not_smi_result);
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation, which takes only two instructions.
+ if (constant->value() == Smi::kMinValue) {
+ Move(kScratchRegister, constant);
+ movq(dst, src);
+ subq(dst, kScratchRegister);
} else {
- Label result_ok;
- j(no_overflow, &result_ok);
- subl(dst, Immediate(Smi::FromInt(constant)));
- jmp(on_not_smi_result);
- bind(&result_ok);
+ Move(dst, Smi::FromInt(-constant->value()));
+ addq(dst, src);
}
}
}
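
The Smi::kMinValue special case above exists because the subtraction is
rewritten as addition of the negation, and the minimum smi value has no
representable negation. A minimal sketch of the guard, assuming a 32-bit smi
payload:

    #include <cstdint>
    #include <climits>

    // True when "dst = (-c) + src" is a legal rewrite of "dst = src - c".
    inline bool NegationRepresentable(int32_t c) {
      return c != INT32_MIN;  // -INT32_MIN does not fit in int32_t
    }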
@@ -790,24 +792,33 @@ void MacroAssembler::SmiAddConstant(Register dst,
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result) {
- ASSERT(Smi::IsValid(constant));
- Smi* smi_value = Smi::FromInt(constant);
- if (dst.is(src)) {
- // Optimistic subtract - may change value of dst register,
- // if it has garbage bits in the higher half, but will not change
- // the value as a tagged smi.
- subl(dst, Immediate(smi_value));
- if (on_not_smi_result != NULL) {
- Label add_success;
- j(no_overflow, &add_success);
- addl(dst, Immediate(smi_value));
- jmp(on_not_smi_result);
- bind(&add_success);
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
}
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ Label sub_success;
+ j(no_overflow, &sub_success);
+ addq(src, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&sub_success);
} else {
- UNIMPLEMENTED(); // Not used yet.
+ if (constant->value() == Smi::kMinValue) {
+ Move(kScratchRegister, constant);
+ movq(dst, src);
+ subq(dst, kScratchRegister);
+ j(overflow, on_not_smi_result);
+ } else {
+ Move(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
}
}
@@ -816,38 +827,61 @@ void MacroAssembler::SmiDiv(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
ASSERT(!src2.is(rax));
ASSERT(!src2.is(rdx));
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
Label positive_divisor;
- testl(src2, src2);
+ testq(src2, src2);
j(zero, on_not_smi_result);
- j(positive, &positive_divisor);
- // Check for negative zero result. If the dividend is zero, and the
- // divisor is negative, return a floating point negative zero.
- testl(src1, src1);
- j(zero, on_not_smi_result);
- bind(&positive_divisor);
- // Sign extend src1 into edx:eax.
- if (!src1.is(rax)) {
- movl(rax, src1);
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
}
- cdq();
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+ // We combine this with negative zero test (negative zero only happens
+ // when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ Label safe_div;
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ } else {
+ j(negative, on_not_smi_result);
+ }
+ bind(&safe_div);
+ SmiToInteger32(src2, src2);
+ // Sign extend src1 into edx:eax.
+ cdq();
idivl(src2);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by
- // idiv instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- cmpl(rax, Immediate(0x40000000));
- j(equal, on_not_smi_result);
+ Integer32ToSmi(src2, src2);
// Check that the remainder is zero.
testl(rdx, rdx);
- j(not_zero, on_not_smi_result);
- // Tag the result and store it in the destination register.
+ if (src1.is(rax)) {
+ Label smi_result;
+ j(zero, &smi_result);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
Integer32ToSmi(dst, rax);
}
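
The checks above guard the one quotient idivl cannot produce: Smi::kMinValue
divided by -1 is +2^31, which does not fit in 32 bits, so the hardware
raises #DE rather than setting a flag. A minimal sketch of the conditions
(the generated code is deliberately coarser and also bails out for min-value
divided by any negative number):

    #include <cstdint>
    #include <climits>

    // True when the 32-bit idiv is safe and yields a possible smi result.
    inline bool DivisionIsSafe(int32_t dividend, int32_t divisor) {
      if (divisor == 0) return false;                          // +/-Infinity
      if (dividend == 0 && divisor < 0) return false;          // -0.0 result
      if (dividend == INT_MIN && divisor == -1) return false;  // raises #DE
      return true;
    }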
@@ -862,109 +896,136 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src2.is(rax));
ASSERT(!src2.is(rdx));
ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
- testl(src2, src2);
+ testq(src2, src2);
j(zero, on_not_smi_result);
if (src1.is(rax)) {
- // Mist remember the value to see if a zero result should
- // be a negative zero.
- movl(kScratchRegister, rax);
- } else {
- movl(rax, src1);
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ Label safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
}
+ jmp(on_not_smi_result);
+ bind(&safe_div);
+
// Sign extend eax into edx:eax.
cdq();
idivl(src2);
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, return a floating point negative zero.
- Label non_zero_result;
- testl(rdx, rdx);
- j(not_zero, &non_zero_result);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- testl(kScratchRegister, kScratchRegister);
- } else {
- testl(src1, src1);
+ movq(src1, kScratchRegister);
}
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ Label smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result);
+ testq(src1, src1);
j(negative, on_not_smi_result);
- bind(&non_zero_result);
- if (!dst.is(rdx)) {
- movl(dst, rdx);
- }
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
}
void MacroAssembler::SmiNot(Register dst, Register src) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+ // Set tag and padding bits before negating, so that they are zero afterwards.
+ movl(kScratchRegister, Immediate(~0));
if (dst.is(src)) {
- not_(dst);
- // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
- xor_(src, Immediate(kSmiTagMask));
+ xor_(dst, kScratchRegister);
} else {
- ASSERT_EQ(0, kSmiTag);
- lea(dst, Operand(src, kSmiTagMask));
- not_(dst);
+ lea(dst, Operand(src, kScratchRegister, times_1, 0));
}
+ not_(dst);
}
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+ ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
- movl(dst, src1);
+ movq(dst, src1);
}
and_(dst, src2);
}
-void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
- ASSERT(Smi::IsValid(constant));
- if (!dst.is(src)) {
- movl(dst, src);
+void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
+ if (constant->value() == 0) {
+ xor_(dst, dst);
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ Move(kScratchRegister, constant);
+ and_(dst, kScratchRegister);
+ } else {
+ Move(dst, constant);
+ and_(dst, src);
}
- and_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- movl(dst, src1);
+ movq(dst, src1);
}
or_(dst, src2);
}
-void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
- ASSERT(Smi::IsValid(constant));
- if (!dst.is(src)) {
- movl(dst, src);
+void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ Move(kScratchRegister, constant);
+ or_(dst, kScratchRegister);
+ } else {
+ Move(dst, constant);
+ or_(dst, src);
}
- or_(dst, Immediate(Smi::FromInt(constant)));
}
+
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- movl(dst, src1);
+ movq(dst, src1);
}
xor_(dst, src2);
}
-void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
- ASSERT(Smi::IsValid(constant));
- if (!dst.is(src)) {
- movl(dst, src);
+void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ Move(kScratchRegister, constant);
+ xor_(dst, kScratchRegister);
+ } else {
+ Move(dst, constant);
+ xor_(dst, src);
}
- xor_(dst, Immediate(Smi::FromInt(constant)));
}
-
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
+ ASSERT(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
- sarl(dst, Immediate(shift_value));
- and_(dst, Immediate(~kSmiTagMask));
+ sar(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -980,20 +1041,13 @@ void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movl(dst, src);
- // Untag the smi.
- sarl(dst, Immediate(kSmiTagSize));
- if (shift_value < 2) {
- // A negative Smi shifted right two is in the positive Smi range,
- // but if shifted only by zero or one, it never is.
+ movq(dst, src);
+ if (shift_value == 0) {
+ testq(dst, dst);
j(negative, on_not_smi_result);
}
- if (shift_value > 0) {
- // Do the right shift on the integer value.
- shrl(dst, Immediate(shift_value));
- }
- // Re-tag the result.
- addl(dst, dst);
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
}
}
@@ -1002,20 +1056,11 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result) {
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movl(dst, src);
- if (shift_value > 0) {
- // Treat dst as an untagged integer value equal to two times the
- // smi value of src, i.e., already shifted left by one.
- if (shift_value > 1) {
- shll(dst, Immediate(shift_value - 1));
- }
- // Convert int result to Smi, checking that it is in smi range.
- ASSERT(kSmiTagSize == 1); // adjust code if not the case
- Integer32ToSmi(dst, dst, on_not_smi_result);
- }
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ if (shift_value > 0) {
+ shl(dst, Immediate(shift_value));
}
}
@@ -1026,23 +1071,14 @@ void MacroAssembler::SmiShiftLeft(Register dst,
Label* on_not_smi_result) {
ASSERT(!dst.is(rcx));
Label result_ok;
- // Untag both operands.
- SmiToInteger32(dst, src1);
- SmiToInteger32(rcx, src2);
- shll(dst);
- // Check that the *signed* result fits in a smi.
- Condition is_valid = CheckInteger32ValidSmiValue(dst);
- j(is_valid, &result_ok);
- // Restore the relevant bits of the source registers
- // and call the slow version.
- if (dst.is(src1)) {
- shrl(dst);
- Integer32ToSmi(dst, dst);
+ // Untag shift amount.
+ if (!dst.is(src1)) {
+ movq(dst, src1);
}
- Integer32ToSmi(rcx, rcx);
- jmp(on_not_smi_result);
- bind(&result_ok);
- Integer32ToSmi(dst, dst);
+ SmiToInteger32(rcx, src2);
+ // Mask the shift amount to 5 bits; the shl opcode would use six.
+ and_(rcx, Immediate(0x1f));
+ shl(dst);
}
@@ -1050,48 +1086,62 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
Label result_ok;
- // Untag both operands.
- SmiToInteger32(dst, src1);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
SmiToInteger32(rcx, src2);
-
- shrl(dst);
- // Check that the *unsigned* result fits in a smi.
- // I.e., that it is a valid positive smi value. The positive smi
- // values are 0..0x3fffffff, i.e., neither of the top-most two
- // bits can be set.
- //
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi. If the answer cannot be represented by a
- // smi, restore the left and right arguments, and jump to slow
- // case. The low bit of the left argument may be lost, but only
- // in a case where it is dropped anyway.
- testl(dst, Immediate(0xc0000000));
- j(zero, &result_ok);
- if (dst.is(src1)) {
- shll(dst);
- Integer32ToSmi(dst, dst);
+ orl(rcx, Immediate(kSmiShift));
+ shr(dst); // Shift is 32 + (rcx & 0x1f).
+ shl(dst, Immediate(kSmiShift));
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ Label positive_result;
+ j(positive, &positive_result);
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&positive_result);
+ } else {
+ j(negative, on_not_smi_result); // src2 was zero and src1 negative.
}
- Integer32ToSmi(rcx, rcx);
- jmp(on_not_smi_result);
- bind(&result_ok);
- // Smi-tag the result in answer.
- Integer32ToSmi(dst, dst);
}
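
The orl with kSmiShift is the key instruction above: hardware masks a 64-bit
shift count to six bits, and the untagged amount is at most 31, so or'ing it
with 32 produces a single shr that untags and shifts at once. A minimal
sketch:

    #include <cstdint>

    // One-shr untag-and-shift, then retag (cf. SmiShiftLogicalRight).
    inline uint64_t ShiftLogicalRightSketch(uint64_t tagged, uint32_t n) {
      uint64_t shifted = tagged >> (32 + (n & 31));  // shr dst, cl
      return shifted << 32;                          // retag
    }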
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
- // Untag both operands.
- SmiToInteger32(dst, src1);
+ if (src1.is(rcx)) {
+ movq(kScratchRegister, src1);
+ } else if (src2.is(rcx)) {
+ movq(kScratchRegister, src2);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
SmiToInteger32(rcx, src2);
- // Shift as integer.
- sarl(dst);
- // Retag result.
- Integer32ToSmi(dst, dst);
+ orl(rcx, Immediate(kSmiShift));
+ sar(dst); // Shift by 32 + (original rcx & 0x1f).
+ shl(dst, Immediate(kSmiShift));
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else if (src2.is(rcx)) {
+ movq(src2, kScratchRegister);
+ }
}
@@ -1099,21 +1149,27 @@ void MacroAssembler::SelectNonSmi(Register dst,
Register src1,
Register src2,
Label* on_not_smis) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(src1));
ASSERT(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
- Condition not_both_smis = CheckNotBothSmi(src1, src2);
- Check(not_both_smis, "Both registers were smis.");
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
#endif
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
- movq(kScratchRegister, Immediate(kSmiTagMask));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
and_(kScratchRegister, src1);
testl(kScratchRegister, src2);
+ // If non-zero, both operands are non-smis.
j(not_zero, on_not_smis);
- // One operand is a smi.
+ // Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
subq(kScratchRegister, Immediate(1));
@@ -1123,71 +1179,96 @@ void MacroAssembler::SelectNonSmi(Register dst,
and_(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
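
The tail of SelectNonSmi is a branchless select: with kSmiTag == 0, the
expression (src1 & 1) - 1 is all ones when src1 is a smi and zero when it is
not, so masking the xor of the operands picks the other register. A minimal
sketch:

    #include <cstdint>

    // Returns b if a is a smi, else a (exactly one of them is a smi).
    inline uint64_t SelectNonSmiSketch(uint64_t a, uint64_t b) {
      uint64_t mask = (a & 1) - 1;  // ~0 if a is a smi, 0 otherwise
      return ((a ^ b) & mask) ^ a;
    }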
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+SmiIndex MacroAssembler::SmiToIndex(Register dst,
+ Register src,
+ int shift) {
ASSERT(is_uint6(shift));
- if (shift == 0) { // times_1.
- SmiToInteger32(dst, src);
- return SmiIndex(dst, times_1);
- }
- if (shift <= 4) { // 2 - 16 times multiplier is handled using ScaleFactor.
- // We expect that all smis are actually zero-padded. If this holds after
- // checking, this line can be omitted.
- movl(dst, src); // Ensure that the smi is zero-padded.
- return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
- }
- // Shift by shift-kSmiTagSize.
- movl(dst, src); // Ensure that the smi is zero-padded.
- shl(dst, Immediate(shift - kSmiTagSize));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sar(dst, Immediate(kSmiShift - shift));
+ } else {
+ shl(dst, Immediate(shift - kSmiShift));
+ }
return SmiIndex(dst, times_1);
}
-
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
// Register src holds a positive smi.
ASSERT(is_uint6(shift));
- if (shift == 0) { // times_1.
- SmiToInteger32(dst, src);
- neg(dst);
- return SmiIndex(dst, times_1);
- }
- if (shift <= 4) { // 2 - 16 times multiplier is handled using ScaleFactor.
- movl(dst, src);
- neg(dst);
- return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+ if (!dst.is(src)) {
+ movq(dst, src);
}
- // Shift by shift-kSmiTagSize.
- movl(dst, src);
neg(dst);
- shl(dst, Immediate(shift - kSmiTagSize));
+ if (shift < kSmiShift) {
+ sar(dst, Immediate(kSmiShift - shift));
+ } else {
+ shl(dst, Immediate(shift - kSmiShift));
+ }
return SmiIndex(dst, times_1);
}
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi);
+}
+
-bool MacroAssembler::IsUnsafeSmi(Smi* value) {
- return false;
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi);
}
-void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
- UNIMPLEMENTED();
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+ Label* on_not_positive_smi) {
+ Condition positive_smi = CheckPositiveSmi(src);
+ j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ Label* on_equals) {
+ SmiCompare(src, constant);
+ j(equal, on_equals);
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+ Label* on_invalid) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
+ Label* on_not_both_smi) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
}
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(dst, source);
- } else {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- movq(dst, Immediate(smi));
- }
+ Move(dst, Smi::cast(*source));
} else {
movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
}
@@ -1195,9 +1276,9 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) {
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+ ASSERT(!source->IsFailure());
if (source->IsSmi()) {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- movq(dst, Immediate(smi));
+ Move(dst, Smi::cast(*source));
} else {
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
movq(dst, kScratchRegister);
@@ -1206,21 +1287,18 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ if (source->IsSmi()) {
+ SmiCompare(dst, Smi::cast(*source));
+ } else {
+ Move(kScratchRegister, source);
+ cmpq(dst, kScratchRegister);
+ }
}
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(kScratchRegister, source);
- cmpl(dst, kScratchRegister);
- } else {
- // For smi-comparison, it suffices to compare the low 32 bits.
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- cmpl(dst, Immediate(smi));
- }
+ SmiCompare(dst, Smi::cast(*source));
} else {
ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1231,13 +1309,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Push(Handle<Object> source) {
if (source->IsSmi()) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(kScratchRegister, source);
- push(kScratchRegister);
- } else {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- push(Immediate(smi));
- }
+ Push(Smi::cast(*source));
} else {
ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1247,12 +1319,23 @@ void MacroAssembler::Push(Handle<Object> source) {
void MacroAssembler::Push(Smi* source) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(kScratchRegister, source);
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Set(kScratchRegister, smi);
push(kScratchRegister);
+ }
+}
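
The is_int32 test above succeeds for exactly one smi: since a tagged long
smi is value << 32, only the smi zero fits in a sign-extended 32-bit
immediate; every other smi has to travel through kScratchRegister. A minimal
sketch of the test:

    #include <cstdint>

    // Matches is_int32 on a tagged long smi; true only for the smi 0.
    inline bool FitsInt32Immediate(int64_t tagged) {
      return static_cast<int32_t>(tagged) == tagged;
    }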
+
+
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ testl(src, Immediate(static_cast<int32_t>(smi)));
} else {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
- push(Immediate(smi));
+ Move(kScratchRegister, source);
+ testq(src, kScratchRegister);
}
}
@@ -1270,17 +1353,8 @@ void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
- Label target;
- bind(&target);
-#endif
- jmp(kScratchRegister);
-#ifdef DEBUG
- ASSERT_EQ(kCallTargetAddressOffset,
- SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+ // TODO(X64): Inline this
+ jmp(code_object, rmode);
}
@@ -1299,17 +1373,7 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
WriteRecordedPositions();
- movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
- // Patch target is kPointer size bytes *before* target label.
- Label target;
- bind(&target);
-#endif
- call(kScratchRegister);
-#ifdef DEBUG
- ASSERT_EQ(kCallTargetAddressOffset,
- SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+ call(code_object, rmode);
}
@@ -1357,18 +1421,9 @@ void MacroAssembler::Ret() {
void MacroAssembler::FCmp() {
- fucompp();
- push(rax);
- fnstsw_ax();
- if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
- sahf();
- } else {
- shrl(rax, Immediate(8));
- and_(rax, Immediate(0xFF));
- push(rax);
- popfq();
- }
- pop(rax);
+ fucomip();
+ ffree(0);
+ fincstp();
}
@@ -1467,7 +1522,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::PushRegistersFromMemory(RegList regs) {
@@ -1484,6 +1538,7 @@ void MacroAssembler::PushRegistersFromMemory(RegList regs) {
}
}
+
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location.
@@ -1566,8 +1621,11 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- InvokeCode(Handle<Code>(code), expected, expected,
- RelocInfo::CODE_TARGET, flag);
+ InvokeCode(Handle<Code>(code),
+ expected,
+ expected,
+ RelocInfo::CODE_TARGET,
+ flag);
const char* name = Builtins::GetName(id);
int argc = Builtins::GetArgumentsCount(id);
@@ -1576,7 +1634,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry =
{ pc_offset() - kCallTargetAddressOffset, flags, name };
@@ -1600,7 +1657,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
} else {
movq(rax, Immediate(actual.immediate()));
if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for built-ins that
// don't want that done. Skip adaptation code by making it look
// like we have a match between expected and actual number of
@@ -1706,7 +1763,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
push(rsi); // Context.
- push(Immediate(Smi::FromInt(type)));
+ Push(Smi::FromInt(type));
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister);
if (FLAG_debug_code) {
@@ -1721,7 +1778,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (FLAG_debug_code) {
- movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+ Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, "stack frame types must match");
}
@@ -1730,7 +1787,6 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-
void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
@@ -1743,7 +1799,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
movq(rbp, rsp);
// Reserve room for entry stack pointer and push the debug marker.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
@@ -1824,16 +1880,6 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
movq(rcx, Operand(rbp, 1 * kPointerSize));
movq(rbp, Operand(rbp, 0 * kPointerSize));
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size > 1) {
- ASSERT_EQ(2, result_size);
- // Position above 4 argument mirrors and arguments object.
- movq(rax, Operand(rsp, 6 * kPointerSize));
- movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
-
// Pop everything up to and including the arguments and the receiver
// from the caller stack.
lea(rsp, Operand(r15, 1 * kPointerSize));
@@ -1856,8 +1902,10 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
}
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
+Register MacroAssembler::CheckMaps(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
Register scratch,
Label* miss) {
// Make sure there's no overlap between scratch and the other
@@ -1923,8 +1971,7 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
}
// Check the holder map.
- Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(holder->map()));
+ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
j(not_equal, miss);
// Log the check depth.
@@ -1941,8 +1988,6 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@@ -1996,8 +2041,8 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(kScratchRegister,
FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
+ int token_offset =
+ Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movq(scratch, FieldOperand(scratch, token_offset));
cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
@@ -2164,6 +2209,23 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
}
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch,
+ Label* gc_required) {
+ // Allocate heap number in new space.
+ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch,
+ no_reg,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
@@ -2182,5 +2244,4 @@ CodePatcher::~CodePatcher() {
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
-
} } // namespace v8::internal