summary refs log tree commit diff stats
path: root/src/arm/macro-assembler-arm.cc
diff options
context:
space:
mode:
authorSteve Block <steveblock@google.com>2010-07-08 12:39:36 +0100
committerSteve Block <steveblock@google.com>2010-07-08 12:41:04 +0100
commit8defd9ff6930b4e24729971a61cf7469daf119be (patch)
tree3be589af44201dcaead530f4046cb63e7c9b68c4 /src/arm/macro-assembler-arm.cc
parent85dec77e821ae98054f8e09ba3180c148a9264d6 (diff)
downloadandroid_external_v8-8defd9ff6930b4e24729971a61cf7469daf119be.tar.gz
android_external_v8-8defd9ff6930b4e24729971a61cf7469daf119be.tar.bz2
android_external_v8-8defd9ff6930b4e24729971a61cf7469daf119be.zip
Update V8 to r5017 as required by WebKit r62496
Change-Id: I1b4b7718d1d77ceef07f543e9150a2cb3a628f3a
Diffstat (limited to 'src/arm/macro-assembler-arm.cc')
-rw-r--r-- src/arm/macro-assembler-arm.cc 143
1 files changed, 127 insertions, 16 deletions
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 29e168c5..81fc11ef 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -310,32 +310,28 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::RecordWriteHelper(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
+ Register address,
+ Register scratch) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch1, ne, &not_in_new_space);
+ InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
- // Add offset into the object.
- add(scratch0, object, offset);
-
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
- Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
- str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch, scratch, Operand(ip, LSL, address));
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
@@ -368,8 +364,11 @@ void MacroAssembler::RecordWrite(Register object,
// region marks for new space pages.
InNewSpace(object, scratch0, eq, &done);
+ // Add offset into the object.
+ add(scratch0, object, offset);
+
// Record the actual write.
- RecordWriteHelper(object, offset, scratch0, scratch1);
+ RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
@@ -383,6 +382,38 @@ void MacroAssembler::RecordWrite(Register object,
}
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, address, scratch);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
@@ -1369,6 +1400,56 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
}
+void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
+ DwVfpRegister result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ SwVfpRegister scratch3,
+ Label* not_number,
+ ObjectToDoubleFlags flags) {
+ Label done;
+ if ((flags & OBJECT_NOT_SMI) == 0) {
+ Label not_smi;
+ BranchOnNotSmi(object, &not_smi);
+ // Remove smi tag and convert to double.
+ mov(scratch1, Operand(object, ASR, kSmiTagSize));
+ vmov(scratch3, scratch1);
+ vcvt_f64_s32(result, scratch3);
+ b(&done);
+ bind(&not_smi);
+ }
+ // Check for heap number and load double value from it.
+ ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ cmp(scratch1, heap_number_map);
+ b(ne, not_number);
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
+ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ Sbfx(scratch1,
+ scratch1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // All-one value sign extend to -1.
+ cmp(scratch1, Operand(-1));
+ b(eq, not_number);
+ }
+ vldr(result, scratch2, HeapNumber::kValueOffset);
+ bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
+ DwVfpRegister value,
+ Register scratch1,
+ SwVfpRegister scratch2) {
+ mov(scratch1, Operand(smi, ASR, kSmiTagSize));
+ vmov(scratch2, scratch1);
+ vcvt_f64_s32(value, scratch2);
+}
+
+
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -1548,6 +1629,8 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
void MacroAssembler::Abort(const char* msg) {
+ Label abort_start;
+ bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
@@ -1571,6 +1654,17 @@ void MacroAssembler::Abort(const char* msg) {
push(r0);
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ if (is_const_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
}
@@ -1673,14 +1767,31 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
-void MacroAssembler::CountLeadingZeros(Register source,
- Register scratch,
- Register zeros) {
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ DwVfpRegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required) {
+ AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
+ sub(scratch1, result, Operand(kHeapObjectTag));
+ vstr(value, scratch1, HeapNumber::kValueOffset);
+}
+
+
+void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
+ Register source, // Input.
+ Register scratch) {
+ ASSERT(!zeros.is(source) || !source.is(zeros));
+ ASSERT(!zeros.is(scratch));
+ ASSERT(!scratch.is(ip));
+ ASSERT(!source.is(ip));
+ ASSERT(!zeros.is(ip));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
mov(zeros, Operand(0));
- mov(scratch, source);
+ Move(scratch, source);
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);