-rw-r--r--   V8_MERGE_REVISION                       |   4
-rw-r--r--   src/arm/full-codegen-arm.cc             |   2
-rw-r--r--   src/assembler.h                         |   4
-rw-r--r--   src/isolate.cc                          |   3
-rw-r--r--   src/mips/assembler-mips.cc              |  33
-rw-r--r--   src/mips/assembler-mips.h               |  90
-rw-r--r--   src/mips/code-stubs-mips.cc             |  65
-rw-r--r--   src/mips/lithium-gap-resolver-mips.cc   |  32
-rw-r--r--   src/runtime.cc                          |   6
-rw-r--r--   src/runtime.h                           |   1
-rw-r--r--   src/runtime.js                          |   2
-rw-r--r--   src/version.cc                          |   2
-rw-r--r--   test/test262/test262.status             |   8
13 files changed, 150 insertions(+), 102 deletions(-)
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 9bdc5bd3..ed1c0be9 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,2 +1,2 @@
-V8 3.9.24.15
-http://v8.googlecode.com/svn/branches/3.9@11368
+V8 3.9.24.17
+http://v8.googlecode.com/svn/branches/3.9@11450
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0cbd46ed..831467ac 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -70,6 +70,7 @@ class JumpPatchSite BASE_EMBEDDED {
// the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
// Don't use b(al, ...) as that might emit the constant pool right after the
@@ -82,6 +83,7 @@ class JumpPatchSite BASE_EMBEDDED {
// the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
__ b(ne, target); // Never taken before patched.
diff --git a/src/assembler.h b/src/assembler.h
index 918a2a67..f960b586 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -62,6 +62,10 @@ class AssemblerBase: public Malloced {
Isolate* isolate() const { return isolate_; }
int jit_cookie() { return jit_cookie_; }
+ // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
+ // cross-snapshotting.
+ static void QuietNaN(HeapObject* nan) { }
+
private:
Isolate* isolate_;
int jit_cookie_;
diff --git a/src/isolate.cc b/src/isolate.cc
index bf9b3450..e8051223 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1842,6 +1842,9 @@ bool Isolate::Init(Deserializer* des) {
// stack guard.
heap_.SetStackLimits();
+ // Quiet the heap NaN if needed on target platform.
+ if (des != NULL) Assembler::QuietNaN(heap_.nan_value());
+
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->SetUp();
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 0d7f9218..f347fdc5 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -143,7 +143,7 @@ int ToNumber(Register reg) {
27, // k1
28, // gp
29, // sp
- 30, // s8_fp
+ 30, // fp
31, // ra
};
return kNumbers[reg.code()];
@@ -163,7 +163,7 @@ Register ToRegister(int num) {
k0, k1,
gp,
sp,
- s8_fp,
+ fp,
ra
};
return kRegisters[num];
@@ -237,28 +237,28 @@ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
- | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
+ | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
- | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
+ | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
| (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
| (0 & kImm16Mask);
-const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
| (0 & kImm16Mask);
-const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
| (0 & kImm16Mask);
-const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
| (kNegOffset & kImm16Mask);
-const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
| (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
@@ -2137,6 +2137,15 @@ Address Assembler::target_address_at(Address pc) {
}
+// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
+// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
+// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
+// OS::nan_value() returns a qNaN.
+void Assembler::QuietNaN(HeapObject* object) {
+ HeapNumber::cast(object)->set_value(OS::nan_value());
+}
+
+
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which loads 16 bits of the 32-bit address into a register.
// Patching the address must replace both instr, and flush the i-cache.
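
Background for the QuietNaN() addition above, as a minimal standalone sketch (not part of the patch): IEEE 754-2008 and ia32 mark a quiet NaN by setting the most significant mantissa bit, while legacy MIPS FPUs treat that same bit as the signaling flag, which is why a NaN baked into an ia32-generated snapshot has to be rewritten with the target's own quiet NaN after deserialization.

// Minimal standalone sketch of the host side of the NaN-encoding mismatch.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

int main() {
  const double host_qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits = 0;
  std::memcpy(&bits, &host_qnan, sizeof bits);
  const uint64_t kMantissaMsb = 1ULL << 51;  // MSB of the 52-bit mantissa.
  std::printf("host qNaN bits: 0x%016llx, mantissa MSB set: %d\n",
              static_cast<unsigned long long>(bits),
              (bits & kMantissaMsb) != 0);
  // On ia32/x64 that bit is set (quiet NaN); a legacy MIPS FPU reads the same
  // pattern as a signaling NaN, hence Assembler::QuietNaN() overwrites the
  // heap's nan_value with the target's own quiet NaN (OS::nan_value()).
  return 0;
}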
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 8b877f65..84714e50 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -125,40 +125,59 @@ struct Register {
int code_;
};
-const Register no_reg = { -1 };
-
-const Register zero_reg = { 0 }; // Always zero.
-const Register at = { 1 }; // at: Reserved for synthetic instructions.
-const Register v0 = { 2 }; // v0, v1: Used when returning multiple values
-const Register v1 = { 3 }; // from subroutines.
-const Register a0 = { 4 }; // a0 - a4: Used to pass non-FP parameters.
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 }; // t0 - t9: Can be used without reservation, act
-const Register t1 = { 9 }; // as temporary registers and are allowed to
-const Register t2 = { 10 }; // be destroyed by subroutines.
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 }; // s0 - s7: Subroutine register variables.
-const Register s1 = { 17 }; // Subroutines that write to these registers
-const Register s2 = { 18 }; // must restore their values before exiting so
-const Register s3 = { 19 }; // that the caller can expect the values to be
-const Register s4 = { 20 }; // preserved.
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 }; // k0, k1: Reserved for system calls and
-const Register k1 = { 27 }; // interrupt handlers.
-const Register gp = { 28 }; // gp: Reserved.
-const Register sp = { 29 }; // sp: Stack pointer.
-const Register s8_fp = { 30 }; // fp: Frame pointer.
-const Register ra = { 31 }; // ra: Return address pointer.
+#define REGISTER(N, C) \
+ const int kRegister_ ## N ## _Code = C; \
+ const Register N = { C }
+
+REGISTER(no_reg, -1);
+// Always zero.
+REGISTER(zero_reg, 0);
+// at: Reserved for synthetic instructions.
+REGISTER(at, 1);
+// v0, v1: Used when returning multiple values from subroutines.
+REGISTER(v0, 2);
+REGISTER(v1, 3);
+// a0 - a4: Used to pass non-FP parameters.
+REGISTER(a0, 4);
+REGISTER(a1, 5);
+REGISTER(a2, 6);
+REGISTER(a3, 7);
+// t0 - t9: Can be used without reservation, act as temporary registers and are
+// allowed to be destroyed by subroutines.
+REGISTER(t0, 8);
+REGISTER(t1, 9);
+REGISTER(t2, 10);
+REGISTER(t3, 11);
+REGISTER(t4, 12);
+REGISTER(t5, 13);
+REGISTER(t6, 14);
+REGISTER(t7, 15);
+// s0 - s7: Subroutine register variables. Subroutines that write to these
+// registers must restore their values before exiting so that the caller can
+// expect the values to be preserved.
+REGISTER(s0, 16);
+REGISTER(s1, 17);
+REGISTER(s2, 18);
+REGISTER(s3, 19);
+REGISTER(s4, 20);
+REGISTER(s5, 21);
+REGISTER(s6, 22);
+REGISTER(s7, 23);
+REGISTER(t8, 24);
+REGISTER(t9, 25);
+// k0, k1: Reserved for system calls and interrupt handlers.
+REGISTER(k0, 26);
+REGISTER(k1, 27);
+// gp: Reserved.
+REGISTER(gp, 28);
+// sp: Stack pointer.
+REGISTER(sp, 29);
+// fp: Frame pointer.
+REGISTER(fp, 30);
+// ra: Return address pointer.
+REGISTER(ra, 31);
+
+#undef REGISTER
int ToNumber(Register reg);
@@ -303,7 +322,6 @@ static const Register& kLithiumScratchReg = s3; // Scratch register.
static const Register& kLithiumScratchReg2 = s4; // Scratch register.
static const Register& kRootRegister = s6; // Roots array pointer.
static const Register& cp = s7; // JavaScript context pointer.
-static const Register& fp = s8_fp; // Alias for fp.
static const DoubleRegister& kLithiumScratchDouble = f30;
static const FPURegister& kDoubleRegZero = f28;
@@ -552,6 +570,8 @@ class Assembler : public AssemblerBase {
static void JumpLabelToJumpRegister(Address pc);
+ static void QuietNaN(HeapObject* nan);
+
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
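
For reference, a minimal standalone mirror (not part of the patch) of the REGISTER macro introduced above; the Register struct here is a simplified stand-in for V8's, everything else follows the patch:

#include <cstdio>

struct Register { int code_; };  // simplified stand-in for V8's Register

#define REGISTER(N, C)                 \
  const int kRegister_ ## N ## _Code = C; \
  const Register N = { C }

REGISTER(sp, 29);  // expands to: const int kRegister_sp_Code = 29;
                   //             const Register sp = { 29 };
REGISTER(fp, 30);

#undef REGISTER

int main() {
  std::printf("sp code %d (%d), fp code %d (%d)\n",
              sp.code_, kRegister_sp_Code, fp.code_, kRegister_fp_Code);
  return 0;
}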
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 1b3242cf..3e7b5bf6 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -481,7 +481,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ Branch(&not_special, gt, source_, Operand(1));
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
+ const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
// Safe to use 'at' as dest reg here.
__ Or(at, exponent, Operand(exponent_word_for_1));
@@ -4421,7 +4421,7 @@ Register InstanceofStub::right() { return a1; }
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
- static const int kDisplacement =
+ const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
// Check that the key is a smi.
@@ -4833,10 +4833,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// sp[8]: subject string
// sp[12]: JSRegExp object
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
Isolate* isolate = masm->isolate();
@@ -5045,8 +5045,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- static const int kParameterRegisters = 4;
+ const int kRegExpExecuteArguments = 8;
+ const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -5931,7 +5931,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// scratch: -
// Perform a number of probes in the symbol table.
- static const int kProbes = 4;
+ const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
@@ -6059,9 +6059,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// 0 <= from <= to <= string.length.
// If any of these assumptions fail, we call the runtime system.
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
__ lw(a2, MemOperand(sp, kToOffset));
__ lw(a3, MemOperand(sp, kFromOffset));
@@ -7356,43 +7356,46 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+#define REG(Name) { kRegister_ ## Name ## _Code }
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { s2, s0, t3, EMIT_REMEMBERED_SET },
- { s2, a2, t3, EMIT_REMEMBERED_SET },
+ { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
+ { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
- { a3, t0, t1, EMIT_REMEMBERED_SET },
+ { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
- { t0, a1, a2, OMIT_REMEMBERED_SET },
+ { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { a1, a2, a3, EMIT_REMEMBERED_SET },
- { a3, a2, a1, EMIT_REMEMBERED_SET },
+ { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
+ { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { a2, a1, a3, EMIT_REMEMBERED_SET },
- { a3, a1, a2, EMIT_REMEMBERED_SET },
+ { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
+ { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { a3, a2, t0, EMIT_REMEMBERED_SET },
- { a2, a3, t0, EMIT_REMEMBERED_SET },
+ { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
+ { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { a2, a3, t5, EMIT_REMEMBERED_SET },
- { a2, a3, t5, OMIT_REMEMBERED_SET },
+ { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
+ { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
- { t2, a2, a0, EMIT_REMEMBERED_SET },
- { a2, t2, t5, EMIT_REMEMBERED_SET },
+ { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
+ { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
- { t1, a0, t2, EMIT_REMEMBERED_SET },
+ { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
+
bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7419,7 +7422,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
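
The REG macro above is the other half of the same pattern: by initializing each Register member from its integral kRegister_*_Code constant instead of from the const Register objects, the kAheadOfTime table can presumably be constant-initialized with no run-time static initializer. A simplified standalone sketch (stand-in types; register codes taken from the REGISTER list in assembler-mips.h):

#include <cstdio>

struct Register { int code_; };  // simplified stand-ins for the V8 types
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};

// Register codes as defined by REGISTER(...) in assembler-mips.h.
const int kRegister_s2_Code = 18;
const int kRegister_s0_Code = 16;
const int kRegister_t3_Code = 11;

#define REG(Name) { kRegister_ ## Name ## _Code }

// { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET } expands to
// { { 18 }, { 16 }, { 11 }, EMIT_REMEMBERED_SET }: every initializer is a
// constant expression, so no run-time static initializer is needed.
static const AheadOfTimeWriteBarrierStubList kExample[] = {
  { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
};

#undef REG

int main() {
  std::printf("object code %d, value code %d, address code %d\n",
              kExample[0].object.code_, kExample[0].value.code_,
              kExample[0].address.code_);
  return 0;
}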
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
index 41b060de..4a5fbe39 100644
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -33,8 +33,6 @@
namespace v8 {
namespace internal {
-static const Register kSavedValueRegister = kLithiumScratchReg;
-
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32),
@@ -170,9 +168,9 @@ void LGapResolver::BreakCycle(int index) {
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
- __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
- __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+ __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
@@ -189,11 +187,11 @@ void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
- // Spilled value is in kSavedValueRegister or kLithiumScratchDouble.
+ // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
- __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
@@ -245,8 +243,8 @@ void LGapResolver::EmitMove(int index) {
__ sw(at, destination_operand);
}
} else {
- __ lw(kSavedValueRegister, source_operand);
- __ sw(kSavedValueRegister, destination_operand);
+ __ lw(kLithiumScratchReg, source_operand);
+ __ sw(kLithiumScratchReg, destination_operand);
}
}
@@ -263,13 +261,13 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsInteger32(constant_source)) {
- __ li(kSavedValueRegister,
+ __ li(kLithiumScratchReg,
Operand(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(kSavedValueRegister,
+ __ LoadObject(kLithiumScratchReg,
cgen_->ToHandle(constant_source));
}
- __ sw(kSavedValueRegister, cgen_->ToMemOperand(destination));
+ __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
@@ -291,15 +289,15 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kLithiumScratchDouble was used to break the cycle,
- // but kSavedValueRegister is free.
+ // but kLithiumScratchReg is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
- __ lw(kSavedValueRegister, source_operand);
- __ sw(kSavedValueRegister, destination_operand);
- __ lw(kSavedValueRegister, source_high_operand);
- __ sw(kSavedValueRegister, destination_high_operand);
+ __ lw(kLithiumScratchReg, source_operand);
+ __ sw(kLithiumScratchReg, destination_operand);
+ __ lw(kLithiumScratchReg, source_high_operand);
+ __ sw(kLithiumScratchReg, destination_high_operand);
} else {
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
diff --git a/src/runtime.cc b/src/runtime.cc
index 82ffdd05..77c1cd8d 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8398,6 +8398,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
+ RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ return isolate->heap()->nan_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
HandleScope scope(isolate);
ASSERT(args.length() >= 2);
diff --git a/src/runtime.h b/src/runtime.h
index fe9cfd9b..2e06263d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -77,6 +77,7 @@ namespace internal {
\
/* Utilities */ \
F(CheckIsBootstrapping, 0, 1) \
+ F(GetRootNaN, 0, 1) \
F(Call, -1 /* >= 2 */, 1) \
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index 53d9a397..6b487349 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -47,7 +47,7 @@ var $String = global.String;
var $Number = global.Number;
var $Function = global.Function;
var $Boolean = global.Boolean;
-var $NaN = 0/0;
+var $NaN = %GetRootNaN();
var builtins = this;
// ECMA-262 Section 11.9.3.
diff --git a/src/version.cc b/src/version.cc
index b7fd7cb4..a5b9279f 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
#define BUILD_NUMBER 24
-#define PATCH_LEVEL 15
+#define PATCH_LEVEL 17
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 67607fff..3f395bdc 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -65,10 +65,12 @@ S7.8.4_A7.2_T4: FAIL_OK
S7.8.4_A7.2_T5: FAIL_OK
S7.8.4_A7.2_T6: FAIL_OK
-# Linux and Mac defaults to extended 80 bit floating point format in the FPU.
+# Linux on ia32 (and therefore the simulators) defaults to the extended 80-bit
+# floating point format, so these tests checking 64-bit FP precision fail.
+# The other platforms/architectures pass these tests.
# We follow the other major JS engines by keeping this default.
-S8.5_A2.2: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
-S8.5_A2.1: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
+S8.5_A2.1: PASS || FAIL_OK
+S8.5_A2.2: PASS || FAIL_OK
############################ INVALID TESTS #############################