author     Mathieu Chartier <mathieuc@google.com>  2015-04-22 13:56:20 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2015-06-02 09:21:27 -0700
commit     3d21bdf8894e780d349c481e5c9e29fe1556051c
tree       61a5231f36c0dabd73457fec81df103462a05aff /compiler/utils
parent     71f0a8a123fa27bdc857a98afebbaf0ed09dac15
Move mirror::ArtMethod to native
Optimizing + quick tests are passing, devices boot.
TODO: Test and fix bugs in mips64.

Saves 16 bytes per ArtMethod in most cases, a 7.5MB reduction in system
PSS. Some of the savings are from removal of the virtual methods and
direct methods object arrays.

Bug: 19264997
(cherry picked from commit e401d146407d61eeb99f8d6176b2ac13c4df1e33)
Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d

Fix some ArtMethod related bugs

Added root visiting for runtime methods, not currently required since
the GcRoots in these methods are null.

Added missing GetInterfaceMethodIfProxy in GetMethodLine, fixes --trace
run-tests 005, 044.

Fixed optimizing compiler bug where we used a normal stack location
instead of double on ARM64; this fixes the debuggable tests.

TODO: Fix JDWP tests.

Bug: 19264997
Change-Id: I7c55f69c61d1b45351fd0dc7185ffe5efad82bd3

ART: Fix casts for 64-bit pointers on 32-bit compiler.

Bug: 19264997
Change-Id: Ief45cdd4bae5a43fc8bfdfa7cf744e2c57529457

Fix JDWP tests after ArtMethod change

Fixes Throwable::GetStackDepth for exception event detection after the
internal stack trace representation change.

Adds missing ArtMethod::GetInterfaceMethodIfProxy call in case of a
proxy method.

Bug: 19264997
Change-Id: I363e293796848c3ec491c963813f62d868da44d2

Fix accidental IMT and root marking regression

Was always using the conflict trampoline. Also included fix for a
regression in GC time caused by extra roots; most of the regression
was IMT.

Fixed bug in DumpGcPerformanceInfo where we would get a SIGABRT due to
a detached thread.

EvaluateAndApplyChanges: ~2500 -> ~1980
GC time: 8.2s -> 7.2s due to 1s less of MarkConcurrentRoots

Bug: 19264997
Change-Id: I4333e80a8268c2ed1284f87f25b9f113d4f2c7e0

Fix bogus image test assert

Previously we were comparing the size of the non-moving space to the
size of the image file. Now we properly compare the size of the image
space against the size of the image file.

Bug: 19264997
Change-Id: I7359f1f73ae3df60c5147245935a24431c04808a

[MIPS64] Fix art_quick_invoke_stub argument offsets.

The ArtMethod reference's size got bigger, so we need to move the other
args and leave enough space for ArtMethod* and the 'this' pointer. This
fixes mips64 boot.

Bug: 19264997
Change-Id: I47198d5f39a4caab30b3b77479d5eedaad5006ab
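The recurring change across the backends below is that the method slot at
SP+0 now holds a native ArtMethod* rather than a 4-byte
StackReference<mirror::ArtMethod>, so entry-spill offsets follow the
target's pointer size. A minimal sketch of that offset arithmetic
(function names are mine, not from the tree):

#include <cstddef>
#include <cstdint>

// Width of the old method slot: sizeof(StackReference<mirror::ArtMethod>).
constexpr size_t kStackReferenceSize = 4u;

// Old layout: [frame ... | StackReference (4 bytes)] [entry spills]
inline int32_t OldEntrySpillBase(size_t frame_size) {
  return static_cast<int32_t>(frame_size + kStackReferenceSize);
}

// New layout: [frame ... | ArtMethod* (pointer-sized)] [entry spills]
// pointer_size is 4 on arm/mips/x86 and 8 on arm64/mips64/x86_64,
// matching the kFramePointerSize / k*PointerSize constants in the diff.
inline int32_t NewEntrySpillBase(size_t frame_size, size_t pointer_size) {
  return static_cast<int32_t>(frame_size + pointer_size);
}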
Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arm/assembler_arm.cc             | 10
-rw-r--r--  compiler/utils/arm/assembler_arm.h              |  5
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc         | 28
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h          |  5
-rw-r--r--  compiler/utils/assembler.h                      |  6
-rw-r--r--  compiler/utils/dex_cache_arrays_layout-inl.h    | 13
-rw-r--r--  compiler/utils/mips/assembler_mips.cc           |  6
-rw-r--r--  compiler/utils/mips/assembler_mips.h            |  5
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc       | 11
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h        |  5
-rw-r--r--  compiler/utils/x86/assembler_x86.cc             | 21
-rw-r--r--  compiler/utils/x86/assembler_x86.h              |  5
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc       | 22
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h        |  3
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc  |  2
15 files changed, 73 insertions, 74 deletions
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1da0563264..cbbc116033 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -378,7 +378,7 @@ static dwarf::Reg DWARFReg(SRegister reg) {
return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = kArmPointerSize;
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
@@ -415,7 +415,7 @@ void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
StoreToOffset(kStoreWord, R0, SP, 0);
// Write out entry spills.
- int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ int32_t offset = frame_size + kFramePointerSize;
for (size_t i = 0; i < entry_spills.size(); ++i) {
ArmManagedRegister reg = entry_spills.at(i).AsArm();
if (reg.IsNoRegister()) {
@@ -528,13 +528,13 @@ void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
ArmManagedRegister dst = mdest.AsArm();
CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
base.AsArm().AsCoreRegister(), offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0));
}
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index ce4c741918..c673c6b81e 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -693,9 +693,10 @@ class ArmAssembler : public Assembler {
void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 3ee79a103f..7d98a30ff3 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -293,14 +293,14 @@ void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
}
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
- MemberOffset offs) {
+void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
+ bool poison_reference) {
Arm64ManagedRegister dst = m_dst.AsArm64();
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
WRegister ref_reg = dst.AsOverlappingWRegister();
___ Neg(reg_w(ref_reg), vixl::Operand(reg_w(ref_reg)));
}
@@ -535,7 +535,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, base.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
___ Blr(reg_x(scratch.AsXRegister()));
}
@@ -544,8 +544,9 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
- ManagedRegister m_in_reg, bool null_allowed) {
+void Arm64Assembler::CreateHandleScopeEntry(
+ ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
+ bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
// For now we only hold stale handle scope entries in x registers.
@@ -571,7 +572,7 @@ void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffs
}
void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
- ManagedRegister m_scratch, bool null_allowed) {
+ ManagedRegister m_scratch, bool null_allowed) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
if (null_allowed) {
@@ -590,7 +591,7 @@ void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset han
}
void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
- ManagedRegister m_in_reg) {
+ ManagedRegister m_in_reg) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
CHECK(out_reg.IsXRegister()) << out_reg;
@@ -706,7 +707,7 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
// Increase frame to required size.
DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
IncreaseFrameSize(frame_size);
// Save callee-saves.
@@ -720,13 +721,12 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
___ Mov(reg_x(ETR), reg_x(TR));
- // Write StackReference<Method>.
+ // Write ArtMethod*
DCHECK(X0 == method_reg.AsArm64().AsXRegister());
- DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
- StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);
+ StoreToOffset(X0, SP, 0);
// Write out entry spills
- int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ int32_t offset = frame_size + kArm64PointerSize;
for (size_t i = 0; i < entry_spills.size(); ++i) {
Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
if (reg.IsNoRegister()) {
@@ -768,7 +768,7 @@ void Arm64Assembler::RemoveFrame(size_t frame_size,
// For now we only check that the size of the frame is large enough to hold spills and method
// reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
DCHECK_ALIGNED(frame_size, kStackAlignment);
// Note: This is specific to JNI method frame.
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b1b66ed49a..fa9faed66b 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -113,8 +113,9 @@ class Arm64Assembler FINAL : public Assembler {
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 2e3a47bb91..672e1503be 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -434,8 +434,10 @@ class Assembler {
virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
- virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
- virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) = 0;
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+ // If poison_reference is true and kPoisonReference is true, then we negate the read reference.
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) = 0;
virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
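The comment added to the pure-virtual LoadRef above states the contract
each backend now implements: the loaded value is negated only when the
build-time poisoning switch and the per-call flag both hold, so
raw-pointer payloads such as ArtMethod* can opt out. A self-contained
sketch of that guard (MaybePoison is a hypothetical name; the real
backends negate the destination register inline with rsb/neg/subu):

#include <cstdint>

// Mirrors the build-time switch from the real tree (false in normal builds).
constexpr bool kPoisonHeapReferences = false;

// Hypothetical helper: poison a just-loaded 32-bit heap reference.
inline uint32_t MaybePoison(uint32_t ref, bool poison_reference) {
  if (kPoisonHeapReferences && poison_reference) {
    return 0u - ref;  // two's-complement negation, as rsb/neg/subu emit
  }
  return ref;  // e.g. loads of ArtMethod* pass poison_reference = false
}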
diff --git a/compiler/utils/dex_cache_arrays_layout-inl.h b/compiler/utils/dex_cache_arrays_layout-inl.h
index a71eeceafb..fec981a03c 100644
--- a/compiler/utils/dex_cache_arrays_layout-inl.h
+++ b/compiler/utils/dex_cache_arrays_layout-inl.h
@@ -25,12 +25,6 @@
#include "mirror/array-inl.h"
#include "primitive.h"
-namespace mirror {
-class ArtMethod;
-class Class;
-class String;
-} // namespace mirror
-
namespace art {
inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
@@ -40,7 +34,7 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const Dex
strings_offset_(methods_offset_ + MethodsSize(dex_file->NumMethodIds())),
fields_offset_(strings_offset_ + StringsSize(dex_file->NumStringIds())),
size_(fields_offset_ + FieldsSize(dex_file->NumFieldIds())) {
- DCHECK(pointer_size == 4u || pointer_size == 8u);
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
@@ -52,12 +46,11 @@ inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
}
inline size_t DexCacheArraysLayout::MethodOffset(uint32_t method_idx) const {
- return methods_offset_ + ElementOffset(
- sizeof(mirror::HeapReference<mirror::ArtMethod>), method_idx);
+ return methods_offset_ + ElementOffset(pointer_size_, method_idx);
}
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
- return ArraySize(sizeof(mirror::HeapReference<mirror::ArtMethod>), num_elements);
+ return ArraySize(pointer_size_, num_elements);
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
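Since the dex cache's method array now stores native ArtMethod* values,
its element width follows pointer_size_ instead of the fixed 4-byte
compressed-reference width that MethodOffset/MethodsSize used before. A
simplified sketch of the new arithmetic (names are mine; the real
ElementOffset/ArraySize helpers may also handle alignment):

#include <cstddef>
#include <cstdint>

// Simplified sketch of the layout math in the hunk above.
inline size_t MethodOffsetSketch(size_t methods_offset,
                                 size_t pointer_size,  // 4 or 8 per target
                                 uint32_t method_idx) {
  return methods_offset + pointer_size * method_idx;
}

inline size_t MethodsSizeSketch(size_t pointer_size, size_t num_elements) {
  return pointer_size * num_elements;  // array of ArtMethod*, not references
}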
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index e769489479..e55b461127 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -696,13 +696,13 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
}
-void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister());
}
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 34713e1305..7b0fc39d17 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -189,9 +189,10 @@ class MipsAssembler FINAL : public Assembler {
void LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index b95e436897..a8b55d1097 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -601,10 +601,10 @@ void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
// Write out Method*.
- StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0);
+ StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0);
// Write out entry spills.
- int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ int32_t offset = frame_size + kFramePointerSize;
for (size_t i = 0; i < entry_spills.size(); ++i) {
Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
ManagedRegisterSpill spill = entry_spills.at(i);
@@ -750,12 +750,13 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
}
-void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) {
+void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
}
}
@@ -1004,7 +1005,7 @@ void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(),
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
SP, base.Int32Value());
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
scratch.AsGpuRegister(), offset.Int32Value());
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 95ba967646..38419abbac 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -188,9 +188,10 @@ class Mips64Assembler FINAL : public Assembler {
void LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 7e7520066d..390d46ede4 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1724,9 +1724,9 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
// return address then method on stack.
- int32_t adjust = frame_size - (gpr_count * kFramePointerSize) -
- sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
- kFramePointerSize /*return address*/;
+ int32_t adjust = frame_size - gpr_count * kFramePointerSize -
+ kFramePointerSize /*method*/ -
+ kFramePointerSize /*return address*/;
addl(ESP, Immediate(-adjust));
cfi_.AdjustCFAOffset(adjust);
pushl(method_reg.AsX86().AsCpuRegister());
@@ -1750,12 +1750,11 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
}
-void X86Assembler::RemoveFrame(size_t frame_size,
- const std::vector<ManagedRegister>& spill_regs) {
+void X86Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& spill_regs) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi_.RememberState();
- int adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
- sizeof(StackReference<mirror::ArtMethod>);
+ // -kFramePointerSize for ArtMethod*.
+ int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
addl(ESP, Immediate(adjust));
cfi_.AdjustCFAOffset(-adjust);
for (size_t i = 0; i < spill_regs.size(); ++i) {
@@ -1904,18 +1903,18 @@ void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src,
}
}
-void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister());
movl(dest.AsCpuRegister(), Address(ESP, src));
}
-void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
negl(dest.AsCpuRegister());
}
}
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 5319dacab7..1c1c023711 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -538,9 +538,10 @@ class X86Assembler FINAL : public Assembler {
void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index feceecac68..ac95c7127a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2388,9 +2388,9 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
}
- DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_EQ(kX86_64PointerSize, kFramePointerSize);
- movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
for (size_t i = 0; i < entry_spills.size(); ++i) {
ManagedRegisterSpill spill = entry_spills.at(i);
@@ -2590,18 +2590,18 @@ void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> sr
}
}
-void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister());
movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
}
-void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
negl(dest.AsCpuRegister());
}
}
@@ -2667,8 +2667,7 @@ void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t s
}
}
-void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
+void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
@@ -2693,9 +2692,8 @@ void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch,
- size_t size) {
+void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch,
+ size_t size) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
if (scratch.IsCpuRegister() && size == 8) {
Load(scratch, src, 4);
@@ -2834,7 +2832,7 @@ void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister
void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- movl(scratch, Address(CpuRegister(RSP), base));
+ movq(scratch, Address(CpuRegister(RSP), base));
call(Address(scratch, offset));
}
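The movl -> movq change above (like the ldr w -> ldr x and lw -> ld
changes in the arm64 and mips64 backends) is needed because a 32-bit
load of the now pointer-sized method slot would truncate the pointer on
64-bit targets. A tiny sketch of the difference, assuming a
little-endian target:

#include <cstdint>
#include <cstring>

// The method slot at (SP + base) is 8 bytes wide on 64-bit targets.
inline uint64_t LoadPointerWidth(const void* slot) {  // movq-style load
  uint64_t v;
  std::memcpy(&v, slot, sizeof(v));
  return v;
}

inline uint64_t LoadWordWidth(const void* slot) {     // movl-style load
  uint32_t v;
  std::memcpy(&v, slot, sizeof(v));  // reads only the low half (LE)
  return v;  // zero-extended: the pointer's high 32 bits are lost
}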
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 7daf994900..6b2b65d6c1 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -668,7 +668,8 @@ class X86_64Assembler FINAL : public Assembler {
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index dcffe35113..b86bc85489 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1127,7 +1127,7 @@ std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBU
ssize_t displacement = static_cast<ssize_t>(frame_size) - (spill_regs.size() * 8 + 8);
str << "subq $" << displacement << ", %rsp\n";
// 3) Store method reference.
- str << "movl %edi, (%rsp)\n";
+ str << "movq %rdi, (%rsp)\n";
// 4) Entry spills.
str << "movq %rax, " << frame_size + 0 << "(%rsp)\n";
str << "movq %rbx, " << frame_size + 8 << "(%rsp)\n";