author     Mathieu Chartier <mathieuc@google.com>    2014-11-10 11:08:06 -0800
committer  Mathieu Chartier <mathieuc@google.com>    2014-11-18 12:27:37 -0800
commit     2d7210188805292e463be4bcf7a133b654d7e0ea (patch)
tree       7705a3bf841ae44b2396728fa22ed0b5dcb44dbf
parent     e0491682d101c69bf88c3c24a965312129cbfa38 (diff)
Change 64 bit ArtMethod fields to be pointer sized
Changed the 64 bit entrypoint and gc map fields in ArtMethod to be pointer sized. This saves a large amount of memory on 32 bit systems: it reduces ArtMethod size by 16 bytes on 32 bit.

Total number of ArtMethods on low-memory mako: 169957
Image: 49203 methods -> 787248 bytes of image size reduction.
Zygote space: 1070 methods -> 17120 bytes of size reduction.
App methods: ~120k -> 2 MB of savings.

Savings per app on low-memory mako: 125K+ per app (fewer active apps -> more image methods per app). Actual savings depend on how often the shared methods end up on dirty pages rather than staying shared.

TODO (in another CL): delete the gc map field from ArtMethod, since we should be able to get it from the oat method header.

Bug: 17643507
Change-Id: Ie9508f05907a9f693882d4d32a564460bf273ee8
(cherry picked from commit e832e64a7e82d7f72aedbd7d798fb929d458ee8f)
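As a sanity check on the quoted numbers, the image reduction is simply methods x per-method savings: 49203 x 16 = 787248 bytes, and 1070 x 16 = 17120 bytes for the zygote space. Below is a minimal, self-contained C++ sketch of the pointer-size-parameterized offset pattern the patch introduces; the names, field order, and base offset are illustrative only, not ART's actual ArtMethod layout.

// Sketch only: offsets of pointer-sized fields computed from the *target*
// pointer size, in the spirit of
// ArtMethod::EntryPointFromQuickCompiledCodeOffset(pointer_size).
#include <cstddef>

namespace sketch {

// Hypothetical start of the block of pointer-sized fields within the object.
constexpr size_t kPtrFieldsStart = 36;

// Hypothetical ordering of the pointer-sized fields.
enum class PtrField : size_t {
  kInterpreterEntryPoint = 0,
  kJniEntryPoint = 1,
  kPortableCodeEntryPoint = 2,
  kQuickCodeEntryPoint = 3,
  kNativeGcMap = 4,
};

// A 32-bit image built by a 64-bit compiler gets 4-byte slots, so the offset
// must be derived from the target pointer size, not from sizeof(void*).
constexpr size_t PtrFieldOffset(PtrField f, size_t pointer_size) {
  return kPtrFieldsStart + static_cast<size_t>(f) * pointer_size;
}

static_assert(PtrFieldOffset(PtrField::kQuickCodeEntryPoint, 4) !=
              PtrFieldOffset(PtrField::kQuickCodeEntryPoint, 8),
              "32-bit and 64-bit layouts differ");

}  // namespace sketch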
-rw-r--r--  compiler/compilers.cc | 159
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 4
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc | 4
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 5
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc | 3
-rw-r--r--  compiler/image_writer.cc | 75
-rw-r--r--  compiler/image_writer.h | 6
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 4
-rw-r--r--  compiler/oat_writer.cc | 5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 9
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 9
-rw-r--r--  patchoat/patchoat.cc | 35
-rw-r--r--  patchoat/patchoat.h | 19
-rw-r--r--  runtime/arch/arm/portable_entrypoints_arm.S | 2
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 2
-rw-r--r--  runtime/arch/instruction_set.h | 4
-rw-r--r--  runtime/arch/mips/portable_entrypoints_mips.S | 2
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 2
-rw-r--r--  runtime/arch/x86/portable_entrypoints_x86.S | 2
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 2
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 4
-rw-r--r--  runtime/asm_support.h | 20
-rw-r--r--  runtime/class_linker.cc | 44
-rw-r--r--  runtime/class_linker.h | 3
-rw-r--r--  runtime/class_linker_test.cc | 5
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 2
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/interpreter/interpreter.cc | 37
-rw-r--r--  runtime/mirror/art_method-inl.h | 12
-rw-r--r--  runtime/mirror/art_method.cc | 2
-rw-r--r--  runtime/mirror/art_method.h | 214
-rw-r--r--  runtime/mirror/class.h | 7
-rw-r--r--  runtime/mirror/object-inl.h | 4
-rw-r--r--  runtime/mirror/object.h | 45
-rw-r--r--  runtime/mirror/object_test.cc | 8
-rw-r--r--  runtime/native_bridge_art_interface.cc | 4
39 files changed, 583 insertions, 197 deletions
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
new file mode 100644
index 0000000000..2481128e4d
--- /dev/null
+++ b/compiler/compilers.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compilers.h"
+
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
+#include "elf_writer_quick.h"
+#include "mirror/art_method-inl.h"
+
+namespace art {
+
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver* driver);
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver* driver);
+extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver* driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver* driver,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::DexFile& dex_file);
+
+// Hack for CFI CIE initialization
+extern std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64);
+
+void QuickCompiler::Init() const {
+ ArtInitQuickCompilerContext(GetCompilerDriver());
+}
+
+void QuickCompiler::UnInit() const {
+ ArtUnInitQuickCompilerContext(GetCompilerDriver());
+}
+
+CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompileWithSeaIR(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ if (method != nullptr) {
+ return method;
+ }
+
+ return ArtQuickCompileMethod(GetCompilerDriver(),
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+}
+
+CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const {
+ return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
+}
+
+uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+ size_t pointer_size = InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet());
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ pointer_size));
+}
+
+bool QuickCompiler::WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host) const {
+ return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host,
+ *GetCompilerDriver());
+}
+
+Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ Mir2Lir* mir_to_lir = nullptr;
+ switch (cu->instruction_set) {
+ case kThumb2:
+ mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kArm64:
+ mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kMips:
+ mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kX86:
+ // Fall-through.
+ case kX86_64:
+ mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+ }
+
+ /* The number of compiler temporaries depends on backend so set it up now if possible */
+ if (mir_to_lir) {
+ size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
+ bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
+ CHECK(set_max);
+ }
+ return mir_to_lir;
+}
+
+std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
+ const CompilerDriver& driver) const {
+ if (driver.GetInstructionSet() == kX86) {
+ return X86CFIInitialization(false);
+ }
+ if (driver.GetInstructionSet() == kX86_64) {
+ return X86CFIInitialization(true);
+ }
+ return nullptr;
+}
+
+CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, class_loader, dex_file);
+ if (method != nullptr) {
+ return method;
+ }
+
+ return QuickCompiler::Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b4eebb320e..a3b4df3c7e 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -536,8 +536,8 @@ static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSE
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 106996ea44..3e5b7bfe0a 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -473,8 +473,8 @@ static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 4cb12f1dc9..a7900ae7df 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -476,9 +476,10 @@ static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
- cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+ cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
cg->TargetPtrReg(kInvokeTgt));
return true;
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index db2f272436..f5f71132e4 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1006,7 +1006,8 @@ LIR* X86Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
} else {
call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ cu_->target64 ? 8 : 4).Int32Value());
}
} else {
call_insn = GenInvokeNoInlineCall(this, method_info.GetSharpType());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6096625aa1..64d2de1b45 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -68,6 +68,7 @@ using ::art::mirror::String;
namespace art {
bool ImageWriter::PrepareImageAddressSpace() {
+ target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
{
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
@@ -214,7 +215,14 @@ void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
void ImageWriter::AssignImageOffset(mirror::Object* object) {
DCHECK(object != nullptr);
SetImageOffset(object, image_end_);
- image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
+ size_t object_size;
+ if (object->IsArtMethod()) {
+ // Methods are sized based on the target pointer size.
+ object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ } else {
+ object_size = object->SizeOf();
+ }
+ image_end_ += RoundUp(object_size, 8); // 64-bit alignment
DCHECK_LT(image_end_, image_->Size());
}
@@ -754,7 +762,14 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
size_t offset = image_writer->GetImageOffset(obj);
uint8_t* dst = image_writer->image_->Begin() + offset;
const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
- size_t n = obj->SizeOf();
+ size_t n;
+ if (obj->IsArtMethod()) {
+ // Size without pointer fields since we don't want to overrun the buffer if target art method
+ // is 32 bits but source is 64 bits.
+ n = mirror::ArtMethod::SizeWithoutPointerFields();
+ } else {
+ n = obj->SizeOf();
+ }
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
Object* copy = reinterpret_cast<Object*>(dst);
@@ -834,6 +849,10 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
}
if (orig->IsArtMethod<kVerifyNone>()) {
FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
+ } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
+ // Set the right size for the target.
+ size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
}
}
@@ -892,29 +911,48 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
+ // For 64 bit targets we need to repack the current runtime pointer sized fields to the right
+ // locations.
+ // Copy all of the fields from the runtime methods to the target methods first since we did a
+ // bytewise copy earlier.
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
+ target_ptr_size_);
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(orig->GetNativeGcMap(), target_ptr_size_);
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
} else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
- (const_cast<uint8_t*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>(
+ GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
} else {
bool quick_is_interpreted;
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
// Portable entrypoint:
const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
@@ -937,18 +975,19 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// initialization.
portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
}
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);
-
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ portable_code, target_ptr_size_);
// JNI entrypoint:
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym.
// Note this is not the code_ pointer, that is handled above.
- copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
+ target_ptr_size_);
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
- copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(native_gc_map, target_ptr_size_);
}
// Interpreter entrypoint:
@@ -956,9 +995,11 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
? interpreter_to_interpreter_bridge_offset_
: interpreter_to_compiled_code_bridge_offset_;
- copy->SetEntryPointFromInterpreter<kVerifyNone>(
+ EntryPointFromInterpreter* interpreter_entrypoint =
reinterpret_cast<EntryPointFromInterpreter*>(
- const_cast<uint8_t*>(GetOatAddress(interpreter_code))));
+ const_cast<uint8_t*>(GetOatAddress(interpreter_code)));
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ interpreter_entrypoint, target_ptr_size_);
}
}
}
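The image_writer.cc changes above copy each ArtMethod's non-pointer fields bytewise and then rewrite every pointer-sized field at its target-size offset, so a 64-bit host runtime can emit a 32-bit image without overrunning the smaller target object. A rough, self-contained sketch of that repacking idea follows; the helper and layout parameters are hypothetical, not the ImageWriter API, and a little-endian target is assumed.

// Sketch only: repack pointer-sized fields when the source (runtime) and
// target (image) pointer sizes differ.
#include <cstddef>
#include <cstdint>
#include <cstring>

void RepackMethod(const uint8_t* src, uint8_t* dst,
                  size_t size_without_ptr_fields,  // cf. SizeWithoutPointerFields()
                  size_t ptr_fields_start,         // hypothetical layout constant
                  size_t num_ptr_fields,
                  size_t src_ptr_size, size_t dst_ptr_size) {
  // 1. Copy only the non-pointer portion; copying the full source size could
  //    overrun dst when the target method is smaller (64-bit host, 32-bit image).
  std::memcpy(dst, src, size_without_ptr_fields);
  // 2. Re-emit each pointer-sized field at the offset implied by the target size.
  for (size_t i = 0; i < num_ptr_fields; ++i) {
    uint64_t value = 0;  // widened staging slot
    std::memcpy(&value, src + ptr_fields_start + i * src_ptr_size, src_ptr_size);
    // Truncating to dst_ptr_size keeps the low bytes, which is the pointer
    // value on a little-endian target.
    std::memcpy(dst + ptr_fields_start + i * dst_ptr_size, &value, dst_ptr_size);
  }
}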
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 12171451ae..2fec0aa82d 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -47,7 +47,8 @@ class ImageWriter FINAL {
portable_imt_conflict_trampoline_offset_(0), portable_resolution_trampoline_offset_(0),
portable_to_interpreter_bridge_offset_(0), quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
- quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic) {
+ quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())) {
CHECK_NE(image_begin, 0U);
}
@@ -224,6 +225,9 @@ class ImageWriter FINAL {
uint32_t quick_to_interpreter_bridge_offset_;
const bool compile_pic_;
+ // Size of pointers on the target architecture.
+ size_t target_ptr_size_;
+
friend class FixupVisitor;
friend class FixupClassVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 3c3aa02502..c3fe75b3f1 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -307,7 +307,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
}
// 9. Plant call to native code associated with method.
- __ Call(main_jni_conv->MethodStackOffset(), mirror::ArtMethod::NativeMethodOffset(),
+ MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset(
+ InstructionSetPointerSize(instruction_set));
+ __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 659c3328fc..c6beb36178 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -309,7 +309,7 @@ class OatWriter::Thumb2RelativeCallPatcher FINAL : public ArmBaseRelativeCallPat
arm::Thumb2Assembler assembler;
assembler.LoadFromOffset(
arm::kLoadWord, arm::PC, arm::R0,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
assembler.bkpt(0);
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
@@ -363,7 +363,8 @@ class OatWriter::Arm64RelativeCallPatcher FINAL : public ArmBaseRelativeCallPatc
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
arm64::Arm64Assembler assembler;
- Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 39543a24a0..dc0a829f65 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1188,7 +1188,8 @@ void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache()));
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value());
// LR()
__ blx(LR);
@@ -1229,7 +1230,8 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
@@ -1265,7 +1267,8 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetImtEntryAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fc9bdba52b..42eaf490f9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1449,7 +1449,8 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
__ Ldr(temp, MemOperand(temp.X(), index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(temp.X(),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).SizeValue()));
// lr();
__ Blr(lr);
@@ -1464,7 +1465,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 36fe063109..b0f36ce0b4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1120,7 +1120,7 @@ void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1184,7 +1184,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1219,7 +1219,8 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86PointerSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 29479a2105..e9c67e3e6d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1112,7 +1112,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1171,7 +1172,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1206,7 +1208,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 6b6d11e96c..281649e071 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -175,7 +175,7 @@ bool PatchOat::Patch(const std::string& image_location, off_t delta,
}
gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
- PatchOat p(image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
delta, timings);
t.NewTiming("Patching files");
if (!p.PatchImage()) {
@@ -297,7 +297,7 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d
CHECK(is_oat_pic == NOT_PIC);
}
- PatchOat p(elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ PatchOat p(isa, elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
delta, timings);
t.NewTiming("Patching files");
if (!skip_patching_oat && !p.PatchElf()) {
@@ -532,39 +532,44 @@ void PatchOat::VisitObject(mirror::Object* object) {
PatchOat::PatchVisitor visitor(this, copy);
object->VisitReferences<true, kVerifyNone>(visitor, visitor);
if (object->IsArtMethod<kVerifyNone>()) {
- FixupMethod(static_cast<mirror::ArtMethod*>(object),
- static_cast<mirror::ArtMethod*>(copy));
+ FixupMethod(down_cast<mirror::ArtMethod*>(object), down_cast<mirror::ArtMethod*>(copy));
}
}
void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
+ const size_t pointer_size = InstructionSetPointerSize(isa_);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
uintptr_t portable = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromPortableCompiledCode<kVerifyNone>());
+ object->GetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (portable != 0) {
- copy->SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(portable + delta_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize(reinterpret_cast<void*>(portable + delta_),
+ pointer_size);
}
uintptr_t quick= reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromQuickCompiledCode<kVerifyNone>());
+ object->GetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (quick != 0) {
- copy->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(quick + delta_));
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(quick + delta_),
+ pointer_size);
}
uintptr_t interpreter = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromInterpreter<kVerifyNone>());
+ object->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size));
if (interpreter != 0) {
- copy->SetEntryPointFromInterpreter(
- reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_));
+ copy->SetEntryPointFromInterpreterPtrSize(
+ reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_), pointer_size);
}
- uintptr_t native_method = reinterpret_cast<uintptr_t>(object->GetNativeMethod());
+ uintptr_t native_method = reinterpret_cast<uintptr_t>(
+ object->GetEntryPointFromJniPtrSize(pointer_size));
if (native_method != 0) {
- copy->SetNativeMethod(reinterpret_cast<void*>(native_method + delta_));
+ copy->SetEntryPointFromJniPtrSize(reinterpret_cast<void*>(native_method + delta_),
+ pointer_size);
}
- uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(object->GetNativeGcMap());
+ uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(
+ object->GetNativeGcMapPtrSize(pointer_size));
if (native_gc_map != 0) {
- copy->SetNativeGcMap(reinterpret_cast<uint8_t*>(native_gc_map + delta_));
+ copy->SetNativeGcMapPtrSize(reinterpret_cast<uint8_t*>(native_gc_map + delta_), pointer_size);
}
}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 5a3545bb7b..578df3aef7 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -61,15 +61,16 @@ class PatchOat {
// Takes ownership only of the ElfFile. All other pointers are only borrowed.
PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings)
: oat_file_(oat_file), image_(nullptr), bitmap_(nullptr), heap_(nullptr), delta_(delta),
- timings_(timings) {}
- PatchOat(MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
+ isa_(kNone), timings_(timings) {}
+ PatchOat(InstructionSet isa, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
MemMap* heap, off_t delta, TimingLogger* timings)
: image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), timings_(timings) {}
- PatchOat(ElfFile* oat_file, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
- MemMap* heap, off_t delta, TimingLogger* timings)
+ delta_(delta), isa_(isa), timings_(timings) {}
+ PatchOat(InstructionSet isa, ElfFile* oat_file, MemMap* image,
+ gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
+ TimingLogger* timings)
: oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), timings_(timings) {}
+ delta_(delta), isa_(isa), timings_(timings) {}
~PatchOat() {}
// Was the .art image at image_path made with --compile-pic ?
@@ -156,8 +157,10 @@ class PatchOat {
const MemMap* const heap_;
// The amount we are changing the offset by.
const off_t delta_;
- // Timing splits.
- TimingLogger* const timings_;
+ // Active instruction set, used to know the entrypoint size.
+ const InstructionSet isa_;
+
+ TimingLogger* timings_;
DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
};
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index d37e7604eb..89ac1f7a03 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -53,7 +53,7 @@ ENTRY art_portable_invoke_stub
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
ldr ip, [sp, #24] @ load the result pointer
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 632b41435f..1782db50b9 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -422,7 +422,7 @@ ENTRY art_quick_invoke_stub_internal
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
- ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 147d4348be..44159354ab 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -564,7 +564,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.macro INVOKE_STUB_CALL_AND_RETURN
// load method-> METHOD_QUICK_CODE_OFFSET
- ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET]
+ ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64]
// Branch to method.
blr x9
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 46622eb37d..e413880094 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -127,6 +127,10 @@ static inline bool Is64BitInstructionSet(InstructionSet isa) {
}
}
+static inline size_t InstructionSetPointerSize(InstructionSet isa) {
+ return Is64BitInstructionSet(isa) ? 8U : 4U;
+}
+
static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
switch (isa) {
case kArm:
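A small usage sketch of the new InstructionSetPointerSize() helper together with a pointer-size-dependent offset, mirroring the quick-compiler call sites in this patch; the enum below is a stand-in for the real InstructionSet, and the 40/48 values are the asm_support.h offsets excluding the object header.

// Sketch only: pick the quick-code entrypoint offset from the target ISA.
#include <cstddef>

enum class Isa { kThumb2, kArm64, kMips, kX86, kX86_64 };  // illustrative subset

constexpr bool Is64Bit(Isa isa) {
  return isa == Isa::kArm64 || isa == Isa::kX86_64;
}

constexpr size_t PointerSize(Isa isa) { return Is64Bit(isa) ? 8u : 4u; }

// Hypothetical stand-in for ArtMethod::EntryPointFromQuickCompiledCodeOffset();
// 40 and 48 match the MIRROR_ART_METHOD_QUICK_CODE_OFFSET_{32,64} definitions
// in runtime/asm_support.h, excluding MIRROR_OBJECT_HEADER_SIZE.
constexpr size_t QuickCodeOffset(size_t pointer_size) {
  return pointer_size == 8u ? 48u : 40u;
}

static_assert(QuickCodeOffset(PointerSize(Isa::kThumb2)) == 40u, "32-bit slot");
static_assert(QuickCodeOffset(PointerSize(Isa::kX86_64)) == 48u, "64-bit slot");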
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index d7e7a8e96b..8d418e8dd1 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -98,7 +98,7 @@ ENTRY art_portable_invoke_stub
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index e878ef7186..482485747a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -512,7 +512,7 @@ ENTRY art_quick_invoke_stub
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index a7c4124a47..1f0900e86d 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -46,7 +46,7 @@ DEFINE_FUNCTION art_portable_invoke_stub
addl LITERAL(12), %esp // pop arguments to memcpy
mov 12(%ebp), %eax // move method pointer into eax
mov %eax, (%esp) // push method pointer onto stack
- call *MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET(%eax) // call the method
+ call *MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
POP ebx // pop ebx
POP ebp // pop ebp
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 2a7ef570a5..1ce01c46f1 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -316,7 +316,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
mov 4(%esp), %ecx // copy arg1 into ecx
mov 8(%esp), %edx // copy arg2 into edx
mov 12(%esp), %ebx // copy arg3 into ebx
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET(%eax) // call the method
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP ebx // pop ebx
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 9d3a8cc713..a80e7d2989 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -518,7 +518,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished
.Lgpr_setup_finished:
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
CFI_DEF_CFA_REGISTER(rsp)
POP r9 // Pop r9 - shorty*.
@@ -601,7 +601,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
CFI_DEF_CFA_REGISTER(rsp)
POP r9 // Pop r9 - shorty*.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 26df045a25..4b4c8855d4 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -148,13 +148,21 @@ ADD_TEST_EQ(MIRROR_STRING_OFFSET_OFFSET, art::mirror::String::OffsetOffset().Int
ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET,
art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
-#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET (32 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET,
- art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value())
+#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32 (48 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
+ art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value())
-#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET (40 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET,
- art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value())
+#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (40 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
+
+#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64 (64 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
+ art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value())
+
+#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64 (48 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index aaf2da7fcd..68e20f2763 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -243,7 +243,8 @@ ClassLinker::ClassLinker(InternTable* intern_table)
portable_imt_conflict_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
- quick_to_interpreter_bridge_trampoline_(nullptr) {
+ quick_to_interpreter_bridge_trampoline_(nullptr),
+ image_pointer_size_(sizeof(void*)) {
memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
}
@@ -378,10 +379,9 @@ void ClassLinker::InitWithoutImage(const std::vector<const DexFile*>& boot_class
Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
CHECK(java_lang_reflect_ArtMethod.Get() != nullptr);
- java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize());
+ java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
-
mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
// Set up array classes for string, field, method
@@ -407,8 +407,7 @@ void ClassLinker::InitWithoutImage(const std::vector<const DexFile*>& boot_class
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
// these roots.
CHECK_NE(0U, boot_class_path.size());
- for (size_t i = 0; i != boot_class_path.size(); ++i) {
- const DexFile* dex_file = boot_class_path[i];
+ for (const DexFile* dex_file : boot_class_path) {
CHECK(dex_file != nullptr);
AppendToBootClassPath(self, *dex_file);
}
@@ -1682,6 +1681,20 @@ void ClassLinker::InitFromImage() {
// Set classes on AbstractMethod early so that IsMethod tests can be performed during the live
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
+ size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
+ if (!Runtime::Current()->IsCompiler()) {
+ // Compiler supports having an image with a different pointer size than the runtime. This
+ // happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
+ // also use 32 bit dex2oat on a system with 64 bit apps.
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
+ << sizeof(void*);
+ }
+ if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) {
+ image_pointer_size_ = 4;
+ } else {
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8));
+ image_pointer_size_ = 8;
+ }
// Set entry point to interpreter if in InterpretOnly mode.
if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
@@ -1695,7 +1708,7 @@ void ClassLinker::InitFromImage() {
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
- DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable());
+ DCHECK_EQ(array_iftable_.Read(), GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
@@ -5312,14 +5325,18 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- std::string temp;
- DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
- size_t previous_size = klass->GetObjectSize();
- if (previous_size != 0) {
- // Make sure that we didn't originally have an incorrect size.
- CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+ if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) {
+ klass->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
+ } else {
+ std::string temp;
+ DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
+ size_t previous_size = klass->GetObjectSize();
+ if (previous_size != 0) {
+ // Make sure that we didn't originally have an incorrect size.
+ CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+ }
+ klass->SetObjectSize(size);
}
- klass->SetObjectSize(size);
}
}
@@ -5372,7 +5389,6 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
}
CHECK_EQ(current_ref_offset.Uint32Value(), end_ref_offset.Uint32Value());
}
-
return true;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 385f135192..006354f948 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -805,6 +805,9 @@ class ClassLinker {
const void* quick_generic_jni_trampoline_;
const void* quick_to_interpreter_bridge_trampoline_;
+ // Image pointer size.
+ size_t image_pointer_size_;
+
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ba5aa3d75a..0c86761979 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -499,11 +499,6 @@ struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_), "dexCacheStrings"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_interpreter_), "entryPointFromInterpreter"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_jni_), "entryPointFromJni"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_), "entryPointFromQuickCompiledCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_), "gcMap"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
};
};
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index c4bc969038..0b7d382deb 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1659,7 +1659,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
*(sp32 - 1) = cookie;
// Retrieve the stored native code.
- const void* nativeCode = called->GetNativeMethod();
+ void* nativeCode = called->GetEntryPointFromJni();
// There are two cases for the content of nativeCode:
// 1) Pointer to the native function.
diff --git a/runtime/image.cc b/runtime/image.cc
index aee84bc3c0..b83eeb16d0 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '2', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '3', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 18de133147..b17f3039c6 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -138,7 +138,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
if (method->IsStatic()) {
if (shorty == "L") {
typedef jobject (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
jobject jresult;
@@ -149,35 +149,35 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get());
} else if (shorty == "Z") {
typedef jboolean (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get()));
} else if (shorty == "BI") {
typedef jbyte (fntype)(JNIEnv*, jclass, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetB(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "II") {
typedef jint (fntype)(JNIEnv*, jclass, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "LL") {
typedef jobject (fntype)(JNIEnv*, jclass, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -190,14 +190,15 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "IIZ") {
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "ILI") {
typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
+ method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -206,21 +207,21 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "VIZ") {
typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], args[1]);
} else if (shorty == "ZLL") {
typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -231,7 +232,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -242,7 +243,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -251,7 +252,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -267,7 +268,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
} else {
if (shorty == "L") {
typedef jobject (fntype)(JNIEnv*, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
jobject jresult;
@@ -278,14 +279,14 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), rcvr.get());
} else if (shorty == "LL") {
typedef jobject (fntype)(JNIEnv*, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -299,7 +300,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
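For context, the interpreter.cc hunks above are a pure accessor swap: GetEntryPointFromJni() returns a plain void*, so the const_cast<void*> wrappers around the old GetNativeMethod() become unnecessary. A minimal standalone sketch of the dispatch pattern, with stand-in names rather than the real ART/JNI types (ART relies on the conditionally-supported function-pointer/void* round trip shown here):

    #include <cstdio>

    typedef int (fn_III)(void* env, void* clazz, int a, int b);   // stand-in for shorty "III"

    static int AddImpl(void*, void*, int a, int b) { return a + b; }

    int main() {
      // What RegisterNative() stores: the native implementation as an untyped pointer.
      void* entry_point_from_jni = reinterpret_cast<void*>(&AddImpl);
      // What InterpreterJni() does per shorty: cast back to the concrete signature and call.
      fn_III* const fn = reinterpret_cast<fn_III*>(entry_point_from_jni);
      std::printf("2 + 3 = %d\n", fn(nullptr, nullptr, 2, 3));
      return 0;
    }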
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 494fa2fca2..62d17abaa2 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -307,12 +307,6 @@ inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
return pc - reinterpret_cast<uintptr_t>(code);
}
-template<VerifyObjectFlags kVerifyFlags>
-inline void ArtMethod::SetNativeMethod(const void* native_method) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
-}
-
inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
DCHECK(code_pointer != nullptr);
DCHECK_EQ(code_pointer, GetQuickOatCodePointer());
@@ -485,6 +479,12 @@ inline mirror::Class* ArtMethod::GetReturnType(bool resolve) {
return type;
}
+inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
+ // Using the default, check the class object size to make sure it matches the size of the
+ // object.
+ DCHECK_EQ(GetClass()->GetObjectSize(), sizeof(*this));
+}
+
} // namespace mirror
} // namespace art
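The helper added here backs the convenience accessors in art_method.h that implicitly pass sizeof(void*): they are only valid when the ArtMethod in memory was laid out for this build's pointer size, and the debug-only check confirms that by comparing the object size recorded in the class against the size of the C++ mirror type. A rough standalone sketch of the invariant, using stand-in types rather than the real mirror classes:

    #include <cassert>
    #include <cstdint>

    struct ClassStub { uint32_t object_size_; };      // stand-in for mirror::Class

    struct MethodStub {                               // stand-in for mirror::ArtMethod
      ClassStub* klass_;
      void* entry_point_from_jni_;

      void CheckObjectSizeEqualsMirrorSize() const {
        // Only meaningful when the object was laid out for this build's pointer size.
        assert(klass_->object_size_ == sizeof(*this));
      }
    };

    int main() {
      ClassStub klass{static_cast<uint32_t>(sizeof(MethodStub))};
      MethodStub method{&klass, nullptr};
      method.CheckObjectSizeEqualsMirrorSize();       // passes: the sizes agree
      return 0;
    }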
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index a742aaad9a..3b4d5f317d 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -483,7 +483,7 @@ void ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
if (is_fast) {
SetAccessFlags(GetAccessFlags() | kAccFastNative);
}
- SetNativeMethod(native_method);
+ SetEntryPointFromJni(native_method);
}
void ArtMethod::UnregisterNative() {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d92d00a8cb..4a7831fd62 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -48,11 +48,6 @@ class MANAGED ArtMethod FINAL : public Object {
// Size of java.lang.reflect.ArtMethod.class.
static uint32_t ClassSize();
- // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
- static constexpr uint32_t InstanceSize() {
- return sizeof(ArtMethod);
- }
-
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -258,49 +253,92 @@ class MANAGED ArtMethod FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
EntryPointFromInterpreter* GetEntryPointFromInterpreter()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<EntryPointFromInterpreter*, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_));
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromInterpreterPtrSize(sizeof(void*));
}
-
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<EntryPointFromInterpreter*, kVerifyFlags>(
+ EntryPointFromInterpreterOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
- entry_point_from_interpreter);
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter,
+ size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size);
}
- static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_));
+ ALWAYS_INLINE static MemberOffset EntryPointFromPortableCompiledCodeOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_portable_compiled_code_) / sizeof(void*) * pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromPortableCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset());
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ const void* GetEntryPointFromPortableCompiledCode()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromPortableCompiledCodePtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const void* GetEntryPointFromPortableCompiledCodePtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code);
+ CheckObjectSizeEqualsMirrorSize();
+ return SetEntryPointFromPortableCompiledCodePtrSize(entry_point_from_portable_compiled_code,
+ sizeof(void*));
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_));
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromPortableCompiledCodePtrSize(
+ const void* entry_point_from_portable_compiled_code, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(pointer_size),
+ entry_point_from_portable_compiled_code, pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset());
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code);
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
+ sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
+ const void* entry_point_from_quick_compiled_code, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code,
+ pointer_size);
}
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -365,11 +403,23 @@ class MANAGED ArtMethod FINAL : public Object {
CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
+ CheckObjectSizeEqualsMirrorSize();
+ return GetNativeGcMapPtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const uint8_t* GetNativeGcMapPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<uint8_t*>(GcMapOffset(pointer_size), pointer_size);
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetNativeGcMap(const uint8_t* data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data);
+ CheckObjectSizeEqualsMirrorSize();
+ SetNativeGcMapPtrSize(data, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetNativeGcMapPtrSize(const uint8_t* data, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(GcMapOffset(pointer_size), data,
+ pointer_size);
}
// When building the oat need a convenient place to stuff the offset of the native GC map.
@@ -409,16 +459,46 @@ class MANAGED ArtMethod FINAL : public Object {
void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static MemberOffset NativeMethodOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
+ static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size);
}
- const void* GetNativeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*>(NativeMethodOffset());
+ static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetNativeMethod(const void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
+ }
+
+ static MemberOffset GcMapOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, gc_map_) / sizeof(void*) * pointer_size);
+ }
+
+ void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromJniPtrSize(sizeof(void*));
+ }
+ ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromJniPtrSize<kVerifyFlags>(entrypoint, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
+ }
static MemberOffset GetMethodIndexOffset() {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
@@ -521,7 +601,16 @@ class MANAGED ArtMethod FINAL : public Object {
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- private:
+ static size_t SizeWithoutPointerFields() {
+ return sizeof(ArtMethod) - sizeof(PtrSizedFields);
+ }
+
+ // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
+ static size_t InstanceSize(size_t pointer_size) {
+ return SizeWithoutPointerFields() + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+ }
+
+ protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
HeapReference<Class> declaring_class_;
@@ -535,26 +624,6 @@ class MANAGED ArtMethod FINAL : public Object {
// Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
HeapReference<ObjectArray<String>> dex_cache_strings_;
- // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
- // compiled code.
- uint64_t entry_point_from_interpreter_;
-
- // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
- uint64_t entry_point_from_jni_;
-
- // Method dispatch from portable compiled code invokes this pointer which may cause bridging into
- // quick compiled code or the interpreter.
- uint64_t entry_point_from_portable_compiled_code_;
-
- // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
- // portable compiled code or the interpreter.
- uint64_t entry_point_from_quick_compiled_code_;
-
- // Pointer to a data structure created by the compiler and used by the garbage collector to
- // determine which registers hold live references to objects within the heap. Keyed by native PC
- // offsets for the quick compiler and dex PCs for the portable.
- uint64_t gc_map_;
-
// Access flags; low 16 bits are defined by spec.
uint32_t access_flags_;
@@ -573,15 +642,46 @@ class MANAGED ArtMethod FINAL : public Object {
// ifTable.
uint32_t method_index_;
+ // Add alignment word here if necessary.
+
+ // Must be the last fields in the method.
+ struct PACKED(4) PtrSizedFields {
+ // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
+ // compiled code.
+ void* entry_point_from_interpreter_;
+
+ // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+ void* entry_point_from_jni_;
+
+ // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
+ // portable compiled code or the interpreter.
+ void* entry_point_from_quick_compiled_code_;
+
+ // Pointer to a data structure created by the compiler and used by the garbage collector to
+ // determine which registers hold live references to objects within the heap. Keyed by native PC
+ // offsets for the quick compiler and dex PCs for the portable.
+ void* gc_map_;
+
+ // Method dispatch from portable compiled code invokes this pointer which may cause bridging
+ // into quick compiled code or the interpreter. Last to simplify entrypoint logic.
+ void* entry_point_from_portable_compiled_code_;
+ } ptr_sized_fields_;
+
static GcRoot<Class> java_lang_reflect_ArtMethod_;
private:
+ ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static size_t PtrSizedFieldsOffset() {
+ return OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
+ }
+
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
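This is the core of the change: the five former uint64_t entry-point/gc-map fields become a trailing PACKED(4) struct of void* members, and every offset query takes an explicit pointer size. The formula is the offset of ptr_sized_fields_ plus the member's slot index (its offset within PtrSizedFields divided by the compiling toolchain's sizeof(void*)) multiplied by the requested pointer size; InstanceSize(pointer_size) scales the trailing struct the same way. PACKED(4) keeps a 64-bit host compiler from inserting alignment padding that a 32-bit target would not have, which is presumably what lets tools such as a 64-bit dex2oat or patchoat compute 32-bit image layouts. The following self-contained sketch reproduces only the arithmetic; the field list is trimmed and the surrounding 32-bit fields are invented for illustration:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct PtrSizedFields {
      void* entry_point_from_interpreter_;
      void* entry_point_from_jni_;
      void* entry_point_from_quick_compiled_code_;
      void* gc_map_;
      void* entry_point_from_portable_compiled_code_;
    };

    struct MethodLayout {                 // illustrative stand-in for ArtMethod
      uint32_t access_flags_;
      uint32_t dex_code_item_offset_;
      uint32_t dex_method_index_;
      uint32_t method_index_;
      PtrSizedFields ptr_sized_fields_;   // must stay last
    };

    static size_t PtrSizedFieldsOffset() {
      return offsetof(MethodLayout, ptr_sized_fields_);
    }

    // Mirrors the EntryPointFrom*Offset(pointer_size) pattern: slot index within
    // the trailing struct, scaled by the *target* pointer size.
    static size_t EntryPointFromJniOffset(size_t pointer_size) {
      return PtrSizedFieldsOffset() +
             offsetof(PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size;
    }

    static size_t InstanceSize(size_t pointer_size) {
      const size_t without_pointer_fields = sizeof(MethodLayout) - sizeof(PtrSizedFields);
      return without_pointer_fields + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
    }

    int main() {
      // A 64-bit build computing layout for a 32-bit target sees smaller offsets
      // and a smaller instance size; for its own pointer size the values match
      // the native struct layout.
      std::printf("jni offset for 4-byte pointers: %zu\n", EntryPointFromJniOffset(4));
      std::printf("jni offset for 8-byte pointers: %zu\n", EntryPointFromJniOffset(8));
      std::printf("instance size 32-bit / 64-bit: %zu / %zu\n", InstanceSize(4), InstanceSize(8));
      return 0;
    }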
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index f45ea8571f..82425b559d 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -528,6 +528,13 @@ class MANAGED Class FINAL : public Object {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
+ void SetObjectSizeWithoutChecks(uint32_t new_object_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Not called within a transaction.
+ return SetField32<false, false, kVerifyNone>(
+ OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
+ }
+
// Returns true if this class is in the same packages as that class.
bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
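SetObjectSizeWithoutChecks() is a small escape hatch: it writes Class::object_size_ without the transaction or verification paths of the existing setter, presumably so the class linker or image writer can stamp ArtMethod.class with the pointer-size-dependent InstanceSize() value instead of sizeof(ArtMethod). A hedged sketch of that usage, with invented sizes and a stand-in class type:

    #include <cstdint>
    #include <cstdio>

    struct ClassStub {                    // stand-in for mirror::Class
      uint32_t object_size_;
      // No transaction or verification hooks here; the real method skips them too.
      void SetObjectSizeWithoutChecks(uint32_t new_object_size) { object_size_ = new_object_size; }
    };

    // Hypothetical InstanceSize for a layout with 5 pointer-sized fields after
    // 24 bytes of fixed-size fields (numbers are illustrative only).
    static uint32_t InstanceSize(uint32_t pointer_size) { return 24 + 5 * pointer_size; }

    int main() {
      ClassStub art_method_class{0};
      art_method_class.SetObjectSizeWithoutChecks(InstanceSize(4));   // 32-bit image: 44 bytes
      std::printf("object_size_ = %u\n", art_method_class.object_size_);
      return 0;
    }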
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 4199eef619..121947ddee 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -404,8 +404,7 @@ inline size_t Object::SizeOf() {
}
DCHECK_GE(result, sizeof(Object))
<< " class=" << PrettyTypeOf(GetClass<kNewFlags, kReadBarrierOption>());
- DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
- DCHECK(!(IsArtMethod<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtMethod));
+ DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
return result;
}
@@ -962,7 +961,6 @@ inline void Object::VisitReferences(const Visitor& visitor,
}
}
}
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 0ce5231b40..221feca6ad 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -392,15 +392,26 @@ class MANAGED LOCKABLE Object {
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
- SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int32_t>(new_value));
-#else
- SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int64_t>(new_value));
-#endif
+ SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, new_value, sizeof(void*));
}
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
+ ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
+ size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
+ DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we don't lose any non-zero bits.
+ SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, static_cast<int32_t>(ptr));
+ } else {
+ SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+ }
+ }
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -413,11 +424,21 @@ class MANAGED LOCKABLE Object {
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
- return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
-#else
- return reinterpret_cast<T>(GetField64<kVerifyFlags, kIsVolatile>(field_offset));
-#endif
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
+ }
+
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
+ } else {
+ int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
+ // Check that we don't lose any non-zero bits.
+ DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
+ return reinterpret_cast<T>(v);
+ }
}
// TODO: Fixme when anotatalysis works with visitors.
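Here the old #ifndef __LP64__ selection between the 32-bit and 64-bit field accessors becomes an explicit pointer_size parameter: 4 routes through SetField32/GetField32, 8 through SetField64/GetField64, and debug checks verify that narrowing to 32 bits loses no significant bits. A standalone sketch of the same branching, using raw memory in place of ART's field accessors:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Write a pointer into a field that is either 4 or 8 bytes wide.
    static void SetFieldPtrWithSize(void* field, void* new_value, size_t pointer_size) {
      assert(pointer_size == 4 || pointer_size == 8);
      if (pointer_size == 4) {
        intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
        int32_t narrow = static_cast<int32_t>(ptr);
        assert(static_cast<intptr_t>(narrow) == ptr);  // don't silently drop high bits
        std::memcpy(field, &narrow, sizeof(narrow));
      } else {
        int64_t wide = static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value));
        std::memcpy(field, &wide, sizeof(wide));
      }
    }

    static void* GetFieldPtrWithSize(const void* field, size_t pointer_size) {
      assert(pointer_size == 4 || pointer_size == 8);
      if (pointer_size == 4) {
        int32_t narrow;
        std::memcpy(&narrow, field, sizeof(narrow));
        return reinterpret_cast<void*>(static_cast<intptr_t>(narrow));
      }
      int64_t wide;
      std::memcpy(&wide, field, sizeof(wide));
      assert(static_cast<int64_t>(static_cast<intptr_t>(wide)) == wide);  // no truncation
      return reinterpret_cast<void*>(static_cast<intptr_t>(wide));
    }

    int main() {
      unsigned char storage[8] = {};
      int dummy = 0;
      SetFieldPtrWithSize(storage, &dummy, sizeof(void*));
      std::printf("round-trip ok: %d\n", GetFieldPtrWithSize(storage, sizeof(void*)) == &dummy);
      return 0;
    }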
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index a0aaa9e8c7..4402031ef1 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -78,6 +78,14 @@ class ObjectTest : public CommonRuntimeTest {
TEST_F(ObjectTest, Constants) {
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
+ EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
+ ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
+ ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
}
TEST_F(ObjectTest, IsInSamePackage) {
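The new expectations pin the pointer-size-parameterized offsets against the MIRROR_ART_METHOD_*_OFFSET_32/64 constants that asm_support.h provides to the hand-written assembly stubs, so the assembly and the C++ layout cannot drift apart silently. The same kind of guard can also be expressed as a compile-time check; the sketch below uses made-up constant values and a made-up offset formula purely to show the shape:

    #include <cstddef>

    // Illustrative only: the real values live in runtime/asm_support.h and are
    // compared against ArtMethod::EntryPointFrom*Offset(4 or 8) in the test.
    constexpr size_t kMirrorArtMethodQuickCodeOffset32 = 40;  // hypothetical
    constexpr size_t kMirrorArtMethodQuickCodeOffset64 = 48;  // hypothetical

    constexpr size_t EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
      // Stand-in for the real computation (base + slot index * pointer_size).
      return 32 + 2 * pointer_size;  // hypothetical layout
    }

    static_assert(EntryPointFromQuickCompiledCodeOffset(4) == kMirrorArtMethodQuickCodeOffset32,
                  "32-bit assembly offset out of sync");
    static_assert(EntryPointFromQuickCompiledCodeOffset(8) == kMirrorArtMethodQuickCodeOffset64,
                  "64-bit assembly offset out of sync");

    int main() { return 0; }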
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index c2c6b12037..ffadfc61a7 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -72,7 +72,7 @@ static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* met
if (count < method_count) {
methods[count].name = m->GetName();
methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ methods[count].fnPtr = m->GetEntryPointFromJni();
count++;
} else {
LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
@@ -85,7 +85,7 @@ static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* met
if (count < method_count) {
methods[count].name = m->GetName();
methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ methods[count].fnPtr = m->GetEntryPointFromJni();
count++;
} else {
LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);