Diffstat (limited to 'compiler')
-rw-r--r--  compiler/compilers.cc                         159
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc              4
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc          4
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc                5
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc            3
-rw-r--r--  compiler/image_writer.cc                       75
-rw-r--r--  compiler/image_writer.h                         6
-rw-r--r--  compiler/jni/quick/jni_compiler.cc              4
-rw-r--r--  compiler/oat_writer.cc                          5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc       9
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    9
13 files changed, 256 insertions, 39 deletions
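
The common thread in this change is that mirror::ArtMethod's pointer-sized fields (entrypoints, native GC map) are now addressed with an explicit target pointer size, so a 32-bit boot image can be written from a 64-bit host runtime. A minimal sketch of the offset helper's shape, with an assumed field layout (kPtrFieldsStart and kQuickCodeIndex are illustrative, not ART's real layout in runtime/mirror/art_method.h):

    #include <cstddef>
    #include <cstdint>

    struct MemberOffset { uint32_t value; };

    constexpr size_t kPtrFieldsStart = 32;  // assumed end of the fixed 32-bit fields
    constexpr size_t kQuickCodeIndex = 3;   // assumed position among pointer-sized fields

    MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
      // pointer_size is 4 on 32-bit targets and 8 on 64-bit targets, so the
      // same field lands at a different offset depending on the target.
      return MemberOffset{static_cast<uint32_t>(
          kPtrFieldsStart + kQuickCodeIndex * pointer_size)};
    }
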
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
new file mode 100644
index 0000000000..2481128e4d
--- /dev/null
+++ b/compiler/compilers.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compilers.h"
+
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
+#include "elf_writer_quick.h"
+#include "mirror/art_method-inl.h"
+
+namespace art {
+
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver* driver);
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver* driver);
+extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver* driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver* driver,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::DexFile& dex_file);
+
+// Hack for CFI CIE initialization
+extern std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64);
+
+void QuickCompiler::Init() const {
+ ArtInitQuickCompilerContext(GetCompilerDriver());
+}
+
+void QuickCompiler::UnInit() const {
+ ArtUnInitQuickCompilerContext(GetCompilerDriver());
+}
+
+CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompileWithSeaIR(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ if (method != nullptr) {
+ return method;
+ }
+
+ return ArtQuickCompileMethod(GetCompilerDriver(),
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+}
+
+CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const {
+ return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
+}
+
+uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+ size_t pointer_size = InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet());
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ pointer_size));
+}
+
+bool QuickCompiler::WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host) const {
+ return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host,
+ *GetCompilerDriver());
+}
+
+Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ Mir2Lir* mir_to_lir = nullptr;
+ switch (cu->instruction_set) {
+ case kThumb2:
+ mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kArm64:
+ mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kMips:
+ mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kX86:
+ // Fall-through.
+ case kX86_64:
+ mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+ }
+
+ /* The number of compiler temporaries depends on backend so set it up now if possible */
+ if (mir_to_lir) {
+ size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
+ bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
+ CHECK(set_max);
+ }
+ return mir_to_lir;
+}
+
+std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
+ const CompilerDriver& driver) const {
+ if (driver.GetInstructionSet() == kX86) {
+ return X86CFIInitialization(false);
+ }
+ if (driver.GetInstructionSet() == kX86_64) {
+ return X86CFIInitialization(true);
+ }
+ return nullptr;
+}
+
+CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, class_loader, dex_file);
+ if (method != nullptr) {
+ return method;
+ }
+
+ return QuickCompiler::Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+}
+
+} // namespace art
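
QuickCompiler::GetEntryPointOf above derives the pointer size from the driver's instruction set. A hedged sketch of what InstructionSetPointerSize presumably does (the real function lives in the runtime; the enumerators match the cases handled by GetCodeGenerator):

    #include <cstddef>

    enum InstructionSet { kNone, kArm, kThumb2, kArm64, kMips, kX86, kX86_64 };

    size_t InstructionSetPointerSize(InstructionSet isa) {
      switch (isa) {
        case kArm:
        case kThumb2:  // Thumb2 is an encoding of 32-bit ARM.
        case kMips:
        case kX86:
          return 4;
        case kArm64:
        case kX86_64:
          return 8;
        default:
          return sizeof(void*);  // sketch fallback; the real code would fail fatally.
      }
    }
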
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b4eebb320e..a3b4df3c7e 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -536,8 +536,8 @@ static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSE
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 106996ea44..3e5b7bfe0a 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -473,8 +473,8 @@ static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
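
The ARM and ARM64 hunks above are the same fix with different constants; the per-ISA pointer-size constants they pass are presumably:

    constexpr size_t kArmPointerSize = 4;    // assumed definition
    constexpr size_t kArm64PointerSize = 8;  // assumed definition
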
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 4cb12f1dc9..a7900ae7df 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -476,9 +476,10 @@ static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
- cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+ cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
cg->TargetPtrReg(kInvokeTgt));
return true;
}
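
gen_invoke.cc is the target-independent path, so it computes the offset once from the compilation unit's instruction set. A usage sketch combining the two illustrative helpers above:

    // Under the assumed layout: 32 + 3*4 == 44 for kThumb2, 32 + 3*8 == 56 for kArm64.
    int32_t QuickEntryPointOffsetFor(InstructionSet isa) {
      return static_cast<int32_t>(
          EntryPointFromQuickCompiledCodeOffset(InstructionSetPointerSize(isa)).value);
    }
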
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index db2f272436..f5f71132e4 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1006,7 +1006,8 @@ LIR* X86Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
} else {
call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ cu_->target64 ? 8 : 4).Int32Value());
}
} else {
call_insn = GenInvokeNoInlineCall(this, method_info.GetSharpType());
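
target_x86.cc picks the width inline with cu_->target64 ? 8 : 4; the literals match the named constants the other back-ends pass, which would read:

    constexpr size_t kX86PointerSize = 4;     // assumed definition
    constexpr size_t kX86_64PointerSize = 8;  // assumed definition
    // i.e. cu_->target64 ? kX86_64PointerSize : kX86PointerSize
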
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6096625aa1..64d2de1b45 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -68,6 +68,7 @@ using ::art::mirror::String;
namespace art {
bool ImageWriter::PrepareImageAddressSpace() {
+ target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
{
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
@@ -214,7 +215,14 @@ void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
void ImageWriter::AssignImageOffset(mirror::Object* object) {
DCHECK(object != nullptr);
SetImageOffset(object, image_end_);
- image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
+ size_t object_size;
+ if (object->IsArtMethod()) {
+ // Methods are sized based on the target pointer size.
+ object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ } else {
+ object_size = object->SizeOf();
+ }
+ image_end_ += RoundUp(object_size, 8); // 64-bit alignment
DCHECK_LT(image_end_, image_->Size());
}
@@ -754,7 +762,14 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
size_t offset = image_writer->GetImageOffset(obj);
uint8_t* dst = image_writer->image_->Begin() + offset;
const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
- size_t n = obj->SizeOf();
+ size_t n;
+ if (obj->IsArtMethod()) {
+ // Size without pointer fields since we don't want to overrun the buffer if target art method
+ // is 32 bits but source is 64 bits.
+ n = mirror::ArtMethod::SizeWithoutPointerFields();
+ } else {
+ n = obj->SizeOf();
+ }
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
Object* copy = reinterpret_cast<Object*>(dst);
@@ -834,6 +849,10 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
}
if (orig->IsArtMethod<kVerifyNone>()) {
FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
+ } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
+ // Set the right size for the target.
+ size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
}
}
@@ -892,29 +911,48 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
+ // For 64 bit targets we need to repack the current runtime pointer sized fields to the right
+ // locations.
+ // Copy all of the fields from the runtime methods to the target methods first since we did a
+ // bytewise copy earlier.
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
+ target_ptr_size_);
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(orig->GetNativeGcMap(), target_ptr_size_);
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
} else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
- (const_cast<uint8_t*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>(
+ GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
} else {
bool quick_is_interpreted;
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
// Portable entrypoint:
const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
@@ -937,18 +975,19 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// initialization.
portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
}
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);
-
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ portable_code, target_ptr_size_);
// JNI entrypoint:
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym.
// Note this is not the code_ pointer, that is handled above.
- copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
+ target_ptr_size_);
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
- copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(native_gc_map, target_ptr_size_);
}
// Interpreter entrypoint:
@@ -956,9 +995,11 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
? interpreter_to_interpreter_bridge_offset_
: interpreter_to_compiled_code_bridge_offset_;
- copy->SetEntryPointFromInterpreter<kVerifyNone>(
+ EntryPointFromInterpreter* interpreter_entrypoint =
reinterpret_cast<EntryPointFromInterpreter*>(
- const_cast<uint8_t*>(GetOatAddress(interpreter_code))));
+ const_cast<uint8_t*>(GetOatAddress(interpreter_code)));
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ interpreter_entrypoint, target_ptr_size_);
}
}
}
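
Taken together, the image_writer.cc hunks implement one copy discipline: byte-copy only the fixed-size prefix of each ArtMethod (SizeWithoutPointerFields), then re-emit every pointer-sized field at the target width. A condensed sketch of that discipline, with an assumed layout and little-endian targets:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct HostMethod {       // host-runtime view; pointers are sizeof(void*)
      uint32_t fixed[8];      // stand-in for the fixed 32-bit fields
      void* ptr_fields[5];    // interpreter, portable, quick, jni, gc-map slots
    };

    // dst points into the image buffer; its pointer fields are target-width.
    void CopyMethod(uint8_t* dst, const HostMethod& src, size_t target_ptr_size) {
      const size_t fixed_size = sizeof(src.fixed);  // SizeWithoutPointerFields()
      std::memcpy(dst, src.fixed, fixed_size);      // safe even if target < host
      for (size_t i = 0; i < 5; ++i) {
        // Repack each pointer at the target width (little-endian assumed:
        // copying the low target_ptr_size bytes truncates 64 -> 32 correctly).
        uint64_t v = reinterpret_cast<uintptr_t>(src.ptr_fields[i]);
        std::memcpy(dst + fixed_size + i * target_ptr_size, &v, target_ptr_size);
      }
    }
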
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 12171451ae..2fec0aa82d 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -47,7 +47,8 @@ class ImageWriter FINAL {
portable_imt_conflict_trampoline_offset_(0), portable_resolution_trampoline_offset_(0),
portable_to_interpreter_bridge_offset_(0), quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
- quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic) {
+ quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())) {
CHECK_NE(image_begin, 0U);
}
@@ -224,6 +225,9 @@ class ImageWriter FINAL {
uint32_t quick_to_interpreter_bridge_offset_;
const bool compile_pic_;
+ // Size of pointers on the target architecture.
+ size_t target_ptr_size_;
+
friend class FixupVisitor;
friend class FixupClassVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 3c3aa02502..c3fe75b3f1 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -307,7 +307,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
}
// 9. Plant call to native code associated with method.
- __ Call(main_jni_conv->MethodStackOffset(), mirror::ArtMethod::NativeMethodOffset(),
+ MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset(
+ InstructionSetPointerSize(instruction_set));
+ __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
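
The JNI stub call goes through the same kind of helper; under the illustrative layout from the first sketch, EntryPointFromJniOffset would look like:

    constexpr size_t kJniIndex = 2;  // assumed position among pointer-sized fields
    MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
      return MemberOffset{static_cast<uint32_t>(
          kPtrFieldsStart + kJniIndex * pointer_size)};
    }
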
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 659c3328fc..c6beb36178 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -309,7 +309,7 @@ class OatWriter::Thumb2RelativeCallPatcher FINAL : public ArmBaseRelativeCallPat
arm::Thumb2Assembler assembler;
assembler.LoadFromOffset(
arm::kLoadWord, arm::PC, arm::R0,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
assembler.bkpt(0);
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
@@ -363,7 +363,8 @@ class OatWriter::Arm64RelativeCallPatcher FINAL : public ArmBaseRelativeCallPatc
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
arm64::Arm64Assembler assembler;
- Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 39543a24a0..dc0a829f65 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1188,7 +1188,8 @@ void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache()));
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value());
// LR()
__ blx(LR);
@@ -1229,7 +1230,8 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
@@ -1265,7 +1267,8 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetImtEntryAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fc9bdba52b..42eaf490f9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1449,7 +1449,8 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
__ Ldr(temp, MemOperand(temp.X(), index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(temp.X(),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).SizeValue()));
// lr();
__ Blr(lr);
@@ -1464,7 +1465,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 36fe063109..b0f36ce0b4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1120,7 +1120,7 @@ void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1184,7 +1184,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1219,7 +1219,8 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86PointerSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 29479a2105..e9c67e3e6d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1112,7 +1112,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1171,7 +1172,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1206,7 +1208,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
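
All four optimizing back-ends now emit the same shape of call: load the ArtMethod into a temp register, then call through the quick entry point at the pointer-size-correct offset. With the assumed layout from the first sketch, the emitted sequences would be roughly:

    // x86    (pointer_size 4):    call [temp + 44]
    // x86_64 (pointer_size 8):    call [temp + 56]
    // arm    (kArmPointerSize):   ldr lr, [temp, #44]; blx lr
    // arm64  (kArm64PointerSize): ldr lr, [temp, #56]; blr lr
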