author | Ian Rogers <irogers@google.com> | 2014-01-06 12:55:46 -0800
committer | Ian Rogers <irogers@google.com> | 2014-02-06 23:20:27 -0800
commit | ef7d42fca18c16fbaf103822ad16f23246e2905d (patch)
tree | c67eea52a349c2ea7f2c3bdda8e73933c05531a8 /compiler
parent | 822115a225185d2896607eb08d70ce5c7099adef (diff)
Object model changes to support 64-bit.
Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object*, so that functionality to compress larger
references can be captured in the ObjectReference implementation.
ObjectReferences are 32-bit and all other aspects of object layout remain as
they are currently.
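As a rough illustration of the idea (a sketch only; the compression scheme and some names here are assumptions, not necessarily ART's actual mirror::ObjectReference API), a 32-bit reference value type centralizes how an Object* is packed into and unpacked from a field:

```cpp
#include <cstdint>

namespace mirror { class Object; }

// Sketch of a 32-bit reference holder: every Object-to-Object field goes
// through this value type, so a future compression scheme only has to change
// Compress()/Decompress() rather than every field access site.
class ObjectReferenceSketch {
 public:
  static ObjectReferenceSketch FromMirrorPtr(mirror::Object* ptr) {
    return ObjectReferenceSketch(Compress(ptr));
  }

  mirror::Object* AsMirrorPtr() const { return Decompress(reference_); }

  void Assign(mirror::Object* other) { reference_ = Compress(other); }

 private:
  explicit ObjectReferenceSketch(uint32_t ref) : reference_(ref) {}

  // Assumes the managed heap lives in the low 4GB, so truncation round-trips.
  static uint32_t Compress(mirror::Object* ptr) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
  }
  static mirror::Object* Decompress(uint32_t ref) {
    return reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(ref));
  }

  uint32_t reference_;  // the only data member, so references stay 32 bits wide
};
```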
Expand fields in objects holding pointers so they can hold 64-bit pointers. It's
expected that the size of these fields will come down by improving where we hold
compiler metadata.
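A hedged sketch of the field widening (the struct and helper names below are made up for illustration): a native pointer stored inside a managed object gets a fixed 64-bit slot, so field offsets are identical on 32-bit and 64-bit hosts even though half of the slot is unused on 32-bit.

```cpp
#include <cstdint>

// Illustration only: a fixed 64-bit slot for a native pointer inside a managed object.
struct NativePointerFieldSketch {
  uint64_t storage_ = 0;  // wide enough for a 64-bit pointer on any host

  template <typename T>
  void Set(T* ptr) {
    storage_ = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
  }

  template <typename T>
  T* Get() const {
    return reinterpret_cast<T*>(static_cast<uintptr_t>(storage_));
  }
};
```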
Stub out the x86_64 architecture-specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
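Schematically (a sketch only, not the full ArtMethod definition), making the backend explicit means a method object carries one entry point per backend and callers pick the quick or portable accessor by name, matching the renames visible in the diff below:

```cpp
// Sketch of the explicit Quick/Portable entry-point split.
class MethodEntryPointsSketch {
 public:
  // Before the change there was a single, ambiguous accessor such as
  // GetEntryPointFromCompiledCode(); after it, callers name the backend.
  const void* GetEntryPointFromQuickCompiledCode() const {
    return entry_point_from_quick_compiled_code_;
  }
  const void* GetEntryPointFromPortableCompiledCode() const {
    return entry_point_from_portable_compiled_code_;
  }
  void SetEntryPointFromQuickCompiledCode(const void* entry_point) {
    entry_point_from_quick_compiled_code_ = entry_point;
  }
  void SetEntryPointFromPortableCompiledCode(const void* entry_point) {
    entry_point_from_portable_compiled_code_ = entry_point;
  }

 private:
  const void* entry_point_from_quick_compiled_code_ = nullptr;
  const void* entry_point_from_portable_compiled_code_ = nullptr;
};
```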
Templatize AtomicInteger to support more than just int32_t as a type.
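The shape of that change, sketched on top of std::atomic (which the real class may not use; AtomicInteger simply becomes the int32_t instantiation of the template):

```cpp
#include <atomic>
#include <cstdint>

// Sketch: the fixed-width AtomicInteger becomes a template so other widths,
// e.g. int64_t or size_t counters, can reuse the same implementation.
template <typename T>
class AtomicSketch {
 public:
  explicit AtomicSketch(T value = T()) : value_(value) {}

  T Load() const { return value_.load(std::memory_order_seq_cst); }
  void Store(T desired) { value_.store(desired, std::memory_order_seq_cst); }
  T FetchAndAdd(T delta) { return value_.fetch_add(delta, std::memory_order_seq_cst); }
  bool CompareAndSwap(T expected, T desired) {
    return value_.compare_exchange_strong(expected, desired);
  }

 private:
  std::atomic<T> value_;
};

using AtomicInteger = AtomicSketch<int32_t>;  // existing users keep the old name
```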
Add missing annotalysis information on the mutator lock, and fix related issues.
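For context, "annotalysis" refers to Clang's thread-safety analysis; the diff below adds annotations such as SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) and LOCKS_EXCLUDED(Locks::mutator_lock_). A minimal illustration of how such macros typically map onto Clang attributes follows (these definitions and names are simplified stand-ins, not ART's real macros):

```cpp
// Simplified stand-ins for ART's annotalysis macros (illustration only).
#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define SHARED_LOCKS_REQUIRED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define SHARED_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#endif

// A capability standing in for Locks::mutator_lock_.
class CAPABILITY("mutator lock") MutatorLockSketch {};
extern MutatorLockSketch* mutator_lock_sketch;

// With -Wthread-safety, Clang warns at call sites that cannot prove the
// shared lock is held.
void FixupObjectSketch() SHARED_LOCKS_REQUIRED(mutator_lock_sketch);

// Must be called with the lock not held.
void CompileAllSketch() LOCKS_EXCLUDED(mutator_lock_sketch);
```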
Refactor array-copy implementations so they are shared between System and other
uses elsewhere in the runtime.
Fix numerous 64-bit build issues.
Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
Diffstat (limited to 'compiler')
28 files changed, 338 insertions, 304 deletions
diff --git a/compiler/buffered_output_stream.cc b/compiler/buffered_output_stream.cc index 81a58f6284..0940a80cc1 100644 --- a/compiler/buffered_output_stream.cc +++ b/compiler/buffered_output_stream.cc @@ -23,7 +23,7 @@ namespace art { BufferedOutputStream::BufferedOutputStream(OutputStream* out) : OutputStream(out->GetLocation()), out_(out), used_(0) {} -bool BufferedOutputStream::WriteFully(const void* buffer, int64_t byte_count) { +bool BufferedOutputStream::WriteFully(const void* buffer, size_t byte_count) { if (byte_count > kBufferSize) { Flush(); return out_->WriteFully(buffer, byte_count); diff --git a/compiler/buffered_output_stream.h b/compiler/buffered_output_stream.h index 7d874fbc5c..75a3f24c70 100644 --- a/compiler/buffered_output_stream.h +++ b/compiler/buffered_output_stream.h @@ -31,7 +31,7 @@ class BufferedOutputStream : public OutputStream { delete out_; } - virtual bool WriteFully(const void* buffer, int64_t byte_count); + virtual bool WriteFully(const void* buffer, size_t byte_count); virtual off_t Seek(off_t offset, Whence whence); diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc index 29ff390678..f6d724ab56 100644 --- a/compiler/compiled_method.cc +++ b/compiler/compiled_method.cc @@ -20,14 +20,16 @@ namespace art { CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, - const std::vector<uint8_t>& code) - : compiler_driver_(compiler_driver), instruction_set_(instruction_set), code_(nullptr) { - SetCode(code); + const std::vector<uint8_t>& quick_code) + : compiler_driver_(compiler_driver), instruction_set_(instruction_set), + portable_code_(nullptr), quick_code_(nullptr) { + SetCode(&quick_code, nullptr); } CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, const std::string& elf_object, const std::string& symbol) - : compiler_driver_(compiler_driver), instruction_set_(instruction_set), symbol_(symbol) { + : compiler_driver_(compiler_driver), instruction_set_(instruction_set), + portable_code_(nullptr), quick_code_(nullptr), symbol_(symbol) { CHECK_NE(elf_object.size(), 0U); CHECK_NE(symbol.size(), 0U); std::vector<uint8_t> temp_code(elf_object.size()); @@ -38,12 +40,41 @@ CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instr // change to have different kinds of compiled methods. This is // being deferred until we work on hybrid execution or at least // until we work on batch compilation. 
- SetCode(temp_code); + SetCode(nullptr, &temp_code); } -void CompiledCode::SetCode(const std::vector<uint8_t>& code) { - CHECK(!code.empty()); - code_ = compiler_driver_->DeduplicateCode(code); +void CompiledCode::SetCode(const std::vector<uint8_t>* quick_code, + const std::vector<uint8_t>* portable_code) { + if (portable_code != nullptr) { + CHECK(!portable_code->empty()); + portable_code_ = compiler_driver_->DeduplicateCode(*portable_code); + } + if (quick_code != nullptr) { + CHECK(!quick_code->empty()); + quick_code_ = compiler_driver_->DeduplicateCode(*quick_code); + } +} + +bool CompiledCode::operator==(const CompiledCode& rhs) const { + if (quick_code_ != nullptr) { + if (rhs.quick_code_ == nullptr) { + return false; + } else if (quick_code_->size() != rhs.quick_code_->size()) { + return false; + } else { + return std::equal(quick_code_->begin(), quick_code_->end(), rhs.quick_code_->begin()); + } + } else if (portable_code_ != nullptr) { + if (rhs.portable_code_ == nullptr) { + return false; + } else if (portable_code_->size() != rhs.portable_code_->size()) { + return false; + } else { + return std::equal(portable_code_->begin(), portable_code_->end(), + rhs.portable_code_->begin()); + } + } + return (rhs.quick_code_ == nullptr) && (rhs.portable_code_ == nullptr); } uint32_t CompiledCode::AlignCode(uint32_t offset) const { @@ -100,7 +131,6 @@ const void* CompiledCode::CodePointer(const void* code_pointer, } } -#if defined(ART_USE_PORTABLE_COMPILER) const std::string& CompiledCode::GetSymbol() const { CHECK_NE(0U, symbol_.size()); return symbol_; @@ -114,18 +144,17 @@ const std::vector<uint32_t>& CompiledCode::GetOatdataOffsetsToCompliledCodeOffse void CompiledCode::AddOatdataOffsetToCompliledCodeOffset(uint32_t offset) { oatdata_offsets_to_compiled_code_offset_.push_back(offset); } -#endif CompiledMethod::CompiledMethod(CompilerDriver& driver, InstructionSet instruction_set, - const std::vector<uint8_t>& code, + const std::vector<uint8_t>& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const std::vector<uint8_t>& mapping_table, const std::vector<uint8_t>& vmap_table, const std::vector<uint8_t>& native_gc_map) - : CompiledCode(&driver, instruction_set, code), frame_size_in_bytes_(frame_size_in_bytes), + : CompiledCode(&driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask), mapping_table_(driver.DeduplicateMappingTable(mapping_table)), vmap_table_(driver.DeduplicateVMapTable(vmap_table)), diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h index e4fedf1ab4..611230509a 100644 --- a/compiler/compiled_method.h +++ b/compiler/compiled_method.h @@ -36,7 +36,7 @@ class CompiledCode { public: // For Quick to supply an code blob CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, - const std::vector<uint8_t>& code); + const std::vector<uint8_t>& quick_code); // For Portable to supply an ELF object CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, @@ -46,16 +46,18 @@ class CompiledCode { return instruction_set_; } - const std::vector<uint8_t>& GetCode() const { - return *code_; + const std::vector<uint8_t>* GetPortableCode() const { + return portable_code_; } - void SetCode(const std::vector<uint8_t>& code); - - bool operator==(const CompiledCode& rhs) const { - return (code_ == rhs.code_); + const std::vector<uint8_t>* GetQuickCode() const { + return quick_code_; } + void 
SetCode(const std::vector<uint8_t>* quick_code, const std::vector<uint8_t>* portable_code); + + bool operator==(const CompiledCode& rhs) const; + // To align an offset from a page-aligned value to make it suitable // for code storage. For example on ARM, to ensure that PC relative // valu computations work out as expected. @@ -72,19 +74,20 @@ class CompiledCode { static const void* CodePointer(const void* code_pointer, InstructionSet instruction_set); -#if defined(ART_USE_PORTABLE_COMPILER) const std::string& GetSymbol() const; const std::vector<uint32_t>& GetOatdataOffsetsToCompliledCodeOffset() const; void AddOatdataOffsetToCompliledCodeOffset(uint32_t offset); -#endif private: - CompilerDriver* compiler_driver_; + CompilerDriver* const compiler_driver_; const InstructionSet instruction_set_; - // Used to store the PIC code for Quick and an ELF image for portable. - std::vector<uint8_t>* code_; + // The ELF image for portable. + std::vector<uint8_t>* portable_code_; + + // Used to store the PIC code for Quick. + std::vector<uint8_t>* quick_code_; // Used for the Portable ELF symbol name. const std::string symbol_; @@ -101,7 +104,7 @@ class CompiledMethod : public CompiledCode { // Constructs a CompiledMethod for the non-LLVM compilers. CompiledMethod(CompilerDriver& driver, InstructionSet instruction_set, - const std::vector<uint8_t>& code, + const std::vector<uint8_t>& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, @@ -109,10 +112,10 @@ class CompiledMethod : public CompiledCode { const std::vector<uint8_t>& vmap_table, const std::vector<uint8_t>& native_gc_map); - // Constructs a CompiledMethod for the JniCompiler. + // Constructs a CompiledMethod for the QuickJniCompiler. CompiledMethod(CompilerDriver& driver, InstructionSet instruction_set, - const std::vector<uint8_t>& code, + const std::vector<uint8_t>& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask); diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc index 132831c3ef..8d24439277 100644 --- a/compiler/dex/arena_allocator.cc +++ b/compiler/dex/arena_allocator.cc @@ -52,7 +52,8 @@ Arena::Arena(size_t size) next_(nullptr) { if (kUseMemMap) { std::string error_msg; - map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE, &error_msg); + map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE, false, + &error_msg); CHECK(map_ != nullptr) << error_msg; memory_ = map_->Begin(); size_ = map_->Size(); diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 6382dd6608..6aaad6694c 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -453,7 +453,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, if (cu->instruction_set != kX86) { if (direct_code == 0) { cg->LoadWordDisp(cg->TargetReg(kArg0), - mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), + mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); } break; @@ -506,7 +506,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt] if (cu->instruction_set != kX86) { cg->LoadWordDisp(cg->TargetReg(kArg0), - mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), + mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); 
break; } @@ -561,7 +561,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, case 5: // Get the compiled code address [use kArg0, set kInvokeTgt] if (cu->instruction_set != kX86) { cg->LoadWordDisp(cg->TargetReg(kArg0), - mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), + mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), cg->TargetReg(kInvokeTgt)); break; } @@ -1437,7 +1437,7 @@ void Mir2Lir::GenInvoke(CallInfo* info) { } else { if (fast_path) { call_inst = OpMem(kOpBlx, TargetReg(kArg0), - mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value()); + mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()); } else { ThreadOffset trampoline(-1); switch (info->type) { diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 37b668f455..9f48351645 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -556,12 +556,15 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel( } } -void CompilerDriver::CompileOne(const mirror::ArtMethod* method, TimingLogger& timings) { +void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); Thread* self = Thread::Current(); jobject jclass_loader; const DexFile* dex_file; uint16_t class_def_idx; + uint32_t method_idx = method->GetDexMethodIndex(); + uint32_t access_flags = method->GetAccessFlags(); + InvokeType invoke_type = method->GetInvokeType(); { ScopedObjectAccessUnchecked soa(self); ScopedLocalRef<jobject> @@ -573,6 +576,7 @@ void CompilerDriver::CompileOne(const mirror::ArtMethod* method, TimingLogger& t dex_file = &mh.GetDexFile(); class_def_idx = mh.GetClassDefIndex(); } + const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); self->TransitionFromRunnableToSuspended(kNative); std::vector<const DexFile*> dex_files; @@ -581,8 +585,6 @@ void CompilerDriver::CompileOne(const mirror::ArtMethod* method, TimingLogger& t UniquePtr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", 0U)); PreCompile(jclass_loader, dex_files, *thread_pool.get(), timings); - uint32_t method_idx = method->GetDexMethodIndex(); - const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); // Can we run DEX-to-DEX compiler on this class ? 
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; { @@ -592,8 +594,8 @@ void CompilerDriver::CompileOne(const mirror::ArtMethod* method, TimingLogger& t soa.Decode<mirror::ClassLoader*>(jclass_loader)); dex_to_dex_compilation_level = GetDexToDexCompilationlevel(class_loader, *dex_file, class_def); } - CompileMethod(code_item, method->GetAccessFlags(), method->GetInvokeType(), - class_def_idx, method_idx, jclass_loader, *dex_file, dex_to_dex_compilation_level); + CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader, + *dex_file, dex_to_dex_compilation_level); self->GetJniEnv()->DeleteGlobalRef(jclass_loader); @@ -1009,7 +1011,7 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi if (referrer_class != NULL) { mirror::Class* fields_class = resolved_field->GetDeclaringClass(); bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field, - *dex_cache, field_idx); + dex_cache.get(), field_idx); bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() && fields_class != referrer_class; if (access_ok && !is_write_to_final_from_wrong_class) { @@ -1056,7 +1058,7 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila return true; // fast path } else { bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field, - *dex_cache, field_idx); + dex_cache.get(), field_idx); bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal(); if (access_ok && !is_write_to_final_from_wrong_class) { // We have the resolved field, we must make it into a index for the referrer @@ -1198,13 +1200,23 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType CHECK(!method->IsAbstract()); *type = sharp_type; *direct_method = reinterpret_cast<uintptr_t>(method); - *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode()); + if (compiler_backend_ == kQuick) { + *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()); + } else { + CHECK_EQ(compiler_backend_, kPortable); + *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode()); + } target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile(); target_method->dex_method_index = method->GetDexMethodIndex(); } else if (!must_use_direct_pointers) { // Set the code and rely on the dex cache for the method. *type = sharp_type; - *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode()); + if (compiler_backend_ == kQuick) { + *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()); + } else { + CHECK_EQ(compiler_backend_, kPortable); + *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode()); + } } else { // Direct pointers were required but none were available. 
VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method); @@ -1239,8 +1251,8 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui bool icce = resolved_method->CheckIncompatibleClassChange(*invoke_type); if (referrer_class != NULL && !icce) { mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - if (referrer_class->CanAccessResolvedMethod(methods_class, resolved_method, - *dex_cache, target_method->dex_method_index)) { + if (referrer_class->CanAccessResolvedMethod(methods_class, resolved_method, dex_cache.get(), + target_method->dex_method_index)) { const bool enableFinalBasedSharpening = enable_devirtualization; // Sharpen a virtual call into a direct call when the target is known not to have been // overridden (ie is final). diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index a8110e71d7..4307212256 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -106,8 +106,8 @@ class CompilerDriver { TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); - // Compile a single Method - void CompileOne(const mirror::ArtMethod* method, TimingLogger& timings) + // Compile a single Method. + void CompileOne(mirror::ArtMethod* method, TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); VerificationResults* GetVerificationResults() const { diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index a5eb94f0e9..0d0c204d0a 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -122,7 +122,11 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { EXPECT_TRUE(method != NULL) << "method_idx=" << i << " " << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i)) << " " << dex->GetMethodName(dex->GetMethodId(i)); - EXPECT_TRUE(method->GetEntryPointFromCompiledCode() != NULL) << "method_idx=" << i + EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i + << " " + << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i)) + << " " << dex->GetMethodName(dex->GetMethodId(i)); + EXPECT_TRUE(method->GetEntryPointFromPortableCompiledCode() != NULL) << "method_idx=" << i << " " << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i)) << " " << dex->GetMethodName(dex->GetMethodId(i)); diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc index c5712880c1..66c8da1dd8 100644 --- a/compiler/elf_fixup.cc +++ b/compiler/elf_fixup.cc @@ -177,7 +177,7 @@ bool ElfFixup::FixupDynamic(ElfFile& elf_file, uintptr_t base_address) { if (elf_dyn_needs_fixup) { uint32_t d_ptr = elf_dyn.d_un.d_ptr; if (DEBUG_FIXUP) { - LOG(INFO) << StringPrintf("In %s moving Elf32_Dyn[%d] from 0x%08x to 0x%08x", + LOG(INFO) << StringPrintf("In %s moving Elf32_Dyn[%d] from 0x%08x to 0x%08" PRIxPTR, elf_file.GetFile().GetPath().c_str(), i, d_ptr, d_ptr + base_address); } @@ -196,7 +196,7 @@ bool ElfFixup::FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address) { continue; } if (DEBUG_FIXUP) { - LOG(INFO) << StringPrintf("In %s moving Elf32_Shdr[%d] from 0x%08x to 0x%08x", + LOG(INFO) << StringPrintf("In %s moving Elf32_Shdr[%d] from 0x%08x to 0x%08" PRIxPTR, elf_file.GetFile().GetPath().c_str(), i, sh.sh_addr, sh.sh_addr + base_address); } @@ -213,7 +213,7 @@ bool ElfFixup::FixupProgramHeaders(ElfFile& elf_file, uintptr_t base_address) { CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1)))) << 
elf_file.GetFile().GetPath() << " i=" << i; if (DEBUG_FIXUP) { - LOG(INFO) << StringPrintf("In %s moving Elf32_Phdr[%d] from 0x%08x to 0x%08x", + LOG(INFO) << StringPrintf("In %s moving Elf32_Phdr[%d] from 0x%08x to 0x%08" PRIxPTR, elf_file.GetFile().GetPath().c_str(), i, ph.p_vaddr, ph.p_vaddr + base_address); } @@ -238,7 +238,7 @@ bool ElfFixup::FixupSymbols(ElfFile& elf_file, uintptr_t base_address, bool dyna ::llvm::ELF::Elf32_Sym& symbol = elf_file.GetSymbol(section_type, i); if (symbol.st_value != 0) { if (DEBUG_FIXUP) { - LOG(INFO) << StringPrintf("In %s moving Elf32_Sym[%d] from 0x%08x to 0x%08x", + LOG(INFO) << StringPrintf("In %s moving Elf32_Sym[%d] from 0x%08x to 0x%08" PRIxPTR, elf_file.GetFile().GetPath().c_str(), i, symbol.st_value, symbol.st_value + base_address); } @@ -255,7 +255,7 @@ bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) { for (uint32_t i = 0; i < elf_file.GetRelNum(sh); i++) { llvm::ELF::Elf32_Rel& rel = elf_file.GetRel(sh, i); if (DEBUG_FIXUP) { - LOG(INFO) << StringPrintf("In %s moving Elf32_Rel[%d] from 0x%08x to 0x%08x", + LOG(INFO) << StringPrintf("In %s moving Elf32_Rel[%d] from 0x%08x to 0x%08" PRIxPTR, elf_file.GetFile().GetPath().c_str(), i, rel.r_offset, rel.r_offset + base_address); } @@ -265,7 +265,7 @@ bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) { for (uint32_t i = 0; i < elf_file.GetRelaNum(sh); i++) { llvm::ELF::Elf32_Rela& rela = elf_file.GetRela(sh, i); if (DEBUG_FIXUP) { - LOG(INFO) << StringPrintf("In %s moving Elf32_Rela[%d] from 0x%08x to 0x%08x", + LOG(INFO) << StringPrintf("In %s moving Elf32_Rela[%d] from 0x%08x to 0x%08" PRIxPTR, elf_file.GetFile().GetPath().c_str(), i, rela.r_offset, rela.r_offset + base_address); } diff --git a/compiler/file_output_stream.cc b/compiler/file_output_stream.cc index 0e4a2949ed..3ee16f53e8 100644 --- a/compiler/file_output_stream.cc +++ b/compiler/file_output_stream.cc @@ -25,7 +25,7 @@ namespace art { FileOutputStream::FileOutputStream(File* file) : OutputStream(file->GetPath()), file_(file) {} -bool FileOutputStream::WriteFully(const void* buffer, int64_t byte_count) { +bool FileOutputStream::WriteFully(const void* buffer, size_t byte_count) { return file_->WriteFully(buffer, byte_count); } diff --git a/compiler/file_output_stream.h b/compiler/file_output_stream.h index bde9e68eaa..76b00fe129 100644 --- a/compiler/file_output_stream.h +++ b/compiler/file_output_stream.h @@ -29,7 +29,7 @@ class FileOutputStream : public OutputStream { virtual ~FileOutputStream() {} - virtual bool WriteFully(const void* buffer, int64_t byte_count); + virtual bool WriteFully(const void* buffer, size_t byte_count); virtual off_t Seek(off_t offset, Whence whence); diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 09bb70cd2f..67cd51bc54 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -208,12 +208,12 @@ void ImageWriter::AssignImageOffset(mirror::Object* object) { DCHECK_LT(image_end_, image_->Size()); } -bool ImageWriter::IsImageOffsetAssigned(const mirror::Object* object) const { +bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const { DCHECK(object != nullptr); return object->GetLockWord().GetState() == LockWord::kForwardingAddress; } -size_t ImageWriter::GetImageOffset(const mirror::Object* object) const { +size_t ImageWriter::GetImageOffset(mirror::Object* object) const { DCHECK(object != nullptr); DCHECK(IsImageOffsetAssigned(object)); LockWord lock_word = object->GetLockWord(); @@ -226,7 +226,7 @@ bool 
ImageWriter::AllocMemory() { size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize); std::string error_msg; image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE, - &error_msg)); + true, &error_msg)); if (UNLIKELY(image_.get() == nullptr)) { LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg; return false; @@ -281,7 +281,7 @@ void ImageWriter::ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mut Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this); } -bool ImageWriter::IsImageClass(const Class* klass) { +bool ImageWriter::IsImageClass(Class* klass) { return compiler_driver_.IsImageClass(ClassHelper(klass).GetDescriptor()); } @@ -447,7 +447,7 @@ void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) for (size_t i = 0; i < num_reference_fields; ++i) { mirror::ArtField* field = sirt_class->GetInstanceField(i); MemberOffset field_offset = field->GetOffset(); - mirror::Object* value = obj->GetFieldObject<mirror::Object*>(field_offset, false); + mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset, false); if (value != nullptr) { WalkFieldsInOrder(value); } @@ -470,7 +470,7 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { for (size_t i = 0; i < num_static_fields; ++i) { mirror::ArtField* field = klass->GetStaticField(i); MemberOffset field_offset = field->GetOffset(); - mirror::Object* value = sirt_obj->GetFieldObject<mirror::Object*>(field_offset, false); + mirror::Object* value = sirt_obj->GetFieldObject<mirror::Object>(field_offset, false); if (value != nullptr) { WalkFieldsInOrder(value); } @@ -527,16 +527,16 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * gc::accounting::SpaceBitmap::kAlignment; const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) / heap_bytes_per_bitmap_byte; - ImageHeader image_header(reinterpret_cast<uint32_t>(image_begin_), + ImageHeader image_header(PointerToLowMemUInt32(image_begin_), static_cast<uint32_t>(image_end_), RoundUp(image_end_, kPageSize), RoundUp(bitmap_bytes, kPageSize), - reinterpret_cast<uint32_t>(GetImageAddress(image_roots.get())), + PointerToLowMemUInt32(GetImageAddress(image_roots.get())), oat_file_->GetOatHeader().GetChecksum(), - reinterpret_cast<uint32_t>(oat_file_begin), - reinterpret_cast<uint32_t>(oat_data_begin_), - reinterpret_cast<uint32_t>(oat_data_end), - reinterpret_cast<uint32_t>(oat_file_end)); + PointerToLowMemUInt32(oat_file_begin), + PointerToLowMemUInt32(oat_data_begin_), + PointerToLowMemUInt32(oat_data_end), + PointerToLowMemUInt32(oat_file_end)); memcpy(image_->Begin(), &image_header, sizeof(image_header)); // Note that image_end_ is left at end of used space @@ -578,7 +578,7 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) { image_writer->FixupObject(obj, copy); } -void ImageWriter::FixupObject(const Object* orig, Object* copy) { +void ImageWriter::FixupObject(Object* orig, Object* copy) { DCHECK(orig != NULL); DCHECK(copy != NULL); copy->SetClass(down_cast<Class*>(GetImageAddress(orig->GetClass()))); @@ -594,12 +594,12 @@ void ImageWriter::FixupObject(const Object* orig, Object* copy) { } } -void ImageWriter::FixupClass(const Class* orig, Class* copy) { +void ImageWriter::FixupClass(Class* orig, Class* copy) { FixupInstanceFields(orig, copy); FixupStaticFields(orig, copy); } -void 
ImageWriter::FixupMethod(const ArtMethod* orig, ArtMethod* copy) { +void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) { FixupInstanceFields(orig, copy); // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to @@ -607,43 +607,36 @@ void ImageWriter::FixupMethod(const ArtMethod* orig, ArtMethod* copy) { // The resolution method has a special trampoline to call. if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) { -#if defined(ART_USE_PORTABLE_COMPILER) - copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); -#else - copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); -#endif + copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); + copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); } else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) { -#if defined(ART_USE_PORTABLE_COMPILER) - copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_imt_conflict_trampoline_offset_)); -#else - copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_imt_conflict_trampoline_offset_)); -#endif + copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_imt_conflict_trampoline_offset_)); + copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_imt_conflict_trampoline_offset_)); } else { // We assume all methods have code. If they don't currently then we set them to the use the // resolution trampoline. Abstract methods never have code and so we need to make sure their // use results in an AbstractMethodError. We use the interpreter to achieve this. if (UNLIKELY(orig->IsAbstract())) { -#if defined(ART_USE_PORTABLE_COMPILER) - copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_)); -#else - copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_)); -#endif + copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_)); + copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_)); copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*> - (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_)))); + (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_)))); } else { copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*> - (const_cast<byte*>(GetOatAddress(interpreter_to_compiled_code_bridge_offset_)))); + (const_cast<byte*>(GetOatAddress(interpreter_to_compiled_code_bridge_offset_)))); // Use original code if it exists. Otherwise, set the code pointer to the resolution // trampoline. 
- const byte* code = GetOatAddress(orig->GetOatCodeOffset()); - if (code != NULL) { - copy->SetEntryPointFromCompiledCode(code); + const byte* quick_code = GetOatAddress(orig->GetQuickOatCodeOffset()); + if (quick_code != nullptr) { + copy->SetEntryPointFromQuickCompiledCode(quick_code); } else { -#if defined(ART_USE_PORTABLE_COMPILER) - copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); -#else - copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); -#endif + copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); + } + const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset()); + if (portable_code != nullptr) { + copy->SetEntryPointFromPortableCompiledCode(portable_code); + } else { + copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); } if (orig->IsNative()) { // The native method's pointer is set to a stub to lookup via dlsym. @@ -667,14 +660,14 @@ void ImageWriter::FixupMethod(const ArtMethod* orig, ArtMethod* copy) { } } -void ImageWriter::FixupObjectArray(const ObjectArray<Object>* orig, ObjectArray<Object>* copy) { +void ImageWriter::FixupObjectArray(ObjectArray<Object>* orig, ObjectArray<Object>* copy) { for (int32_t i = 0; i < orig->GetLength(); ++i) { - const Object* element = orig->Get(i); - copy->SetPtrWithoutChecks(i, GetImageAddress(element)); + Object* element = orig->Get(i); + copy->SetWithoutChecksAndWriteBarrier(i, GetImageAddress(element)); } } -void ImageWriter::FixupInstanceFields(const Object* orig, Object* copy) { +void ImageWriter::FixupInstanceFields(Object* orig, Object* copy) { DCHECK(orig != NULL); DCHECK(copy != NULL); Class* klass = orig->GetClass(); @@ -682,13 +675,13 @@ void ImageWriter::FixupInstanceFields(const Object* orig, Object* copy) { FixupFields(orig, copy, klass->GetReferenceInstanceOffsets(), false); } -void ImageWriter::FixupStaticFields(const Class* orig, Class* copy) { +void ImageWriter::FixupStaticFields(Class* orig, Class* copy) { DCHECK(orig != NULL); DCHECK(copy != NULL); FixupFields(orig, copy, orig->GetReferenceStaticOffsets(), true); } -void ImageWriter::FixupFields(const Object* orig, +void ImageWriter::FixupFields(Object* orig, Object* copy, uint32_t ref_offsets, bool is_static) { @@ -697,9 +690,10 @@ void ImageWriter::FixupFields(const Object* orig, while (ref_offsets != 0) { size_t right_shift = CLZ(ref_offsets); MemberOffset byte_offset = CLASS_OFFSET_FROM_CLZ(right_shift); - const Object* ref = orig->GetFieldObject<const Object*>(byte_offset, false); - // Use SetFieldPtr to avoid card marking since we are writing to the image. - copy->SetFieldPtr(byte_offset, GetImageAddress(ref), false); + Object* ref = orig->GetFieldObject<Object>(byte_offset, false); + // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the + // image. + copy->SetFieldObjectWithoutWriteBarrier(byte_offset, GetImageAddress(ref), false); ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift); } } else { @@ -707,7 +701,7 @@ void ImageWriter::FixupFields(const Object* orig, // walk up the class inheritance hierarchy and find reference // offsets the hard way. In the static case, just consider this // class. - for (const Class *klass = is_static ? orig->AsClass() : orig->GetClass(); + for (Class *klass = is_static ? orig->AsClass() : orig->GetClass(); klass != NULL; klass = is_static ? 
NULL : klass->GetSuperClass()) { size_t num_reference_fields = (is_static @@ -718,9 +712,10 @@ void ImageWriter::FixupFields(const Object* orig, ? klass->GetStaticField(i) : klass->GetInstanceField(i)); MemberOffset field_offset = field->GetOffset(); - const Object* ref = orig->GetFieldObject<const Object*>(field_offset, false); - // Use SetFieldPtr to avoid card marking since we are writing to the image. - copy->SetFieldPtr(field_offset, GetImageAddress(ref), false); + Object* ref = orig->GetFieldObject<Object>(field_offset, false); + // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the + // image. + copy->SetFieldObjectWithoutWriteBarrier(field_offset, GetImageAddress(ref), false); } } } @@ -728,9 +723,10 @@ void ImageWriter::FixupFields(const Object* orig, // Fix-up referent, that isn't marked as an object field, for References. ArtField* field = orig->GetClass()->FindInstanceField("referent", "Ljava/lang/Object;"); MemberOffset field_offset = field->GetOffset(); - const Object* ref = orig->GetFieldObject<const Object*>(field_offset, false); - // Use SetFieldPtr to avoid card marking since we are writing to the image. - copy->SetFieldPtr(field_offset, GetImageAddress(ref), false); + Object* ref = orig->GetFieldObject<Object>(field_offset, false); + // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the + // image. + copy->SetFieldObjectWithoutWriteBarrier(field_offset, GetImageAddress(ref), false); } } @@ -786,17 +782,17 @@ void ImageWriter::PatchOatCodeAndMethods() { for (size_t i = 0; i < code_to_patch.size(); i++) { const CompilerDriver::CallPatchInformation* patch = code_to_patch[i]; ArtMethod* target = GetTargetMethod(patch); - uint32_t code = reinterpret_cast<uint32_t>(class_linker->GetOatCodeFor(target)); - uint32_t code_base = reinterpret_cast<uint32_t>(&oat_file_->GetOatHeader()); - uint32_t code_offset = code - code_base; - SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetOatAddress(code_offset))); + uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target)); + uintptr_t code_base = reinterpret_cast<uintptr_t>(&oat_file_->GetOatHeader()); + uintptr_t code_offset = quick_code - code_base; + SetPatchLocation(patch, PointerToLowMemUInt32(GetOatAddress(code_offset))); } const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch(); for (size_t i = 0; i < methods_to_patch.size(); i++) { const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i]; ArtMethod* target = GetTargetMethod(patch); - SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetImageAddress(target))); + SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target))); } const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch = @@ -804,7 +800,7 @@ void ImageWriter::PatchOatCodeAndMethods() { for (size_t i = 0; i < classes_to_patch.size(); i++) { const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i]; Class* target = GetTargetType(patch); - SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetImageAddress(target))); + SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target))); } // Update the image header with the new checksum after patching @@ -815,18 +811,18 @@ void ImageWriter::PatchOatCodeAndMethods() { void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - const void* oat_code = 
class_linker->GetOatCodeFor(patch->GetDexFile(), - patch->GetReferrerClassDefIdx(), - patch->GetReferrerMethodIdx()); + const void* quick_oat_code = class_linker->GetQuickOatCodeFor(patch->GetDexFile(), + patch->GetReferrerClassDefIdx(), + patch->GetReferrerMethodIdx()); OatHeader& oat_header = const_cast<OatHeader&>(oat_file_->GetOatHeader()); // TODO: make this Thumb2 specific - uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uint32_t>(oat_code) & ~0x1); + uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(quick_oat_code) & ~0x1); uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset()); if (kIsDebugBuild) { if (patch->IsCall()) { const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall(); const DexFile::MethodId& id = cpatch->GetDexFile().GetMethodId(cpatch->GetTargetMethodIdx()); - uint32_t expected = reinterpret_cast<uint32_t>(&id); + uintptr_t expected = reinterpret_cast<uintptr_t>(&id); uint32_t actual = *patch_location; CHECK(actual == expected || actual == value) << std::hex << "actual=" << actual @@ -836,7 +832,7 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch if (patch->IsType()) { const CompilerDriver::TypePatchInformation* tpatch = patch->AsType(); const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx()); - uint32_t expected = reinterpret_cast<uint32_t>(&id); + uintptr_t expected = reinterpret_cast<uintptr_t>(&id); uint32_t actual = *patch_location; CHECK(actual == expected || actual == value) << std::hex << "actual=" << actual diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 695f59b40e..a1504eeca8 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -66,17 +66,17 @@ class ImageWriter { void AssignImageOffset(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetImageOffset(mirror::Object* object, size_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsImageOffsetAssigned(const mirror::Object* object) const; - size_t GetImageOffset(const mirror::Object* object) const; + bool IsImageOffsetAssigned(mirror::Object* object) const; + size_t GetImageOffset(mirror::Object* object) const; - mirror::Object* GetImageAddress(const mirror::Object* object) const { + mirror::Object* GetImageAddress(mirror::Object* object) const { if (object == NULL) { return NULL; } return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object)); } - mirror::Object* GetLocalAddress(const mirror::Object* object) const { + mirror::Object* GetLocalAddress(mirror::Object* object) const { size_t offset = GetImageOffset(object); byte* dst = image_->Begin() + offset; return reinterpret_cast<mirror::Object*>(dst); @@ -96,7 +96,7 @@ class ImageWriter { } // Returns true if the class was in the original requested image classes list. - bool IsImageClass(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Debug aid that list of requested image classes. 
void DumpImageClasses(); @@ -141,20 +141,20 @@ class ImageWriter { void CopyAndFixupObjects(); static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupClass(const mirror::Class* orig, mirror::Class* copy) + void FixupClass(mirror::Class* orig, mirror::Class* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupMethod(const mirror::ArtMethod* orig, mirror::ArtMethod* copy) + void FixupMethod(mirror::ArtMethod* orig, mirror::ArtMethod* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupObject(const mirror::Object* orig, mirror::Object* copy) + void FixupObject(mirror::Object* orig, mirror::Object* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupObjectArray(const mirror::ObjectArray<mirror::Object>* orig, + void FixupObjectArray(mirror::ObjectArray<mirror::Object>* orig, mirror::ObjectArray<mirror::Object>* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupInstanceFields(const mirror::Object* orig, mirror::Object* copy) + void FixupInstanceFields(mirror::Object* orig, mirror::Object* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupStaticFields(const mirror::Class* orig, mirror::Class* copy) + void FixupStaticFields(mirror::Class* orig, mirror::Class* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupFields(const mirror::Object* orig, mirror::Object* copy, uint32_t ref_offsets, + void FixupFields(mirror::Object* orig, mirror::Object* copy, uint32_t ref_offsets, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 1c8714a6c3..c77d319330 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -58,11 +58,14 @@ class JniCompilerTest : public CommonTest { method = c->FindVirtualMethod(method_name, method_sig); } ASSERT_TRUE(method != NULL) << method_name << " " << method_sig; - if (method->GetEntryPointFromCompiledCode() != NULL) { - return; + if (method->GetEntryPointFromQuickCompiledCode() == nullptr) { + ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() == nullptr); + CompileMethod(method); + ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) + << method_name << " " << method_sig; + ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() != nullptr) + << method_name << " " << method_sig; } - CompileMethod(method); - ASSERT_TRUE(method->GetEntryPointFromCompiledCode() != NULL) << method_name << " " << method_sig; } void SetUpForTest(bool direct, const char* method_name, const char* method_sig, @@ -122,19 +125,19 @@ jobject JniCompilerTest::class_loader_; int gJava_MyClassNatives_foo_calls = 0; void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) { // 1 = thisObj - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); Locks::mutator_lock_->AssertNotHeld(Thread::Current()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_foo_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); } TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) { TEST_DISABLED_FOR_PORTABLE(); - SetUpForTest(false, "foo", "()V", - reinterpret_cast<void*>(&Java_MyClassNatives_foo)); + SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo)); EXPECT_EQ(0, 
gJava_MyClassNatives_foo_calls); env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); @@ -178,12 +181,13 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) { int gJava_MyClassNatives_fooI_calls = 0; jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) { // 1 = thisObj - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_fooI_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x; } @@ -204,12 +208,13 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethod) { int gJava_MyClassNatives_fooII_calls = 0; jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) { // 1 = thisObj - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_fooII_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x - y; // non-commutative operator } @@ -231,12 +236,13 @@ TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) { int gJava_MyClassNatives_fooJJ_calls = 0; jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) { // 1 = thisObj - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_fooJJ_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x - y; // non-commutative operator } @@ -259,12 +265,13 @@ TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) { int gJava_MyClassNatives_fooDD_calls = 0; jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) { // 1 = thisObj - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_fooDD_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x - y; // non-commutative operator } @@ -288,12 +295,13 @@ TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) { int gJava_MyClassNatives_fooJJ_synchronized_calls = 0; jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong x, jlong y) { // 1 = thisObj - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_fooJJ_synchronized_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x | y; } @@ -314,12 +322,13 @@ int gJava_MyClassNatives_fooIOO_calls = 0; jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y, jobject z) { // 3 = this + y + z - EXPECT_EQ(3U, 
Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); gJava_MyClassNatives_fooIOO_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); switch (x) { case 1: return y; @@ -365,12 +374,13 @@ TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) { int gJava_MyClassNatives_fooSII_calls = 0; jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) { // 1 = klass - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); gJava_MyClassNatives_fooSII_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x + y; } @@ -388,12 +398,13 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) { int gJava_MyClassNatives_fooSDD_calls = 0; jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble y) { // 1 = klass - EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); gJava_MyClassNatives_fooSDD_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); return x - y; // non-commutative operator } @@ -417,12 +428,13 @@ int gJava_MyClassNatives_fooSIOO_calls = 0; jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) { // 3 = klass + y + z - EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); gJava_MyClassNatives_fooSIOO_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); switch (x) { case 1: return y; @@ -469,12 +481,13 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) { int gJava_MyClassNatives_fooSSIOO_calls = 0; jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) { // 3 = klass + y + z - EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); EXPECT_EQ(kNative, Thread::Current()->GetState()); EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(klass != NULL); EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); gJava_MyClassNatives_fooSSIOO_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); switch (x) { case 1: return y; diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc index 94408bb39c..6563eb5475 100644 --- a/compiler/llvm/compiler_llvm.cc +++ b/compiler/llvm/compiler_llvm.cc @@ -126,7 +126,7 @@ LlvmCompilationUnit* CompilerLLVM::AllocateCompilationUnit() { MutexLock GUARD(Thread::Current(), next_cunit_id_lock_); LlvmCompilationUnit* cunit = new LlvmCompilationUnit(this, next_cunit_id_++); if (!bitcode_filename_.empty()) { - cunit->SetBitcodeFileName(StringPrintf("%s-%zu", + cunit->SetBitcodeFileName(StringPrintf("%s-%u", bitcode_filename_.c_str(), 
cunit->GetCompilationUnitId())); } diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc index 6423cd7dca..8f22a97968 100644 --- a/compiler/llvm/gbc_expander.cc +++ b/compiler/llvm/gbc_expander.cc @@ -897,7 +897,7 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) { } else { code_addr = irb_.LoadFromObjectOffset(callee_method_object_addr, - art::mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), + art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value(), func_type->getPointerTo(), kTBAARuntimeInfo); } @@ -1234,7 +1234,7 @@ llvm::Value* GBCExpanderPass::Expand_Invoke(llvm::CallInst& call_inst) { llvm::Value* code_addr = irb_.LoadFromObjectOffset(callee_method_object_addr, - art::mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), + art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value(), callee_method_type->getPointerTo(), kTBAARuntimeInfo); diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc index 038f5dc4eb..d23706d9f4 100644 --- a/compiler/llvm/llvm_compilation_unit.cc +++ b/compiler/llvm/llvm_compilation_unit.cc @@ -151,7 +151,7 @@ static std::string DumpDirectory() { void LlvmCompilationUnit::DumpBitcodeToFile() { std::string bitcode; DumpBitcodeToString(bitcode); - std::string filename(StringPrintf("%s/Art%u.bc", DumpDirectory().c_str(), cunit_id_)); + std::string filename(StringPrintf("%s/Art%zu.bc", DumpDirectory().c_str(), cunit_id_)); UniquePtr<File> output(OS::CreateEmptyFile(filename.c_str())); output->WriteFully(bitcode.data(), bitcode.size()); LOG(INFO) << ".bc file written successfully: " << filename; @@ -178,7 +178,7 @@ bool LlvmCompilationUnit::Materialize() { const bool kDumpELF = false; if (kDumpELF) { // Dump the ELF image for debugging - std::string filename(StringPrintf("%s/Art%u.o", DumpDirectory().c_str(), cunit_id_)); + std::string filename(StringPrintf("%s/Art%zu.o", DumpDirectory().c_str(), cunit_id_)); UniquePtr<File> output(OS::CreateEmptyFile(filename.c_str())); output->WriteFully(elf_object_.data(), elf_object_.size()); LOG(INFO) << ".o file written successfully: " << filename; diff --git a/compiler/llvm/llvm_compilation_unit.h b/compiler/llvm/llvm_compilation_unit.h index ced9f812c0..58aa6fd545 100644 --- a/compiler/llvm/llvm_compilation_unit.h +++ b/compiler/llvm/llvm_compilation_unit.h @@ -101,10 +101,10 @@ class LlvmCompilationUnit { private: LlvmCompilationUnit(const CompilerLLVM* compiler_llvm, - uint32_t cunit_id); + size_t cunit_id); const CompilerLLVM* compiler_llvm_; - const uint32_t cunit_id_; + const size_t cunit_id_; UniquePtr< ::llvm::LLVMContext> context_; UniquePtr<IRBuilder> irb_; diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index fc454127c3..b3070b6f48 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -39,29 +39,42 @@ class OatTest : public CommonTest { method->GetDexMethodIndex())); if (compiled_method == NULL) { - EXPECT_TRUE(oat_method.GetCode() == NULL) << PrettyMethod(method) << " " - << oat_method.GetCode(); -#if !defined(ART_USE_PORTABLE_COMPILER) - EXPECT_EQ(oat_method.GetFrameSizeInBytes(), kCompile ? 
kStackAlignment : 0); + EXPECT_TRUE(oat_method.GetQuickCode() == NULL) << PrettyMethod(method) << " " + << oat_method.GetQuickCode(); + EXPECT_TRUE(oat_method.GetPortableCode() == NULL) << PrettyMethod(method) << " " + << oat_method.GetPortableCode(); + EXPECT_EQ(oat_method.GetFrameSizeInBytes(), 0U); EXPECT_EQ(oat_method.GetCoreSpillMask(), 0U); EXPECT_EQ(oat_method.GetFpSpillMask(), 0U); -#endif } else { - const void* oat_code = oat_method.GetCode(); - EXPECT_TRUE(oat_code != NULL) << PrettyMethod(method); - uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(oat_code), 2); - oat_code = reinterpret_cast<const void*>(oat_code_aligned); - - const std::vector<uint8_t>& code = compiled_method->GetCode(); - size_t code_size = code.size() * sizeof(code[0]); - EXPECT_EQ(0, memcmp(oat_code, &code[0], code_size)) - << PrettyMethod(method) << " " << code_size; - CHECK_EQ(0, memcmp(oat_code, &code[0], code_size)); -#if !defined(ART_USE_PORTABLE_COMPILER) - EXPECT_EQ(oat_method.GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes()); - EXPECT_EQ(oat_method.GetCoreSpillMask(), compiled_method->GetCoreSpillMask()); - EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask()); -#endif + const void* quick_oat_code = oat_method.GetQuickCode(); + if (quick_oat_code != nullptr) { + EXPECT_EQ(oat_method.GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes()); + EXPECT_EQ(oat_method.GetCoreSpillMask(), compiled_method->GetCoreSpillMask()); + EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask()); + uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(quick_oat_code), 2); + quick_oat_code = reinterpret_cast<const void*>(oat_code_aligned); + const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode(); + EXPECT_TRUE(quick_code != nullptr); + size_t code_size = quick_code->size() * sizeof(quick_code[0]); + EXPECT_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size)) + << PrettyMethod(method) << " " << code_size; + CHECK_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size)); + } else { + const void* portable_oat_code = oat_method.GetPortableCode(); + EXPECT_TRUE(portable_oat_code != nullptr) << PrettyMethod(method); + EXPECT_EQ(oat_method.GetFrameSizeInBytes(), 0U); + EXPECT_EQ(oat_method.GetCoreSpillMask(), 0U); + EXPECT_EQ(oat_method.GetFpSpillMask(), 0U); + uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(portable_oat_code), 2); + portable_oat_code = reinterpret_cast<const void*>(oat_code_aligned); + const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode(); + EXPECT_TRUE(portable_code != nullptr); + size_t code_size = portable_code->size() * sizeof(portable_code[0]); + EXPECT_EQ(0, memcmp(quick_oat_code, &portable_code[0], code_size)) + << PrettyMethod(method) << " " << code_size; + CHECK_EQ(0, memcmp(quick_oat_code, &portable_code[0], code_size)); + } } } }; @@ -70,12 +83,8 @@ TEST_F(OatTest, WriteRead) { TimingLogger timings("CommonTest::WriteRead", false, false); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - // TODO: make selectable -#if defined(ART_USE_PORTABLE_COMPILER) - CompilerBackend compiler_backend = kPortable; -#else - CompilerBackend compiler_backend = kQuick; -#endif + // TODO: make selectable. + CompilerBackend compiler_backend = kUsePortableCompiler ? kPortable : kQuick; InstructionSet insn_set = kIsTargetBuild ? 
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7a902d86d6..7c5669a3ab 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -39,7 +39,7 @@ namespace art {
 OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
                      uint32_t image_file_location_oat_checksum,
-                     uint32_t image_file_location_oat_begin,
+                     uintptr_t image_file_location_oat_begin,
                      const std::string& image_file_location,
                      const CompilerDriver* compiler,
                      TimingLogger* timings)
@@ -348,8 +348,8 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
                                     bool __attribute__((unused)) is_native,
                                     InvokeType invoke_type,
                                     uint32_t method_idx, const DexFile& dex_file) {
-  // derived from CompiledMethod if available
-  uint32_t code_offset = 0;
+  // Derived from CompiledMethod if available.
+  uint32_t quick_code_offset = 0;
   uint32_t frame_size_in_bytes = kStackAlignment;
   uint32_t core_spill_mask = 0;
   uint32_t fp_spill_mask = 0;
@@ -358,36 +358,38 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
   uint32_t gc_map_offset = 0;
   OatClass* oat_class = oat_classes_[oat_class_index];
-#if defined(ART_USE_PORTABLE_COMPILER)
-  size_t oat_method_offsets_offset =
-      oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
-#endif
-
   CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
   if (compiled_method != NULL) {
-#if defined(ART_USE_PORTABLE_COMPILER)
-    compiled_method->AddOatdataOffsetToCompliledCodeOffset(
-        oat_method_offsets_offset + OFFSETOF_MEMBER(OatMethodOffsets, code_offset_));
-#else
-    const std::vector<uint8_t>& code = compiled_method->GetCode();
-    offset = compiled_method->AlignCode(offset);
-    DCHECK_ALIGNED(offset, kArmAlignment);
-    uint32_t code_size = code.size() * sizeof(code[0]);
-    CHECK_NE(code_size, 0U);
-    uint32_t thumb_offset = compiled_method->CodeDelta();
-    code_offset = offset + sizeof(code_size) + thumb_offset;
-
-    // Deduplicate code arrays
-    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter = code_offsets_.find(&code);
-    if (code_iter != code_offsets_.end()) {
-      code_offset = code_iter->second;
+    const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
+    const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
+    if (portable_code != nullptr) {
+      CHECK(quick_code == nullptr);
+      size_t oat_method_offsets_offset =
+          oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
+      compiled_method->AddOatdataOffsetToCompliledCodeOffset(
+          oat_method_offsets_offset + OFFSETOF_MEMBER(OatMethodOffsets, code_offset_));
     } else {
-      code_offsets_.Put(&code, code_offset);
-      offset += sizeof(code_size);  // code size is prepended before code
-      offset += code_size;
-      oat_header_->UpdateChecksum(&code[0], code_size);
+      CHECK(quick_code != nullptr);
+      offset = compiled_method->AlignCode(offset);
+      DCHECK_ALIGNED(offset, kArmAlignment);
+      uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+      CHECK_NE(code_size, 0U);
+      uint32_t thumb_offset = compiled_method->CodeDelta();
+      quick_code_offset = offset + sizeof(code_size) + thumb_offset;
+
+      // Deduplicate code arrays
+      SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter =
+          code_offsets_.find(quick_code);
+      if (code_iter != code_offsets_.end()) {
+        quick_code_offset = code_iter->second;
+      } else {
+        code_offsets_.Put(quick_code, quick_code_offset);
+        offset += sizeof(code_size);  // code size is prepended before code
+        offset += code_size;
+        oat_header_->UpdateChecksum(&(*quick_code)[0], code_size);
+      }
     }
-#endif
     frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
     core_spill_mask = compiled_method->GetCoreSpillMask();
     fp_spill_mask = compiled_method->GetFpSpillMask();
@@ -456,7 +458,7 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
     }
     oat_class->method_offsets_[*method_offsets_index] =
-        OatMethodOffsets(code_offset,
+        OatMethodOffsets(quick_code_offset,
                          frame_size_in_bytes,
                          core_spill_mask,
                          fp_spill_mask,
@@ -483,9 +485,11 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
     // Don't overwrite static method trampoline
     if (!method->IsStatic() || method->IsConstructor() ||
         method->GetDeclaringClass()->IsInitialized()) {
-      method->SetOatCodeOffset(code_offset);
+      // TODO: record portable code offsets: method->SetPortableOatCodeOffset(portable_code_offset);
+      method->SetQuickOatCodeOffset(quick_code_offset);
     } else {
-      method->SetEntryPointFromCompiledCode(NULL);
+      method->SetEntryPointFromPortableCompiledCode(nullptr);
+      method->SetEntryPointFromQuickCompiledCode(nullptr);
     }
     method->SetOatVmapTableOffset(vmap_table_offset);
     method->SetOatNativeGcMapOffset(gc_map_offset);
@@ -753,52 +757,52 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
     if (compiled_method != NULL) {  // ie. not an abstract method
       const OatMethodOffsets method_offsets = oat_class->method_offsets_[*method_offsets_index];
       (*method_offsets_index)++;
-
-#if !defined(ART_USE_PORTABLE_COMPILER)
-      uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
-      uint32_t aligned_code_delta = aligned_offset - relative_offset;
-      if (aligned_code_delta != 0) {
-        off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent);
-        size_code_alignment_ += aligned_code_delta;
-        uint32_t expected_offset = file_offset + aligned_offset;
-        if (static_cast<uint32_t>(new_offset) != expected_offset) {
-          PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
-                      << " Expected: " << expected_offset << " File: " << out.GetLocation();
-          return 0;
+      const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
+      if (quick_code != nullptr) {
+        CHECK(compiled_method->GetPortableCode() == nullptr);
+        uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
+        uint32_t aligned_code_delta = aligned_offset - relative_offset;
+        if (aligned_code_delta != 0) {
+          off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent);
+          size_code_alignment_ += aligned_code_delta;
+          uint32_t expected_offset = file_offset + aligned_offset;
+          if (static_cast<uint32_t>(new_offset) != expected_offset) {
+            PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
                        << " Expected: " << expected_offset << " File: " << out.GetLocation();
+            return 0;
+          }
+          relative_offset += aligned_code_delta;
+          DCHECK_OFFSET();
        }
-        relative_offset += aligned_code_delta;
-        DCHECK_OFFSET();
-      }
-      DCHECK_ALIGNED(relative_offset, kArmAlignment);
-      const std::vector<uint8_t>& code = compiled_method->GetCode();
-      uint32_t code_size = code.size() * sizeof(code[0]);
-      CHECK_NE(code_size, 0U);
-
-      // Deduplicate code arrays
-      size_t code_offset = relative_offset + sizeof(code_size) + compiled_method->CodeDelta();
-      SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter = code_offsets_.find(&code);
-      if (code_iter != code_offsets_.end() && code_offset != method_offsets.code_offset_) {
-        DCHECK(code_iter->second == method_offsets.code_offset_)
-            << PrettyMethod(method_idx, dex_file);
-      } else {
-        DCHECK(code_offset == method_offsets.code_offset_) << PrettyMethod(method_idx, dex_file);
-        if (!out.WriteFully(&code_size, sizeof(code_size))) {
-          ReportWriteFailure("method code size", method_idx, dex_file, out);
-          return 0;
+        DCHECK_ALIGNED(relative_offset, kArmAlignment);
+        uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+        CHECK_NE(code_size, 0U);
+
+        // Deduplicate code arrays
+        size_t code_offset = relative_offset + sizeof(code_size) + compiled_method->CodeDelta();
+        SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter =
+            code_offsets_.find(quick_code);
+        if (code_iter != code_offsets_.end() && code_offset != method_offsets.code_offset_) {
+          DCHECK(code_iter->second == method_offsets.code_offset_)
+              << PrettyMethod(method_idx, dex_file);
+        } else {
+          DCHECK(code_offset == method_offsets.code_offset_) << PrettyMethod(method_idx, dex_file);
+          if (!out.WriteFully(&code_size, sizeof(code_size))) {
+            ReportWriteFailure("method code size", method_idx, dex_file, out);
+            return 0;
+          }
+          size_code_size_ += sizeof(code_size);
+          relative_offset += sizeof(code_size);
+          DCHECK_OFFSET();
+          if (!out.WriteFully(&(*quick_code)[0], code_size)) {
+            ReportWriteFailure("method code", method_idx, dex_file, out);
+            return 0;
+          }
+          size_code_ += code_size;
+          relative_offset += code_size;
        }
-        size_code_size_ += sizeof(code_size);
-        relative_offset += sizeof(code_size);
        DCHECK_OFFSET();
-        if (!out.WriteFully(&code[0], code_size)) {
-          ReportWriteFailure("method code", method_idx, dex_file, out);
-          return 0;
-        }
-        size_code_ += code_size;
-        relative_offset += code_size;
      }
-      DCHECK_OFFSET();
-#endif
-
      const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
      size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
@@ -994,7 +998,6 @@ OatWriter::OatClass::~OatClass() {
   delete compiled_methods_;
 }
-#if defined(ART_USE_PORTABLE_COMPILER)
 size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatHeader(
     size_t class_def_method_index_) const {
   uint32_t method_offset = GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
@@ -1008,7 +1011,6 @@ size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatClass(
     size_t class_def_method_index_) const {
   return oat_method_offsets_offsets_from_oat_class_[class_def_method_index_];
 }
-#endif
 size_t OatWriter::OatClass::SizeOf() const {
   return sizeof(status_)
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 64275e6bbb..067c78971f 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -65,7 +65,7 @@ class OatWriter {
  public:
   OatWriter(const std::vector<const DexFile*>& dex_files,
             uint32_t image_file_location_oat_checksum,
-            uint32_t image_file_location_oat_begin,
+            uintptr_t image_file_location_oat_begin,
             const std::string& image_file_location,
             const CompilerDriver* compiler,
             TimingLogger* timings);
@@ -150,10 +150,8 @@ class OatWriter {
              uint32_t num_non_null_compiled_methods,
              mirror::Class::Status status);
     ~OatClass();
-#if defined(ART_USE_PORTABLE_COMPILER)
     size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
     size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
-#endif
     size_t SizeOf() const;
     void UpdateChecksum(OatHeader& oat_header) const;
     bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
@@ -217,7 +215,7 @@ class OatWriter {
   // dependencies on the image.
   uint32_t image_file_location_oat_checksum_;
-  uint32_t image_file_location_oat_begin_;
+  uintptr_t image_file_location_oat_begin_;
   std::string image_file_location_;
   // data to write
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 112dcfca74..478a854f26 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -41,7 +41,7 @@ class OutputStream {
     return location_;
   }
-  virtual bool WriteFully(const void* buffer, int64_t byte_count) = 0;
+  virtual bool WriteFully(const void* buffer, size_t byte_count) = 0;
   virtual off_t Seek(off_t offset, Whence whence) = 0;
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index 638e0ec457..7cc253ccdb 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -62,7 +62,9 @@ class DedupeSet {
   explicit DedupeSet(const char* set_name) {
     for (HashType i = 0; i < kShard; ++i) {
-      lock_name_[i] = StringPrintf("%s lock %d", set_name, i);
+      std::ostringstream oss;
+      oss << set_name << " lock " << i;
+      lock_name_[i] = oss.str();
       lock_[i].reset(new Mutex(lock_name_[i].c_str()));
     }
   }
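The DedupeSet change builds the shard lock name with an output stream instead of StringPrintf, so the code no longer has to keep a format specifier in sync with whatever integral type HashType happens to be. A standalone sketch of the same idea, with HashType as a placeholder alias:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Placeholder for the template's hash type; streaming the value works the
// same whether this is int, uint32_t, or uint64_t, with no %d/%u/%zu to fix.
using HashType = uint64_t;

std::string ShardLockName(const char* set_name, HashType shard) {
  std::ostringstream oss;
  oss << set_name << " lock " << shard;
  return oss.str();
}

int main() {
  std::cout << ShardLockName("dedupe code", 3) << "\n";  // "dedupe code lock 3"
  return 0;
}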
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 2be3d56cfa..fdd2bab4da 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -23,18 +23,6 @@ namespace art {
 namespace mips {
-#if 0
-class DirectCallRelocation : public AssemblerFixup {
- public:
-  void Process(const MemoryRegion& region, int position) {
-    // Direct calls are relative to the following instruction on mips.
-    int32_t pointer = region.Load<int32_t>(position);
-    int32_t start = reinterpret_cast<int32_t>(region.start());
-    int32_t delta = start + position + sizeof(int32_t);
-    region.Store<int32_t>(position, pointer - delta);
-  }
-};
-#endif
 std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
   if (rhs >= D0 && rhs < kNumberOfDRegisters) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 9095180246..136d2486df 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -24,17 +24,6 @@ namespace art {
 namespace x86 {
-class DirectCallRelocation : public AssemblerFixup {
- public:
-  void Process(const MemoryRegion& region, int position) {
-    // Direct calls are relative to the following instruction on x86.
-    int32_t pointer = region.Load<int32_t>(position);
-    int32_t start = reinterpret_cast<int32_t>(region.start());
-    int32_t delta = start + position + sizeof(int32_t);
-    region.Store<int32_t>(position, pointer - delta);
-  }
-};
-
 std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
   return os << "XMM" << static_cast<int>(reg);
 }
@@ -1304,15 +1293,6 @@ void X86Assembler::Bind(Label* label) {
 }
-void X86Assembler::Stop(const char* message) {
-  // Emit the message address as immediate operand in the test rax instruction,
-  // followed by the int3 instruction.
-  // Execution can be resumed with the 'cont' command in gdb.
-  testl(EAX, Immediate(reinterpret_cast<int32_t>(message)));
-  int3();
-}
-
-
 void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
   CHECK_GE(reg_or_opcode, 0);
   CHECK_LT(reg_or_opcode, 8);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 4ba03d1bd3..0fa8e0086c 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -452,9 +452,6 @@ class X86Assembler : public Assembler {
   void Align(int alignment, int offset);
   void Bind(Label* label);
-  // Debugging and bringup support.
-  void Stop(const char* message);
-
   //
   // Overridden common assembler high-level functionality
   //
diff --git a/compiler/vector_output_stream.h b/compiler/vector_output_stream.h
index a3f82262af..09daa12e02 100644
--- a/compiler/vector_output_stream.h
+++ b/compiler/vector_output_stream.h
@@ -31,7 +31,7 @@ class VectorOutputStream : public OutputStream {
   virtual ~VectorOutputStream() {}
-  bool WriteFully(const void* buffer, int64_t byte_count) {
+  bool WriteFully(const void* buffer, size_t byte_count) {
     if (static_cast<size_t>(offset_) == vector_.size()) {
       const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
      vector_.insert(vector_.end(), &start[0], &start[byte_count]);
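The WriteFully signature change, from int64_t to size_t byte_count, matches what callers actually pass (container and buffer sizes) and rules out a negative count. A minimal vector-backed sketch of that shape, not the ART class:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Toy stream that appends written bytes to an internal buffer; byte_count is
// a size_t so it lines up with std::vector::size() and strlen() without casts.
class VectorStream {
 public:
  bool WriteFully(const void* buffer, size_t byte_count) {
    const uint8_t* start = static_cast<const uint8_t*>(buffer);
    data_.insert(data_.end(), start, start + byte_count);
    return true;
  }
  size_t size() const { return data_.size(); }

 private:
  std::vector<uint8_t> data_;
};

int main() {
  VectorStream out;
  const char msg[] = "oat";
  out.WriteFully(msg, std::strlen(msg));
  std::cout << out.size() << "\n";  // 3
  return 0;
}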