24 files changed, 302 insertions, 90 deletions
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index b5c42f11a..9e3fbbc96 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -291,8 +291,12 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
 BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
                                 BasicBlock** immed_pred_block_p,
                                 ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-  if (code_offset >= current_code_item_->insns_size_in_code_units_) {
-    return nullptr;
+  if (UNLIKELY(code_offset >= current_code_item_->insns_size_in_code_units_)) {
+    // There can be a fall-through out of the method code. We shall record such a block
+    // here (assuming create==true) and check that it's dead at the end of InlineMethod().
+    // Though we're only aware of the cases where code_offset is exactly the same as
+    // insns_size_in_code_units_, treat greater code_offset the same just in case.
+    code_offset = current_code_item_->insns_size_in_code_units_;
   }

   int block_id = (*dex_pc_to_block_map)[code_offset];
@@ -483,6 +487,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
   BasicBlock* taken_block = FindBlock(target, /* create */ true,
                                       /* immed_pred_block_p */ &cur_block,
                                       dex_pc_to_block_map);
+  DCHECK(taken_block != nullptr);
   cur_block->taken = taken_block->id;
   taken_block->predecessors.push_back(cur_block->id);

@@ -494,6 +499,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
                                                 /* immed_pred_block_p */ &cur_block,
                                                 dex_pc_to_block_map);
+    DCHECK(fallthrough_block != nullptr);
     cur_block->fall_through = fallthrough_block->id;
     fallthrough_block->predecessors.push_back(cur_block->id);
   } else if (code_ptr < code_end) {
@@ -508,7 +514,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
                                        ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
   UNUSED(flags);
   const uint16_t* switch_data =
-      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
+      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
+          static_cast<int32_t>(insn->dalvikInsn.vB));
   int size;
   const int* keyTable;
   const int* target_table;
@@ -561,6 +568,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
     BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
                                        /* immed_pred_block_p */ &cur_block,
                                        dex_pc_to_block_map);
+    DCHECK(case_block != nullptr);
     SuccessorBlockInfo* successor_block_info =
         static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                        kArenaAllocSuccessor));
@@ -576,6 +584,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
   BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
                                             /* immed_pred_block_p */ nullptr,
                                             dex_pc_to_block_map);
+  DCHECK(fallthrough_block != nullptr);
   cur_block->fall_through = fallthrough_block->id;
   fallthrough_block->predecessors.push_back(cur_block->id);
   return cur_block;
@@ -709,8 +718,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
   // FindBlock lookup cache.
   ScopedArenaAllocator allocator(&cu_->arena_stack);
   ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
-  dex_pc_to_block_map.resize(dex_pc_to_block_map.size() +
-                             current_code_item_->insns_size_in_code_units_);
+  dex_pc_to_block_map.resize(current_code_item_->insns_size_in_code_units_ +
+                             1 /* Fall-through on last insn; dead or punt to interpreter. */);
   // TODO: replace with explicit resize routine. Using automatic extension side effect for now.
   try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
@@ -876,6 +885,20 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
   if (cu_->verbose) {
     DumpMIRGraph();
   }
+
+  // Check if there's been a fall-through out of the method code.
+  BasicBlockId out_bb_id = dex_pc_to_block_map[current_code_item_->insns_size_in_code_units_];
+  if (UNLIKELY(out_bb_id != NullBasicBlockId)) {
+    // Eagerly calculate DFS order to determine if the block is dead.
+    DCHECK(!DfsOrdersUpToDate());
+    ComputeDFSOrders();
+    BasicBlock* out_bb = GetBasicBlock(out_bb_id);
+    DCHECK(out_bb != nullptr);
+    if (out_bb->block_type != kDead) {
+      LOG(WARNING) << "Live fall-through out of method in " << PrettyMethod(method_idx, dex_file);
+      SetPuntToInterpreter(true);
+    }
+  }
 }

 void MIRGraph::ShowOpcodeStats() {
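A reader-oriented sketch of the indexing scheme the mir_graph.cc hunks rely on: the dex-pc-to-block table now carries one extra slot, so any offset at or past the end of the code item folds into a single "out of method" entry that InlineMethod() later checks for liveness. The helper names below are invented for illustration; only the sizing and clamping logic comes from the patch.

// Minimal sketch (not ART code) of the table sizing and offset clamping shown above.
#include <cstdint>
#include <vector>

std::vector<uint16_t> MakeDexPcToBlockMap(uint32_t insns_size_in_code_units) {
  // One entry per dex pc, plus one synthetic slot for a fall-through off the end of the method.
  return std::vector<uint16_t>(insns_size_in_code_units + 1, 0);
}

uint32_t ClampOffset(uint32_t code_offset, uint32_t insns_size_in_code_units) {
  // Anything at or beyond the end is folded into the single out-of-method slot.
  return code_offset >= insns_size_in_code_units ? insns_size_in_code_units : code_offset;
}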
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 934fa3509..8467b718a 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -428,7 +428,7 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
   { kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextwRRI", "!0r,!1r,!2d" },
   { kX86PextrdRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextdRRI", "!0r,!1r,!2d" },
   { kX86PextrbMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrbMRI", "[!0r+!1d],!2r,!3d" },
-  { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
+  { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x15, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
   { kX86PextrdMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrdMRI", "[!0r+!1d],!2r,!3d" },

   { kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuflwRRI", "!0r,!1r,!2d" },
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 36339f72e..f58f206af 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -180,6 +180,13 @@ TEST_F(QuickAssembleX86LowLevelTest, Mulpd) {
        RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
 }

+TEST_F(QuickAssembleX86LowLevelTest, Pextrw) {
+  Test(kX86, "Pextrw", "pextrw $7, %xmm3, 8(%eax)\n", kX86PextrwMRI,
+       RegStorage::Solo32(r0).GetReg(), 8, RegStorage::Solo128(3).GetReg(), 7);
+  Test(kX86_64, "Pextrw", "pextrw $7, %xmm8, 8(%r10)\n", kX86PextrwMRI,
+       RegStorage::Solo64(r10q).GetReg(), 8, RegStorage::Solo128(8).GetReg(), 7);
+}
+
 class QuickAssembleX86MacroTest : public QuickAssembleX86TestBase {
  protected:
   typedef void (X86Mir2Lir::*AsmFn)(MIR*);
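For reference, the memory-destination form of PEXTRW is the SSE4.1 three-byte opcode 66 0F 3A 15, while 0x16 (the byte the table row previously reused) selects PEXTRD; that is the whole substance of the assemble_x86.cc fix and of the new test above. The byte-by-byte breakdown below is my own decoding of the 32-bit test instruction, not output captured from the assembler or the test.

// Illustrative only: expected encoding of "pextrw $7, %xmm3, 8(%eax)" with the corrected entry.
#include <cstdint>

const uint8_t kExpectedPextrwMRI[] = {
    0x66,              // operand-size prefix required by this form
    0x0F, 0x3A, 0x15,  // PEXTRW r/m16, xmm, imm8 (0x16 here would encode PEXTRD instead)
    0x58,              // ModRM: mod=01 (disp8), reg=xmm3, rm=EAX
    0x08,              // disp8 = 8
    0x07,              // imm8 = 7
};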
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index e2eb46aab..699987c05 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -449,6 +449,20 @@ void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstructio
   instructions_.InsertInstructionBefore(instruction, cursor);
 }

+void HBasicBlock::InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor) {
+  DCHECK(!cursor->IsPhi());
+  DCHECK(!instruction->IsPhi());
+  DCHECK_EQ(instruction->GetId(), -1);
+  DCHECK_NE(cursor->GetId(), -1);
+  DCHECK_EQ(cursor->GetBlock(), this);
+  DCHECK(!instruction->IsControlFlow());
+  DCHECK(!cursor->IsControlFlow());
+  instruction->SetBlock(this);
+  instruction->SetId(GetGraph()->GetNextInstructionId());
+  UpdateInputsUsers(instruction);
+  instructions_.InsertInstructionAfter(instruction, cursor);
+}
+
 void HBasicBlock::InsertPhiAfter(HPhi* phi, HPhi* cursor) {
   DCHECK_EQ(phi->GetId(), -1);
   DCHECK_NE(cursor->GetId(), -1);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index f64086e60..3fe23e181 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -620,7 +620,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
   void DisconnectAndDelete();

   void AddInstruction(HInstruction* instruction);
+  // Insert `instruction` before/after an existing instruction `cursor`.
   void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
+  void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
   // Replace instruction `initial` with `replacement` within this block.
   void ReplaceAndRemoveInstructionWith(HInstruction* initial,
                                        HInstruction* replacement);
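The new InsertInstructionAfter() mirrors InsertInstructionBefore() but places the node directly behind the cursor. A minimal caller sketch follows; it is illustrative only (the patch adds just the member function above, not this caller), and the preconditions in the comments restate the DCHECKs in the implementation.

// Hypothetical pass code, not part of the patch.
void EmitNextToCursor(HBasicBlock* block, HInstruction* cursor, HInstruction* fresh_instruction) {
  // Preconditions enforced by InsertInstructionAfter():
  //  - fresh_instruction has no id yet and is not attached to any block;
  //  - neither instruction is a phi or a control-flow instruction;
  //  - cursor already lives in `block`.
  block->InsertInstructionAfter(fresh_instruction, cursor);
}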
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b764095fc..8490afbee 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */

+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/stat.h>
@@ -324,26 +325,19 @@ class WatchDog {
     return nullptr;
   }

-  static void Message(char severity, const std::string& message) {
-    // TODO: Remove when we switch to LOG when we can guarantee it won't prevent shutdown in error
-    //       cases.
-    fprintf(stderr, "dex2oat%s %c %d %d %s\n",
-            kIsDebugBuild ? "d" : "",
-            severity,
-            getpid(),
-            GetTid(),
-            message.c_str());
-  }
-
   NO_RETURN static void Fatal(const std::string& message) {
-    Message('F', message);
+    // TODO: When we can guarantee it won't prevent shutdown in error cases, move to LOG. However,
+    //       it's rather easy to hang in unwinding.
+    //       LogLine also avoids ART logging lock issues, as it's really only a wrapper around
+    //       logcat logging or stderr output.
+    LogMessage::LogLine(__FILE__, __LINE__, LogSeverity::FATAL, message.c_str());
     exit(1);
   }

   void Wait() {
     // TODO: tune the multiplier for GC verification, the following is just to make the timeout
     //       large.
-    int64_t multiplier = kVerifyObjectSupport > kVerifyObjectModeFast ? 100 : 1;
+    constexpr int64_t multiplier = kVerifyObjectSupport > kVerifyObjectModeFast ? 100 : 1;
     timespec timeout_ts;
     InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogTimeoutSeconds * 1000, 0, &timeout_ts);
     const char* reason = "dex2oat watch dog thread waiting";
@@ -351,7 +345,8 @@ class WatchDog {
     while (!shutting_down_) {
       int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts));
       if (rc == ETIMEDOUT) {
-        Fatal(StringPrintf("dex2oat did not finish after %d seconds", kWatchDogTimeoutSeconds));
+        Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
+                           kWatchDogTimeoutSeconds));
       } else if (rc != 0) {
         std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
                                          strerror(errno)));
@@ -363,10 +358,10 @@ class WatchDog {

   // When setting timeouts, keep in mind that the build server may not be as fast as your desktop.
   // Debug builds are slower so they have larger timeouts.
-  static const unsigned int kSlowdownFactor = kIsDebugBuild ? 5U : 1U;
+  static constexpr int64_t kSlowdownFactor = kIsDebugBuild ? 5U : 1U;

-  // 6 minutes scaled by kSlowdownFactor.
-  static const unsigned int kWatchDogTimeoutSeconds = kSlowdownFactor * 6 * 60;
+  // 10 minutes scaled by kSlowdownFactor.
+  static constexpr int64_t kWatchDogTimeoutSeconds = kSlowdownFactor * 10 * 60;

   bool is_watch_dog_enabled_;
   bool shutting_down_;
@@ -1806,8 +1801,6 @@ class Dex2Oat FINAL {
   DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
 };

-const unsigned int WatchDog::kWatchDogTimeoutSeconds;
-
 static void b13564922() {
 #if defined(__linux__) && defined(__arm__)
   int major, minor;
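The watchdog constants are now int64_t, which is why <inttypes.h> is pulled in and the timeout message uses PRId64 instead of %d. A tiny standalone version of that formatting follows; it uses plain printf rather than ART's StringPrintf purely so it compiles on its own.

// Self-contained sketch of the PRId64 formatting used in the hunk above.
#include <cinttypes>
#include <cstdio>

int main() {
  constexpr int64_t kSlowdownFactor = 1;
  constexpr int64_t kWatchDogTimeoutSeconds = kSlowdownFactor * 10 * 60;
  // "%d" would be wrong for an int64_t argument; PRId64 expands to the right length modifier.
  std::printf("dex2oat did not finish after %" PRId64 " seconds\n", kWatchDogTimeoutSeconds);
  return 0;
}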
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index ba0c0bdeb..2ead4a2af 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -587,6 +587,14 @@ DISASSEMBLER_ENTRY(cmp,
           src_reg_file = SSE;
           immediate_bytes = 1;
           break;
+        case 0x15:
+          opcode1 = "pextrw";
+          prefix[2] = 0;
+          has_modrm = true;
+          store = true;
+          src_reg_file = SSE;
+          immediate_bytes = 1;
+          break;
         case 0x16:
           opcode1 = "pextrd";
           prefix[2] = 0;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index af9ef1ff3..ee5c59f96 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -982,6 +982,7 @@ ENTRY art_quick_set_obj_instance
     RETURN_IF_ZERO
 END art_quick_set_obj_instance

+// Macro to facilitate adding new allocation entrypoints.
 .macro ONE_ARG_DOWNCALL name, entrypoint, return
     .extern \entrypoint
 ENTRY \name
@@ -992,19 +993,6 @@ ENTRY \name
 END \name
 .endm

-// Macro to facilitate adding new allocation entrypoints.
-.macro ONE_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a1, rSELF                # pass Thread::Current
-    jal     \entrypoint
-    move    $a2, $sp                  # pass $sp
-    \return
-END \name
-.endm
-
 .macro TWO_ARG_DOWNCALL name, entrypoint, return
     .extern \entrypoint
 ENTRY \name
@@ -1028,11 +1016,9 @@ END \name
 .macro FOUR_ARG_DOWNCALL name, entrypoint, return
     .extern \entrypoint
 ENTRY \name
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    sw      rSELF, 16($sp)            # pass Thread::Current
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     jal     \entrypoint
-    sw      $sp, 20($sp)              # pass $sp
+    sw      rSELF, 16($sp)            # pass Thread::Current
     \return
 END \name
 .endm
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index f867aa836..d781e7662 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1265,6 +1265,16 @@ ENTRY \name
 END \name
 .endm

+.macro FOUR_ARG_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
+ENTRY \name
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
+    jal     \entrypoint
+    move    $a4, rSELF                 # pass Thread::Current
+    \return
+END \name
+.endm
+
 // Generate the allocation entrypoints for each allocator.
 GENERATE_ALL_ALLOC_ENTRYPOINTS
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 0589cdd3a..20098e748 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -296,6 +296,12 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
   return dex_file;
 }

+// Technically we do not have a limitation with respect to the number of dex files that can be in a
+// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
+// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
+// seems an excessive number.
+static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
+
 bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
                           std::string* error_msg,
                           std::vector<std::unique_ptr<const DexFile>>* dex_files) {
@@ -310,14 +316,13 @@ bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& loca
     dex_files->push_back(std::move(dex_file));

     // Now try some more.
-    size_t i = 2;
     // We could try to avoid std::string allocations by working on a char array directly. As we
     // do not expect a lot of iterations, this seems too involved and brittle.
-    while (i < 100) {
-      std::string name = StringPrintf("classes%zu.dex", i);
-      std::string fake_location = location + kMultiDexSeparator + name;
+    for (size_t i = 1; ; ++i) {
+      std::string name = GetMultiDexClassesDexName(i);
+      std::string fake_location = GetMultiDexLocation(i, location.c_str());
       std::unique_ptr<const DexFile> next_dex_file(Open(zip_archive, name.c_str(), fake_location,
                                                         error_msg, &error_code));
       if (next_dex_file.get() == nullptr) {
@@ -329,7 +334,16 @@ bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& loca
         dex_files->push_back(std::move(next_dex_file));
       }

-      i++;
+      if (i == kWarnOnManyDexFilesThreshold) {
+        LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
+                     << " dex files. Please consider coalescing and shrinking the number to "
+                        " avoid runtime overhead.";
+      }
+
+      if (i == std::numeric_limits<size_t>::max()) {
+        LOG(ERROR) << "Overflow in number of dex files!";
+        break;
+      }
     }

     return true;
@@ -973,11 +987,19 @@ bool DexFile::IsMultiDexLocation(const char* location) {
   return strrchr(location, kMultiDexSeparator) != nullptr;
 }

-std::string DexFile::GetMultiDexClassesDexName(size_t number, const char* dex_location) {
-  if (number == 0) {
+std::string DexFile::GetMultiDexClassesDexName(size_t index) {
+  if (index == 0) {
+    return "classes.dex";
+  } else {
+    return StringPrintf("classes%zu.dex", index + 1);
+  }
+}
+
+std::string DexFile::GetMultiDexLocation(size_t index, const char* dex_location) {
+  if (index == 0) {
     return dex_location;
   } else {
-    return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
+    return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, index + 1);
   }
 }
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 0d0735828..6b3f883a5 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -888,7 +888,13 @@ class DexFile {
     return size_;
   }

-  static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location);
+  // Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
+  // index == 0, and classes{index + 1}.dex else.
+  static std::string GetMultiDexClassesDexName(size_t index);
+
+  // Return the (possibly synthetic) dex location for a multidex entry. This is dex_location for
+  // index == 0, and dex_location + multi-dex-separator + GetMultiDexClassesDexName(index) else.
+  static std::string GetMultiDexLocation(size_t index, const char* dex_location);

   // Returns the canonical form of the given dex location.
   //
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 4d099e1ca..90b35a3e0 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -350,11 +350,20 @@ TEST_F(DexFileTest, FindFieldId) {
 }

 TEST_F(DexFileTest, GetMultiDexClassesDexName) {
+  ASSERT_EQ("classes.dex", DexFile::GetMultiDexClassesDexName(0));
+  ASSERT_EQ("classes2.dex", DexFile::GetMultiDexClassesDexName(1));
+  ASSERT_EQ("classes3.dex", DexFile::GetMultiDexClassesDexName(2));
+  ASSERT_EQ("classes100.dex", DexFile::GetMultiDexClassesDexName(99));
+}
+
+TEST_F(DexFileTest, GetMultiDexLocation) {
   std::string dex_location_str = "/system/app/framework.jar";
   const char* dex_location = dex_location_str.c_str();
-  ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexClassesDexName(0, dex_location));
-  ASSERT_EQ("/system/app/framework.jar:classes2.dex", DexFile::GetMultiDexClassesDexName(1, dex_location));
-  ASSERT_EQ("/system/app/framework.jar:classes101.dex", DexFile::GetMultiDexClassesDexName(100, dex_location));
+  ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexLocation(0, dex_location));
+  ASSERT_EQ("/system/app/framework.jar:classes2.dex",
+            DexFile::GetMultiDexLocation(1, dex_location));
+  ASSERT_EQ("/system/app/framework.jar:classes101.dex",
+            DexFile::GetMultiDexLocation(100, dex_location));
 }

 TEST_F(DexFileTest, GetDexCanonicalLocation) {
@@ -363,7 +372,7 @@
   std::string dex_location(dex_location_real.get());

   ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
-  std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
+  std::string multidex_location = DexFile::GetMultiDexLocation(1, dex_location.c_str());
   ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));

   std::string dex_location_sym = dex_location + "symlink";
@@ -371,7 +380,7 @@
   ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str()));

-  std::string multidex_location_sym = DexFile::GetMultiDexClassesDexName(1, dex_location_sym.c_str());
+  std::string multidex_location_sym = DexFile::GetMultiDexLocation(1, dex_location_sym.c_str());
   ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));

   ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 85234dc27..49c7fda2e 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1042,10 +1042,11 @@ inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_

 inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
   const size_t kBitsPerVec = 32;
-  DCHECK_GE(num_slots * kBitsPerVec, num_vec);
+  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
+  DCHECK_NE(num_vec, 0U);
   size_t remain = num_vec * kBitsPerVec - num_slots;
-  DCHECK_NE(remain, kBitsPerVec);
-  return ((1U << remain) - 1) << (kBitsPerVec - remain);
+  DCHECK_LT(remain, kBitsPerVec);
+  return ((1U << remain) - 1) << ((kBitsPerVec - remain) & 0x1F);
 }

 inline bool RosAlloc::Run::IsAllFree() {
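A standalone illustration of what the rosalloc change guards against: the mask is meant to cover the top `remain` unused bits of a 32-bit vector, and when remain == 0 the old expression shifted by 32, which is undefined behaviour for a 32-bit operand even though the first factor is already 0. Masking the shift count with 0x1F keeps the shift in range while still yielding 0. This is a sketch of the arithmetic only, not ART code.

// Compiles on its own; the asserts spell out the expected masks.
#include <cassert>
#include <cstddef>
#include <cstdint>

uint32_t LastVectorMask(size_t num_slots, size_t num_vec) {
  const size_t kBitsPerVec = 32;
  size_t remain = num_vec * kBitsPerVec - num_slots;  // 0 <= remain < 32 per the new DCHECKs
  return ((1U << remain) - 1) << ((kBitsPerVec - remain) & 0x1F);
}

int main() {
  assert(LastVectorMask(32, 1) == 0x00000000u);  // remain == 0: no unused bits, shift stays legal
  assert(LastVectorMask(31, 1) == 0x80000000u);  // remain == 1: only the top bit is unused
  assert(LastVectorMask(24, 1) == 0xFF000000u);  // remain == 8: the top byte is unused
  return 0;
}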
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 8f5a7d425..cd5d2f66d 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -54,8 +54,9 @@ class SetStringCountVisitor {
 // Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
 class SetStringCountAndBytesVisitor {
  public:
-  SetStringCountAndBytesVisitor(int32_t count, uint8_t* src, int32_t high_byte)
-      : count_(count), src_(src), high_byte_(high_byte) {
+  SetStringCountAndBytesVisitor(int32_t count, Handle<ByteArray> src_array, int32_t offset,
+                                int32_t high_byte)
+      : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
   }

   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
@@ -64,35 +65,63 @@ class SetStringCountAndBytesVisitor {
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
     uint16_t* value = string->GetValue();
+    const uint8_t* const src = reinterpret_cast<uint8_t*>(src_array_->GetData()) + offset_;
     for (int i = 0; i < count_; i++) {
-      value[i] = high_byte_ + (src_[i] & 0xFF);
+      value[i] = high_byte_ + (src[i] & 0xFF);
     }
   }

  private:
   const int32_t count_;
-  const uint8_t* const src_;
+  Handle<ByteArray> src_array_;
+  const int32_t offset_;
   const int32_t high_byte_;
 };

 // Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndValueVisitor {
+class SetStringCountAndValueVisitorFromCharArray {
  public:
-  SetStringCountAndValueVisitor(int32_t count, uint16_t* src) : count_(count), src_(src) {
+  SetStringCountAndValueVisitorFromCharArray(int32_t count, Handle<CharArray> src_array,
+                                             int32_t offset) :
+      count_(count), src_array_(src_array), offset_(offset) {
   }

-  void operator()(Object* obj, size_t usable_size) const
+  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    // Avoid AsString as object is not yet in live bitmap or allocation stack.
+    String* string = down_cast<String*>(obj);
+    string->SetCount(count_);
+    const uint16_t* const src = src_array_->GetData() + offset_;
+    memcpy(string->GetValue(), src, count_ * sizeof(uint16_t));
+  }
+
+ private:
+  const int32_t count_;
+  Handle<CharArray> src_array_;
+  const int32_t offset_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromString {
+ public:
+  SetStringCountAndValueVisitorFromString(int32_t count, Handle<String> src_string,
+                                          int32_t offset) :
+      count_(count), src_string_(src_string), offset_(offset) {
+  }
+
+  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    UNUSED(usable_size);
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
-    memcpy(string->GetValue(), src_, count_ * sizeof(uint16_t));
+    const uint16_t* const src = src_string_->GetValue() + offset_;
+    memcpy(string->GetValue(), src, count_ * sizeof(uint16_t));
   }

  private:
   const int32_t count_;
-  const uint16_t* const src_;
+  Handle<String> src_string_;
+  const int32_t offset_;
 };

 inline String* String::Intern() {
@@ -140,8 +169,7 @@ template <bool kIsInstrumented>
 inline String* String::AllocFromByteArray(Thread* self, int32_t byte_length,
                                           Handle<ByteArray> array, int32_t offset,
                                           int32_t high_byte, gc::AllocatorType allocator_type) {
-  uint8_t* data = reinterpret_cast<uint8_t*>(array->GetData()) + offset;
-  SetStringCountAndBytesVisitor visitor(byte_length, data, high_byte << 8);
+  SetStringCountAndBytesVisitor visitor(byte_length, array, offset, high_byte << 8);
   String* string = Alloc<kIsInstrumented>(self, byte_length, allocator_type, visitor);
   return string;
 }
@@ -150,8 +178,7 @@ template <bool kIsInstrumented>
 inline String* String::AllocFromCharArray(Thread* self, int32_t array_length,
                                           Handle<CharArray> array, int32_t offset,
                                           gc::AllocatorType allocator_type) {
-  uint16_t* data = array->GetData() + offset;
-  SetStringCountAndValueVisitor visitor(array_length, data);
+  SetStringCountAndValueVisitorFromCharArray visitor(array_length, array, offset);
   String* new_string = Alloc<kIsInstrumented>(self, array_length, allocator_type, visitor);
   return new_string;
 }
@@ -159,8 +186,7 @@
 inline String* String::AllocFromString(Thread* self, int32_t string_length, Handle<String> string,
                                        int32_t offset, gc::AllocatorType allocator_type) {
-  uint16_t* data = string->GetValue() + offset;
-  SetStringCountAndValueVisitor visitor(string_length, data);
+  SetStringCountAndValueVisitorFromString visitor(string_length, string, offset);
   String* new_string = Alloc<kIsInstrumented>(self, string_length, allocator_type, visitor);
   return new_string;
 }
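The string-inl.h rewrite is about GC safety rather than style: Alloc<> can trigger a collection that moves the source array, so a raw uint8_t*/uint16_t* captured before the allocation may be stale by the time the visitor's operator() runs. Holding a Handle<> plus an offset and re-deriving the pointer inside the visitor avoids that. The before/after below is a schematic of the hazard using ART type names, with AllocStringThatMayGc() standing in for the real allocation path; it is not code from the patch.

void Broken(Thread* self, Handle<CharArray> array, int32_t offset, int32_t length) {
  const uint16_t* data = array->GetData() + offset;       // raw pointer captured too early
  String* s = AllocStringThatMayGc(self, length);         // placeholder; a moving GC may relocate `array` here
  memcpy(s->GetValue(), data, length * sizeof(uint16_t)); // may read from the old location
}

void Fixed(Thread* self, Handle<CharArray> array, int32_t offset, int32_t length) {
  String* s = AllocStringThatMayGc(self, length);         // placeholder for String::Alloc<>()
  // The handle tracks the relocation, so derive the source pointer only after the allocation.
  memcpy(s->GetValue(), array->GetData() + offset, length * sizeof(uint16_t));
}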
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index ca9464424..782b9c076 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -115,10 +115,14 @@ std::string Throwable::Dump() {
     } else {
       for (int32_t i = 0; i < ste_array->GetLength(); ++i) {
         StackTraceElement* ste = ste_array->Get(i);
-        result += StringPrintf("    at %s (%s:%d)\n",
-                               ste->GetMethodName()->ToModifiedUtf8().c_str(),
-                               ste->GetFileName()->ToModifiedUtf8().c_str(),
-                               ste->GetLineNumber());
+        DCHECK(ste != nullptr);
+        auto* method_name = ste->GetMethodName();
+        auto* file_name = ste->GetFileName();
+        result += StringPrintf(
+            "    at %s (%s:%d)\n",
+            method_name != nullptr ? method_name->ToModifiedUtf8().c_str() : "<unknown method>",
+            file_name != nullptr ? file_name->ToModifiedUtf8().c_str() : "(Unknown Source)",
+            ste->GetLineNumber());
       }
     }
   } else {
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 036d04eba..0fd6759d6 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -36,7 +36,7 @@ namespace art {
  */
 static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
   ScopedFastNativeObjectAccess soa(env);
-  mirror::Method* m = soa.Decode<mirror::Method*>(javaMethod);
+  mirror::Constructor* m = soa.Decode<mirror::Constructor*>(javaMethod);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
   if (UNLIKELY(c->IsAbstract())) {
@@ -46,7 +46,7 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA
     return nullptr;
   }
   // Verify that we can access the class.
-  if (!c->IsPublic()) {
+  if (!m->IsAccessible() && !c->IsPublic()) {
     auto* caller = GetCallingClass(soa.Self(), 1);
     // If caller is null, then we called from JNI, just avoid the check since JNI avoids most
     // access checks anyways. TODO: Investigate if this the correct behavior.
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 37e85ab37..2f6726328 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -230,8 +230,8 @@ std::vector<std::unique_ptr<const DexFile>> OatFileAssistant::LoadDexFiles(
   dex_files.push_back(std::move(dex_file));

   // Load secondary multidex files
-  for (int i = 1; ; i++) {
-    std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
+  for (size_t i = 1; ; i++) {
+    std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location);
     oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
     if (oat_dex_file == nullptr) {
       // There are no more secondary dex files to load.
@@ -403,9 +403,9 @@ bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
   }

   // Verify the dex checksums for any secondary multidex files
-  for (int i = 1; ; i++) {
+  for (size_t i = 1; ; i++) {
     std::string secondary_dex_location
-        = DexFile::GetMultiDexClassesDexName(i, dex_location_);
+        = DexFile::GetMultiDexLocation(i, dex_location_);
     const OatFile::OatDexFile* secondary_oat_dex_file
         = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
     if (secondary_oat_dex_file == nullptr) {
diff --git a/test/098-ddmc/src/Main.java b/test/098-ddmc/src/Main.java
index 962bd7f7d..f41ff2a94 100644
--- a/test/098-ddmc/src/Main.java
+++ b/test/098-ddmc/src/Main.java
@@ -44,7 +44,7 @@ public class Main {
         System.out.println("Confirm when we overflow, we don't roll over to zero. b/17392248");
         final int overflowAllocations = 64 * 1024;  // Won't fit in unsigned 16-bit value.
         for (int i = 0; i < overflowAllocations; i++) {
-            new String("fnord");
+            new Object();
         }
         Allocations after = new Allocations(DdmVmInternal.getRecentAllocations());
         System.out.println("before < overflowAllocations=" + (before.numberOfEntries < overflowAllocations));
diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java
index 86a5ef89d..72e14b15f 100644
--- a/test/100-reflect2/src/Main.java
+++ b/test/100-reflect2/src/Main.java
@@ -266,7 +266,7 @@ class Main {
     show(ctor.newInstance(new char[] { 'x', 'y', 'z', '!' }, 1, 2));
   }

-  private static void testPackagePrivate() {
+  private static void testPackagePrivateConstructor() {
     try {
       Class<?> c = Class.forName("sub.PPClass");
       Constructor cons = c.getConstructor();
@@ -280,10 +280,23 @@ class Main {
     }
   }

+  private static void testPackagePrivateAccessibleConstructor() {
+    try {
+      Class<?> c = Class.forName("sub.PPClass");
+      Constructor cons = c.getConstructor();
+      cons.setAccessible(true);  // ensure we prevent IllegalAccessException
+      cons.newInstance();
+    } catch (Exception e) {
+      // Error.
+      e.printStackTrace();
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     testFieldReflection();
     testMethodReflection();
     testConstructorReflection();
-    testPackagePrivate();
+    testPackagePrivateConstructor();
+    testPackagePrivateAccessibleConstructor();
   }
 }
diff --git a/test/472-unreachable-if-regression/expected.txt b/test/472-unreachable-if-regression/expected.txt
new file mode 100644
index 000000000..9fc8bea9e
--- /dev/null
+++ b/test/472-unreachable-if-regression/expected.txt
@@ -0,0 +1,3 @@
+Test started.
+Successfully called UnreachableIf().
+Successfully called UnreachablePackedSwitch().
diff --git a/test/472-unreachable-if-regression/info.txt b/test/472-unreachable-if-regression/info.txt
new file mode 100644
index 000000000..d8b5a4541
--- /dev/null
+++ b/test/472-unreachable-if-regression/info.txt
@@ -0,0 +1,3 @@
+Regression test for crashes during compilation of methods which end
+with an if-cc or switch, i.e. there's a fall-through out of method code.
+Also tests a packed-switch with negative offset to its data.
diff --git a/test/472-unreachable-if-regression/smali/Test.smali b/test/472-unreachable-if-regression/smali/Test.smali
new file mode 100644
index 000000000..c7107d19e
--- /dev/null
+++ b/test/472-unreachable-if-regression/smali/Test.smali
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static UnreachableIf()V
+    .registers 1
+    return-void
+    :unreachable
+    not-int v0, v0
+    if-lt v0, v0, :unreachable
+    # fall-through out of code item
+.end method
+
+.method public static UnreachablePackedSwitch()V
+    .registers 1
+    return-void
+    :unreachable
+    goto :pswitch_2
+    :pswitch_data
+    .packed-switch 1
+        :pswitch_1
+        :pswitch_2
+        :pswitch_1
+        :pswitch_2
+    .end packed-switch
+    :pswitch_1
+    not-int v0, v0
+    :pswitch_2
+    packed-switch v0, :pswitch_data
+    # fall-through out of code item
+.end method
diff --git a/test/472-unreachable-if-regression/src/Main.java b/test/472-unreachable-if-regression/src/Main.java
new file mode 100644
index 000000000..c9f951183
--- /dev/null
+++ b/test/472-unreachable-if-regression/src/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+  // Workaround for b/18051191.
+  class InnerClass {}
+
+  public static void main(String args[]) throws Exception {
+    System.out.println("Test started.");
+    Class<?> c = Class.forName("Test");
+
+    Method unreachableIf = c.getMethod("UnreachableIf", (Class[]) null);
+    unreachableIf.invoke(null, (Object[]) null);
+    System.out.println("Successfully called UnreachableIf().");
+
+    Method unreachablePackedSwitch = c.getMethod("UnreachablePackedSwitch", (Class[]) null);
+    unreachablePackedSwitch.invoke(null, (Object[]) null);
+    System.out.println("Successfully called UnreachablePackedSwitch().");
+  }
+
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 93340fb81..c7e68775c 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -385,8 +385,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=

 # Known broken tests for the optimizing compiler.
 TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
-TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS += 099-vmdebug # b/18098594
-TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS += 802-deoptimization # b/18547544
+TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS += 472-unreachable-if-regression # b/19988134

 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \