-rw-r--r--  compiler/dex/quick/arm/int_arm.cc     |  1
-rw-r--r--  compiler/dex/quick/gen_common.cc      |  6
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc      |  4
-rw-r--r--  compiler/dex/quick/mir_to_lir.h       |  1
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h  |  1
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc     | 20
-rw-r--r--  runtime/class_linker.cc               |  2
-rw-r--r--  runtime/fault_handler.cc              |  6
-rw-r--r--  runtime/gc/heap-inl.h                 | 22
-rw-r--r--  runtime/gc/heap.cc                    | 29
-rw-r--r--  runtime/gc/heap.h                     |  2
-rw-r--r--  runtime/mirror/class.cc               |  1
-rw-r--r--  runtime/mirror/class.h                |  1
13 files changed, 79 insertions(+), 17 deletions(-)
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e38dbf5a8d..4aedbafcfb 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1162,6 +1162,7 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
// Check for destructive overlap
if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
RegStorage t_reg = AllocTemp();
+ OpRegCopy(t_reg, rl_result.reg.GetLow());
OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
FreeTemp(t_reg);
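
Note on the fix above: GenNegLong computes 0 - src as a sub/sbc pair, and when the result's low register aliases the source's high register, the sub clobbers the high word before the sbc reads it. The added OpRegCopy snapshots that word into a temp first. A minimal C++ sketch of the two-word negation and the copy that avoids the hazard (illustrative names, not ART code):

    #include <cstdint>
    #include <cassert>

    // Mirrors the sub/sbc pair emitted by GenNegLong: lo = 0 - lo,
    // hi = 0 - hi - borrow. Reading the high half into a temp first is
    // what the added OpRegCopy does when the registers alias.
    static void NegLong(uint32_t* lo, uint32_t* hi) {
      uint32_t t_reg = *hi;                 // copy before the sub clobbers it
      uint32_t borrow = (*lo != 0) ? 1 : 0;
      *lo = 0u - *lo;                       // kOpSub
      *hi = 0u - t_reg - borrow;            // kOpSbc
    }

    int main() {
      uint32_t lo = 1, hi = 0;              // the value 1
      NegLong(&lo, &hi);
      assert(lo == 0xFFFFFFFFu && hi == 0xFFFFFFFFu);  // -1
      return 0;
    }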
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 9be8719b5d..774176ebb1 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -322,6 +322,12 @@ void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
+void Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = UpdateLocWide(rl_src);
+ rl_src = NarrowRegLoc(rl_src);
+ StoreValue(rl_dest, rl_src);
+}
+
void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src) {
rl_src = LoadValue(rl_src, kCoreReg);
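
The new Mir2Lir::GenLongToInt above keeps the existing behavior: Dalvik's LONG_TO_INT is a plain truncation to the low 32 bits, so narrowing the wide register location and storing its low word is sufficient. A hedged illustration of the required semantics (plain C++, not ART code):

    #include <cstdint>
    #include <cassert>

    // LONG_TO_INT keeps only the low 32 bits of the wide value; this is
    // the contract NarrowRegLoc + StoreValue must preserve.
    static int32_t LongToInt(int64_t v) {
      // Truncation is modular; cast through uint32_t to keep it portable.
      return static_cast<int32_t>(static_cast<uint32_t>(static_cast<uint64_t>(v)));
    }

    int main() {
      assert(LongToInt(INT64_C(0x123456789ABCDEF0)) ==
             static_cast<int32_t>(UINT32_C(0x9ABCDEF0)));
      assert(LongToInt(INT64_C(-1)) == -1);
      return 0;
    }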
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 68856cdce0..320c0f4900 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -933,9 +933,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::LONG_TO_INT:
- rl_src[0] = UpdateLocWide(rl_src[0]);
- rl_src[0] = NarrowRegLoc(rl_src[0]);
- StoreValue(rl_dest, rl_src[0]);
+ GenLongToInt(rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_BYTE:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 88b260c0eb..5d78a6e25c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -810,6 +810,7 @@ class Mir2Lir : public Backend {
LIR* taken);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 09e1482b4d..9cb0bf53e6 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -90,6 +90,7 @@ class X86Mir2Lir : public Mir2Lir {
OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a063ce18c2..80cdc83497 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -3213,6 +3213,26 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
+void X86Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = UpdateLocWide(rl_src);
+ rl_src = NarrowRegLoc(rl_src);
+ StoreValue(rl_dest, rl_src);
+
+ if (cu_->target64) {
+ // If src and dest are in the same physical register, then StoreValue
+ // generates no operation, but we need an explicit 32-bit mov R, R to
+ // clear the higher 32 bits.
+ rl_dest = UpdateLoc(rl_dest);
+ if (rl_src.location == kLocPhysReg && rl_dest.location == kLocPhysReg
+ && IsSameReg(rl_src.reg, rl_dest.reg)) {
+ LIR* copy_lir = OpRegCopyNoInsert(rl_dest.reg, rl_dest.reg);
+ // Remove the nop flag set by OpRegCopyNoInsert when src == dest.
+ copy_lir->flags.is_nop = false;
+ AppendLIR(copy_lir);
+ }
+ }
+}
+
void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift) {
if (!cu_->target64) {
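
The x86-64 override above relies on an ISA detail: any instruction with a 32-bit register destination implicitly zeroes bits 63..32, so a seemingly redundant mov r32, r32 is exactly what scrubs the stale high half when StoreValue elided the copy. A small GCC/Clang-style check of that behavior (assumes an x86-64 host; not part of the patch):

    #include <cstdint>
    #include <cassert>

    int main() {
    #if defined(__x86_64__)
      uint64_t r = UINT64_C(0xDEADBEEF00000001);
      // A 32-bit register-to-itself move: the CPU zero-extends the result
      // into the full 64-bit register, clearing the old high bits.
      asm("movl %k0, %k0" : "+r"(r));
      assert(r == UINT64_C(0x00000001));
    #endif
      return 0;
    }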
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f092772239..4bd702d0aa 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2780,7 +2780,7 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
klass->SetDexTypeIndex(dex_class_def.class_idx_);
- klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
+ CHECK(klass->GetDexCacheStrings() != nullptr);
const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
if (class_data == nullptr) {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 835485c351..94753d4461 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -177,6 +177,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
Thread* self = Thread::Current();
+ // If ART is not running, or the thread is not attached to ART, pass the
+ // signal on to the next handler in the chain.
+ if (self == nullptr || Runtime::Current() == nullptr || !Runtime::Current()->IsStarted()) {
+ InvokeUserSignalHandler(sig, info, context);
+ return;
+ }
// Now set up the nested signal handler.
// TODO: add SIGSEGV back to the nested signals when we can handle running out stack gracefully.
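
For context, InvokeUserSignalHandler forwards the fault to whatever handler was installed before ART's fault manager. A simplified sketch of that chaining pattern under assumed names (hypothetical helpers; not ART's libsigchain implementation):

    #include <signal.h>

    // Disposition of the handler installed before ours, saved at
    // registration time (stand-in for the real chain's bookkeeping).
    static struct sigaction g_previous_action;

    // Forward a signal we chose not to handle to the previous handler.
    static void ChainToPreviousHandler(int sig, siginfo_t* info, void* context) {
      if (g_previous_action.sa_flags & SA_SIGINFO) {
        g_previous_action.sa_sigaction(sig, info, context);
      } else if (g_previous_action.sa_handler != SIG_DFL &&
                 g_previous_action.sa_handler != SIG_IGN) {
        g_previous_action.sa_handler(sig);
      } else {
        // No user handler: restore the default disposition and re-raise.
        sigaction(sig, &g_previous_action, nullptr);
        raise(sig);
      }
    }

    static void FaultHandler(int sig, siginfo_t* info, void* context) {
      // If the runtime can't claim this fault, pass it down the chain.
      ChainToPreviousHandler(sig, info, context);
    }

    int main() {
      struct sigaction action = {};
      action.sa_sigaction = FaultHandler;
      action.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &action, &g_previous_action);  // save the old handler
      return 0;
    }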
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3101c68599..9d2f6d1238 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -48,11 +48,20 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
}
// Need to check that we aren't the large object allocator, since the large object allocation
// code path includes this function. If we didn't check, we would have an infinite loop.
+ mirror::Object* obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
- return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
- pre_fence_visitor);
+ obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
+ pre_fence_visitor);
+ if (obj != nullptr) {
+ return obj;
+ } else {
+ // There should be an OOM exception; since we are retrying, clear it.
+ self->ClearException();
+ }
+ // If the large object allocation failed, try to use the normal spaces (main space,
+ // non-moving space). This can happen if there is significant virtual address space
+ // fragmentation.
}
- mirror::Object* obj;
AllocationTimer alloc_timer(this, &obj);
size_t bytes_allocated;
size_t usable_size;
@@ -171,10 +180,13 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
}
template <bool kInstrumented, typename PreFenceVisitor>
-inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
+inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor) {
- return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
+ // Save and restore the class in case it moves.
+ StackHandleScope<1> hs(self);
+ auto klass_wrapper = hs.NewHandleWrapper(klass);
+ return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
kAllocatorTypeLOS,
pre_fence_visitor);
}
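
The klass parameter becomes mirror::Class** because AllocLargeObject can suspend, and a moving GC may relocate the Class object while this frame still holds it; NewHandleWrapper registers the pointer as a GC root and writes the possibly-updated value back when the scope ends. A toy model of that write-back pattern (illustrative only, not ART's StackHandleScope):

    #include <cassert>

    struct Object { int data; };

    // Sketch of the HandleWrapper idea: keep a GC-visible copy of a raw
    // pointer and write it back to the caller's variable on scope exit,
    // so code after a moving collection sees the relocated address.
    class HandleWrapperSketch {
     public:
      explicit HandleWrapperSketch(Object** addr) : addr_(addr), handle_(*addr) {}
      ~HandleWrapperSketch() { *addr_ = handle_; }
      Object** Slot() { return &handle_; }  // the slot a moving GC would update
     private:
      Object** addr_;   // caller's variable
      Object* handle_;  // GC-visited root
    };

    int main() {
      Object old_loc{1}, new_loc{1};
      Object* klass = &old_loc;
      {
        HandleWrapperSketch wrapper(&klass);
        *wrapper.Slot() = &new_loc;  // simulate the GC moving the object
      }
      assert(klass == &new_loc);     // caller's pointer was fixed up
      return 0;
    }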
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7b679ea988..0fd0a9ff52 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -365,6 +365,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
uint8_t* heap_end = continuous_spaces_.back()->Limit();
size_t heap_capacity = heap_end - heap_begin;
// Remove the main backup space since it slows down the GC to have unused extra spaces.
+ // TODO: Avoid needing to do this.
if (main_space_backup_.get() != nullptr) {
RemoveSpace(main_space_backup_.get());
}
@@ -1600,6 +1601,8 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const uint64_t space_size_before_compaction = from_space->Size();
AddSpace(to_space);
+ // Make sure that we will have enough room to copy.
+ CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
// Leave as prot read so that we can still run ROSAlloc verification on this space.
from_space->GetMemMap()->Protect(PROT_READ);
@@ -1718,8 +1721,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
RemoveSpace(temp_space_);
temp_space_ = nullptr;
mem_map->Protect(PROT_READ | PROT_WRITE);
- CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
- mem_map->Size());
+ CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
+ std::min(mem_map->Size(), growth_limit_), mem_map->Size());
mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
@@ -1732,9 +1735,9 @@ void Heap::TransitionCollector(CollectorType collector_type) {
if (kIsDebugBuild && kUseRosAlloc) {
mem_map->Protect(PROT_READ | PROT_WRITE);
}
- main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
- mem_map->Size(), mem_map->Size(),
- name, true));
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(
+ mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
+ mem_map->Size(), name, true));
if (kIsDebugBuild && kUseRosAlloc) {
mem_map->Protect(PROT_NONE);
}
@@ -1976,7 +1979,8 @@ void Heap::PreZygoteFork() {
MemMap* mem_map = main_space_->ReleaseMemMap();
RemoveSpace(main_space_);
space::Space* old_main_space = main_space_;
- CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
+ CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
+ mem_map->Size());
delete old_main_space;
AddSpace(main_space_);
} else {
@@ -2988,7 +2992,18 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
void Heap::ClearGrowthLimit() {
growth_limit_ = capacity_;
- non_moving_space_->ClearGrowthLimit();
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsMallocSpace()) {
+ gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
+ malloc_space->ClearGrowthLimit();
+ malloc_space->SetFootprintLimit(malloc_space->Capacity());
+ }
+ }
+ // The backup space isn't in continuous_spaces_ (it is removed for performance reasons), so handle it explicitly.
+ if (main_space_backup_.get() != nullptr) {
+ main_space_backup_->ClearGrowthLimit();
+ main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
+ }
}
void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
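
The recurring std::min(mem_map->Size(), growth_limit_) clamp keeps a newly created main space from starting with a usable size above the app's growth limit while still reserving the full mapping, and the reworked ClearGrowthLimit can later lift every malloc space (plus the backup space) to full capacity. A toy calculation with assumed sizes (not ART code):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      // Assumed example values, e.g. dalvik.vm.heapsize vs. heapgrowthlimit.
      const size_t map_size = 512u * 1024 * 1024;      // full reservation
      const size_t growth_limit = 256u * 1024 * 1024;  // current app limit
      // The space is created clamped to the growth limit...
      const size_t initial_size = std::min(map_size, growth_limit);
      assert(initial_size == growth_limit);
      // ...and ClearGrowthLimit later raises the footprint to full capacity.
      const size_t cleared_size = map_size;
      assert(cleared_size >= initial_size);
      return 0;
    }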
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 69a573ef98..4e1a0ff242 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -654,7 +654,7 @@ class Heap {
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
- mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
+ mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 566505911b..bd3bfbf9fe 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -149,6 +149,7 @@ void Class::SetStatus(Status new_status, Thread* self) {
void Class::SetDexCache(DexCache* new_dex_cache) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
+ SetDexCacheStrings(new_dex_cache != nullptr ? new_dex_cache->GetStrings() : nullptr);
}
void Class::SetClassSize(uint32_t new_class_size) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 56867dd319..812cfd30b1 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -658,6 +658,7 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Also updates the dex_cache_strings_ variable from new_dex_cache.
void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE ObjectArray<ArtMethod>* GetDirectMethods()