Diffstat (limited to 'vm/alloc/HeapSource.cpp')
-rw-r--r--  vm/alloc/HeapSource.cpp | 136
1 file changed, 133 insertions(+), 3 deletions(-)
diff --git a/vm/alloc/HeapSource.cpp b/vm/alloc/HeapSource.cpp
index 93cdd2fd7..4d03da1f2 100644
--- a/vm/alloc/HeapSource.cpp
+++ b/vm/alloc/HeapSource.cpp
@@ -177,6 +177,13 @@ struct HeapSource {
     HeapBitmap markBits;

     /*
+     * Native allocations.
+     */
+    int32_t nativeBytesAllocated;
+    size_t nativeFootprintGCWatermark;
+    size_t nativeFootprintLimit;
+
+    /*
      * State for the GC daemon.
      */
     bool hasGcThread;
@@ -604,6 +611,9 @@ GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
     hs->softLimit = SIZE_MAX;    // no soft limit at first
     hs->numHeaps = 0;
     hs->sawZygote = gDvm.zygote;
+    hs->nativeBytesAllocated = 0;
+    hs->nativeFootprintGCWatermark = startSize;
+    hs->nativeFootprintLimit = startSize * 2;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
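
Note: at startup the two native watermarks are tied to the managed heap's start size; dvmHeapSourceUpdateMaxNativeFootprint() (added further down) retunes them once native allocations are registered. A sketch of the initial policy, assuming a hypothetical 4 MiB start size:

    // Hypothetical startup numbers; startSize is whatever the VM was launched with.
    size_t startSize = 4 * 1024 * 1024;
    size_t nativeFootprintGCWatermark = startSize;   // above this: wake the concurrent GC
    size_t nativeFootprintLimit = startSize * 2;     // above this: block and run GC_FOR_MALLOC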
@@ -883,10 +893,45 @@ void* dvmHeapSourceAlloc(size_t n)
               FRACTIONAL_MB(hs->softLimit), n);
         return NULL;
     }
-    void* ptr = mspace_calloc(heap->msp, 1, n);
-    if (ptr == NULL) {
-        return NULL;
+    void* ptr;
+    if (gDvm.lowMemoryMode) {
+        /* This is only necessary because mspace_calloc always memsets the
+         * allocated memory to 0. That is bad for memory usage, since it
+         * leads to dirty zero pages. If low memory mode is enabled, we use
+         * mspace_malloc, which doesn't memset the allocated memory, and
+         * madvise the page-aligned region back to the kernel.
+         */
+        ptr = mspace_malloc(heap->msp, n);
+        if (ptr == NULL) {
+            return NULL;
+        }
+        uintptr_t zero_begin = (uintptr_t)ptr;
+        uintptr_t zero_end = (uintptr_t)ptr + n;
+        /* Calculate the page-aligned region.
+         */
+        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
+        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
+        /* If our allocation spans more than one page, we attempt to madvise.
+         */
+        if (begin < end) {
+            /* madvise the page-aligned region back to the kernel.
+             */
+            madvise((void*)begin, end - begin, MADV_DONTNEED);
+            /* Zero the region after the page-aligned region.
+             */
+            memset((void*)end, 0, zero_end - end);
+            /* Zero out the region before the page-aligned region.
+             */
+            zero_end = begin;
+        }
+        memset((void*)zero_begin, 0, zero_end - zero_begin);
+    } else {
+        ptr = mspace_calloc(heap->msp, 1, n);
+        if (ptr == NULL) {
+            return NULL;
+        }
     }
+
     countAllocation(heap, ptr);
     /*
      * Check to see if a concurrent GC should be initiated.
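
The allocation path above avoids mspace_calloc in low memory mode because calloc's memset touches, and therefore dirties, every page of the allocation; mspace_malloc plus MADV_DONTNEED instead lets the kernel hand the interior pages back as clean demand-zero pages, so only the unaligned head and tail need an explicit memset. A standalone sketch of the same strategy, using plain malloc and sysconf in place of the heap's mspace and SYSTEM_PAGE_SIZE (assumptions for illustration):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Return n zeroed bytes without dirtying the allocation's interior pages. */
    static void* zeroed_alloc(size_t n) {
        const uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);
        void* ptr = malloc(n);                      /* stand-in for mspace_malloc */
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = zero_begin + n;
        uintptr_t begin = (zero_begin + page - 1) & ~(page - 1);   /* round up */
        uintptr_t end = zero_end & ~(page - 1);                    /* round down */
        if (begin < end) {
            /* Interior pages come back from the kernel as clean zero pages. */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            memset((void*)end, 0, zero_end - end);  /* zero the unaligned tail */
            zero_end = begin;                       /* only the head remains */
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);       /* zero the head */
        return ptr;
    }

On Linux, MADV_DONTNEED over private anonymous memory guarantees that subsequent reads see zero-filled pages, which is what makes skipping the interior memset safe.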
@@ -1423,3 +1468,88 @@ void *dvmHeapSourceGetImmuneLimit(bool isPartial)
         return NULL;
     }
 }
+
+static void dvmHeapSourceUpdateMaxNativeFootprint()
+{
+    /* Use the current target utilization ratio to determine the new native GC
+     * watermarks.
+     */
+    size_t nativeSize = gHs->nativeBytesAllocated;
+    size_t targetSize =
+        (nativeSize / gHs->targetUtilization) * HEAP_UTILIZATION_MAX;
+
+    if (targetSize > nativeSize + gHs->maxFree) {
+        targetSize = nativeSize + gHs->maxFree;
+    } else if (targetSize < nativeSize + gHs->minFree) {
+        targetSize = nativeSize + gHs->minFree;
+    }
+    gHs->nativeFootprintGCWatermark = targetSize;
+    gHs->nativeFootprintLimit = 2 * targetSize - nativeSize;
+}
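
A worked example of the arithmetic above, with illustrative numbers only (targetUtilization is a fixed-point fraction of HEAP_UTILIZATION_MAX, and minFree/maxFree are the heap's free-headroom bounds):

    // Assume nativeBytesAllocated = 6 MiB, targetUtilization = 512
    // (50% of HEAP_UTILIZATION_MAX = 1024), minFree = 2 MiB, maxFree = 8 MiB.
    //
    //   targetSize = (6 MiB / 512) * 1024 = 12 MiB
    //
    // 12 MiB lies between nativeSize + minFree (8 MiB) and
    // nativeSize + maxFree (14 MiB), so it is not clamped:
    //
    //   nativeFootprintGCWatermark = 12 MiB
    //   nativeFootprintLimit       = 2 * 12 MiB - 6 MiB = 18 MiB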
+
+/*
+ * Called from VMRuntime.registerNativeAllocation.
+ */
+void dvmHeapSourceRegisterNativeAllocation(int bytes)
+{
+    android_atomic_add(bytes, &gHs->nativeBytesAllocated);
+
+    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
+        /* The second watermark is higher than the gc watermark. If you hit
+         * this it means you are allocating native objects faster than the GC
+         * can keep up with. When this occurs, we do a GC for alloc.
+         */
+        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
+            Thread* self = dvmThreadSelf();
+
+            dvmRunFinalization();
+            if (dvmCheckException(self)) {
+                return;
+            }
+
+            dvmLockHeap();
+            bool waited = dvmWaitForConcurrentGcToComplete();
+            dvmUnlockHeap();
+            if (waited) {
+                // Just finished a GC, attempt to run finalizers.
+                dvmRunFinalization();
+                if (dvmCheckException(self)) {
+                    return;
+                }
+            }
+
+            // If we are still over the watermark, attempt a GC for alloc and run finalizers.
+            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
+                dvmLockHeap();
+                dvmWaitForConcurrentGcToComplete();
+                dvmCollectGarbageInternal(GC_FOR_MALLOC);
+                dvmUnlockHeap();
+                dvmRunFinalization();
+
+                if (dvmCheckException(self)) {
+                    return;
+                }
+            }
+            /* We have just run finalizers, update the native watermark since
+             * it is very likely that finalizers released native managed
+             * allocations.
+             */
+            dvmHeapSourceUpdateMaxNativeFootprint();
+        } else {
+            dvmSignalCond(&gHs->gcThreadCond);
+        }
+    }
+}
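
Both entry points are reached from Java through VMRuntime.registerNativeAllocation and VMRuntime.registerNativeFree. As a hypothetical illustration of the intended pairing (NativeBuffer is not Dalvik code; it assumes the declarations from alloc/HeapSource.h), a native backing store bracketed by the two calls might look like:

    #include <cstdlib>

    // Hypothetical RAII wrapper; shows the register/free pairing only.
    class NativeBuffer {
    public:
        explicit NativeBuffer(size_t n) : size_(n), data_(malloc(n)) {
            // Tell the GC that a Java-reachable object now pins n native
            // bytes, so native pressure can trigger managed collections.
            dvmHeapSourceRegisterNativeAllocation((int)n);
        }
        ~NativeBuffer() {   // typically reached from a finalizer
            free(data_);
            dvmHeapSourceRegisterNativeFree((int)size_);
        }
    private:
        size_t size_;
        void* data_;
    };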
+
+/*
+ * Called from VMRuntime.registerNativeFree.
+ */
+void dvmHeapSourceRegisterNativeFree(int bytes)
+{
+    int expected_size, new_size;
+    do {
+        expected_size = gHs->nativeBytesAllocated;
+        new_size = expected_size - bytes;
+        if (new_size < 0) {
+            break;
+        }
+    } while (android_atomic_cas(expected_size, new_size,
+                                &gHs->nativeBytesAllocated));
+}
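
The decrement loops on a compare-and-swap rather than doing a plain atomic subtract so that an unbalanced free can never drive the counter negative; android_atomic_cas returns 0 on success, hence the loop-while-nonzero. The same clamped decrement restated with std::atomic, as a sketch:

    #include <atomic>
    #include <cstdint>

    // Clamped decrement: never lets the counter drop below zero on over-free.
    static void registerNativeFree(std::atomic<int32_t>& allocated, int bytes) {
        int32_t expected = allocated.load();
        int32_t desired;
        do {
            desired = expected - bytes;
            if (desired < 0) {
                return;  // unbalanced free; leave the counter untouched
            }
            // compare_exchange_weak reloads `expected` on failure.
        } while (!allocated.compare_exchange_weak(expected, desired));
    }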