summary | refs | log | tree | commit | diff | stats
path: root/vm/alloc
diff options
context:
space:
mode:
authorCarl Shapiro <cshapiro@google.com>2011-03-21 13:35:24 -0700
committerCarl Shapiro <cshapiro@google.com>2011-03-21 13:35:24 -0700
commit3475f9cdb47a6d6f8ad2ce49bbc3af46bca92f09 (patch)
tree32bccadbd08a353a4650a6451218b0c854ec6fc2 /vm/alloc
parent14b63ca9f3ba5b58c6f8ad703c7a9c68028aa230 (diff)
downloadandroid_dalvik-3475f9cdb47a6d6f8ad2ce49bbc3af46bca92f09.tar.gz
android_dalvik-3475f9cdb47a6d6f8ad2ce49bbc3af46bca92f09.tar.bz2
android_dalvik-3475f9cdb47a6d6f8ad2ce49bbc3af46bca92f09.zip
Move finalization out of the VM.
This change introduces a new reference class whose referent points to instances requiring finalization. This makes the finalization of objects possible using a reference queue and a dedicated thread which removes objects from the queue.

Change-Id: I0ff6dd272f00ca08c6ed3aa667bf766a039a944e
Diffstat (limited to 'vm/alloc')
-rw-r--r--vm/alloc/Alloc.c1
-rw-r--r--vm/alloc/Heap.c23
-rw-r--r--vm/alloc/HeapInternal.h30
-rw-r--r--vm/alloc/HeapWorker.c84
-rw-r--r--vm/alloc/HeapWorker.h24
-rw-r--r--vm/alloc/MarkSweep.c151
-rw-r--r--vm/alloc/MarkSweep.h1
-rw-r--r--vm/alloc/Visit.c1
8 files changed, 63 insertions, 252 deletions
diff --git a/vm/alloc/Alloc.c b/vm/alloc/Alloc.c
index 8269290a3..370ac3576 100644
--- a/vm/alloc/Alloc.c
+++ b/vm/alloc/Alloc.c
@@ -169,6 +169,7 @@ Object* dvmAllocObject(ClassObject* clazz, int flags)
{
Object* newObj;
+ assert(clazz != NULL);
assert(dvmIsClassInitialized(clazz) || dvmIsClassInitializing(clazz));
/* allocate on GC heap; memory is zeroed out */
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
index c0ef8fb55..1a38a76cc 100644
--- a/vm/alloc/Heap.c
+++ b/vm/alloc/Heap.c
@@ -104,8 +104,6 @@ bool dvmHeapStartup()
* and reference objects.
*/
dvmInitMutex(&gDvm.heapWorkerListLock);
- gcHeap->finalizableRefs = NULL;
- gcHeap->pendingFinalizationRefs = NULL;
gcHeap->referenceOperations = NULL;
if (!dvmCardTableStartup(gDvm.heapMaximumSize)) {
@@ -135,12 +133,6 @@ void dvmHeapShutdown()
* cleaned up explicitly. The process may stick around, so we
* don't want to leak any native memory.
*/
- dvmHeapFreeLargeTable(gDvm.gcHeap->finalizableRefs);
- gDvm.gcHeap->finalizableRefs = NULL;
-
- dvmHeapFreeLargeTable(gDvm.gcHeap->pendingFinalizationRefs);
- gDvm.gcHeap->pendingFinalizationRefs = NULL;
-
dvmHeapFreeLargeTable(gDvm.gcHeap->referenceOperations);
gDvm.gcHeap->referenceOperations = NULL;
@@ -191,27 +183,15 @@ void dvmUnlockHeap()
*
* Typically only called by the heap worker thread.
*/
-Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
+Object *dvmGetNextHeapWorkerObject()
{
Object *obj;
GcHeap *gcHeap = gDvm.gcHeap;
- assert(op != NULL);
-
dvmLockMutex(&gDvm.heapWorkerListLock);
obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
if (obj != NULL) {
- *op = WORKER_ENQUEUE;
- } else {
- obj = dvmHeapGetNextObjectFromLargeTable(
- &gcHeap->pendingFinalizationRefs);
- if (obj != NULL) {
- *op = WORKER_FINALIZE;
- }
- }
-
- if (obj != NULL) {
/* Don't let the GC collect the object until the
* worker thread is done with it.
*/
@@ -709,6 +689,7 @@ void dvmCollectGarbageInternal(const GcSpec* spec)
dvmHeapProcessReferences(&gcHeap->softReferences,
spec->softReferencePolicy == CLEAR,
&gcHeap->weakReferences,
+ &gcHeap->finalizerReferences,
&gcHeap->phantomReferences);
#if defined(WITH_JIT)
diff --git a/vm/alloc/HeapInternal.h b/vm/alloc/HeapInternal.h
index d10a41708..7f8c9c5e3 100644
--- a/vm/alloc/HeapInternal.h
+++ b/vm/alloc/HeapInternal.h
@@ -29,37 +29,15 @@ typedef struct HeapSource HeapSource;
struct GcHeap {
HeapSource *heapSource;
- /* List of heap objects that will require finalization when
- * collected. I.e., instance objects
- *
- * a) whose class definitions override java.lang.Object.finalize()
- *
- * *** AND ***
- *
- * b) that have never been finalized.
- *
- * Note that this does not exclude non-garbage objects; this
- * is not the list of pending finalizations, but of objects that
- * potentially have finalization in their futures.
- */
- LargeHeapRefTable *finalizableRefs;
-
- /* The list of objects that need to have finalize() called
- * on themselves. These references are part of the root set.
- *
- * This table is protected by gDvm.heapWorkerListLock, which must
- * be acquired after the heap lock.
- */
- LargeHeapRefTable *pendingFinalizationRefs;
-
/* Linked lists of subclass instances of java/lang/ref/Reference
* that we find while recursing. The "next" pointers are hidden
* in the objects' <code>int Reference.vmData</code> fields.
* These lists are cleared and rebuilt each time the GC runs.
*/
- Object *softReferences;
- Object *weakReferences;
- Object *phantomReferences;
+ Object *softReferences;
+ Object *weakReferences;
+ Object *finalizerReferences;
+ Object *phantomReferences;
/* The list of Reference objects that need to be cleared and/or
* enqueued. The bottom two bits of the object pointers indicate
diff --git a/vm/alloc/HeapWorker.c b/vm/alloc/HeapWorker.c
index 57089f4f1..e0116d107 100644
--- a/vm/alloc/HeapWorker.c
+++ b/vm/alloc/HeapWorker.c
@@ -272,17 +272,11 @@ static void callMethod(Thread *self, Object *obj, Method *method)
static void doHeapWork(Thread *self)
{
Object *obj;
- HeapWorkerOperation op;
- int numFinalizersCalled, numReferencesEnqueued;
+ size_t numReferencesEnqueued;
- assert(gDvm.voffJavaLangObject_finalize >= 0);
assert(gDvm.methJavaLangRefReference_enqueueInternal != NULL);
-
- numFinalizersCalled = 0;
numReferencesEnqueued = 0;
- while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
- Method *method = NULL;
-
+ while ((obj = dvmGetNextHeapWorkerObject()) != NULL) {
/* Make sure the object hasn't been collected since
* being scheduled.
*/
@@ -290,30 +284,18 @@ static void doHeapWork(Thread *self)
/* Call the appropriate method(s).
*/
- if (op == WORKER_FINALIZE) {
- numFinalizersCalled++;
- method = obj->clazz->vtable[gDvm.voffJavaLangObject_finalize];
- assert(dvmCompareNameDescriptorAndMethod("finalize", "()V",
- method) == 0);
- assert(method->clazz != gDvm.classJavaLangObject);
- callMethod(self, obj, method);
- } else {
- assert(op == WORKER_ENQUEUE);
- assert(dvmGetFieldObject(
- obj, gDvm.offJavaLangRefReference_queue) != NULL);
- assert(dvmGetFieldObject(
- obj, gDvm.offJavaLangRefReference_queueNext) == NULL);
- numReferencesEnqueued++;
- callMethod(self, obj,
- gDvm.methJavaLangRefReference_enqueueInternal);
- }
+ assert(dvmGetFieldObject(
+ obj, gDvm.offJavaLangRefReference_queue) != NULL);
+ assert(dvmGetFieldObject(
+ obj, gDvm.offJavaLangRefReference_queueNext) == NULL);
+ numReferencesEnqueued++;
+ callMethod(self, obj, gDvm.methJavaLangRefReference_enqueueInternal);
/* Let the GC collect the object.
*/
dvmReleaseTrackedAlloc(obj, self);
}
- LOGV("Called %d finalizers\n", numFinalizersCalled);
- LOGV("Enqueued %d references\n", numReferencesEnqueued);
+ LOGV("Enqueued %zd references", numReferencesEnqueued);
}
/*
@@ -455,54 +437,6 @@ void dvmSignalHeapWorker(bool shouldLock)
}
/*
- * Block until all pending heap worker work has finished.
- */
-void dvmWaitForHeapWorkerIdle()
-{
- assert(gDvm.heapWorkerReady);
-
- dvmChangeStatus(NULL, THREAD_VMWAIT);
-
- dvmLockMutex(&gDvm.heapWorkerLock);
-
- /* Wake up the heap worker and wait for it to finish. */
- //TODO(http://b/issue?id=699704): This will deadlock if
- // called from finalize(), enqueue(), or clear(). We
- // need to detect when this is called from the HeapWorker
- // context and just give up.
- dvmSignalHeapWorker(false);
- dvmWaitCond(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);
-
- dvmUnlockMutex(&gDvm.heapWorkerLock);
-
- dvmChangeStatus(NULL, THREAD_RUNNING);
-}
-
-/*
- * Do not return until any pending heap work has finished. This may
- * or may not happen in the context of the calling thread.
- * No exceptions will escape.
- */
-void dvmRunFinalizationSync()
-{
- if (gDvm.zygote) {
- assert(!gDvm.heapWorkerReady);
-
- /* When in zygote mode, there is no heap worker.
- * Do the work in the current thread.
- */
- dvmLockMutex(&gDvm.heapWorkerLock);
- doHeapWork(dvmThreadSelf());
- dvmUnlockMutex(&gDvm.heapWorkerLock);
- } else {
- /* Outside of zygote mode, we can just ask the
- * heap worker thread to do the work.
- */
- dvmWaitForHeapWorkerIdle();
- }
-}
-
-/*
* Requests that dvmHeapSourceTrim() be called no sooner
* than timeoutSec seconds from now. If timeoutSec
* is zero, any pending trim is cancelled.
diff --git a/vm/alloc/HeapWorker.h b/vm/alloc/HeapWorker.h
index 45587ff12..67babc3b5 100644
--- a/vm/alloc/HeapWorker.h
+++ b/vm/alloc/HeapWorker.h
@@ -40,20 +40,6 @@ void dvmHeapWorkerShutdown(void);
void dvmSignalHeapWorker(bool shouldLock);
/*
- * Block until all pending heap worker work has finished.
- */
-void dvmWaitForHeapWorkerIdle(void);
-
-/*
- * Does not return until any pending finalizers have been called.
- * This may or may not happen in the context of the calling thread.
- * No exceptions will escape.
- *
- * Used by zygote, which doesn't have a HeapWorker thread.
- */
-void dvmRunFinalizationSync(void);
-
-/*
* Requests that dvmHeapSourceTrim() be called no sooner
* than timeoutSec seconds from now. If timeoutSec
* is zero, any pending trim is cancelled.
@@ -72,14 +58,6 @@ void dvmScheduleHeapSourceTrim(size_t timeoutSec);
void dvmAssertHeapWorkerThreadRunning();
/*
- * The type of operation for HeapWorker to perform on an object.
- */
-typedef enum HeapWorkerOperation {
- WORKER_FINALIZE = 0,
- WORKER_ENQUEUE = 1,
-} HeapWorkerOperation;
-
-/*
* Called by the worker thread to get the next object
* to finalize/enqueue/clear. Implemented in Heap.c.
*
@@ -87,6 +65,6 @@ typedef enum HeapWorkerOperation {
* Must be non-NULL.
* @return The object to operate on, or NULL.
*/
-Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op);
+Object *dvmGetNextHeapWorkerObject();
#endif /*_DALVIK_ALLOC_HEAP_WORKER*/
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
index bd872dff2..e5994e698 100644
--- a/vm/alloc/MarkSweep.c
+++ b/vm/alloc/MarkSweep.c
@@ -508,6 +508,7 @@ static int referenceClassFlags(const Object *obj)
{
int flags = CLASS_ISREFERENCE |
CLASS_ISWEAKREFERENCE |
+ CLASS_ISFINALIZERREFERENCE |
CLASS_ISPHANTOMREFERENCE;
return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}
@@ -529,6 +530,14 @@ static bool isWeakReference(const Object *obj)
}
/*
+ * Returns true if the object derives from FinalizerReference.
+ */
+static bool isFinalizerReference(const Object *obj)
+{
+ return referenceClassFlags(obj) & CLASS_ISFINALIZERREFERENCE;
+}
+
+/*
* Returns true if the object derives from PhantomReference.
*/
static bool isPhantomReference(const Object *obj)
@@ -606,6 +615,8 @@ static void delayReferenceReferent(Object *obj, GcMarkContext *ctx)
list = &gcHeap->softReferences;
} else if (isWeakReference(obj)) {
list = &gcHeap->weakReferences;
+ } else if (isFinalizerReference(obj)) {
+ list = &gcHeap->finalizerReferences;
} else if (isPhantomReference(obj)) {
list = &gcHeap->phantomReferences;
}
@@ -872,109 +883,35 @@ static void clearWhiteReferences(Object **list)
assert(*list == NULL);
}
-/* Find unreachable objects that need to be finalized,
- * and schedule them for finalization.
+/*
+ * Enqueues finalizer references with white referents. White
+ * referents are blackened, moved to the pendingNext field, and the
+ * referent field is cleared.
*/
-static void scheduleFinalizations(void)
+static void enqueueFinalizerReferences(Object **list)
{
- ReferenceTable newPendingRefs;
- LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
- Object **ref;
- Object **lastRef;
- size_t totalPendCount;
GcMarkContext *ctx = &gDvm.gcHeap->markContext;
-
- /*
- * All reachable objects have been marked.
- * Any unmarked finalizable objects need to be finalized.
- */
-
- /* Create a table that the new pending refs will
- * be added to.
- */
- if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
- //TODO: mark all finalizable refs and hope that
- // we can schedule them next time. Watch out,
- // because we may be expecting to free up space
- // by calling finalizers.
- LOGE("scheduleFinalizations(): no room for pending finalizations");
- dvmAbort();
- }
-
- /* Walk through finalizableRefs and move any unmarked references
- * to the list of new pending refs.
- */
- totalPendCount = 0;
- while (finRefs != NULL) {
- Object **gapRef;
- size_t newPendCount = 0;
-
- gapRef = ref = finRefs->refs.table;
- lastRef = finRefs->refs.nextEntry;
- while (ref < lastRef) {
- if (!isMarked(*ref, ctx)) {
- if (!dvmAddToReferenceTable(&newPendingRefs, *ref)) {
- //TODO: add the current table and allocate
- // a new, smaller one.
- LOGE("scheduleFinalizations(): "
- "no room for any more pending finalizations: %zd",
- dvmReferenceTableEntries(&newPendingRefs));
- dvmAbort();
- }
- newPendCount++;
- } else {
- /* This ref is marked, so will remain on finalizableRefs.
- */
- if (newPendCount > 0) {
- /* Copy it up to fill the holes.
- */
- *gapRef++ = *ref;
- } else {
- /* No holes yet; don't bother copying.
- */
- gapRef++;
- }
- }
- ref++;
+ size_t referentOffset = gDvm.offJavaLangRefReference_referent;
+ size_t pendingNextOffset = gDvm.offJavaLangRefReference_pendingNext;
+ bool doSignal = false;
+ while (*list != NULL) {
+ Object *ref = dequeuePendingReference(list);
+ Object *referent = dvmGetFieldObject(ref, referentOffset);
+ if (referent != NULL && !isMarked(referent, ctx)) {
+ markObject(referent, ctx);
+ /* If the referent is non-null the reference must queuable. */
+ assert(isEnqueuable(ref));
+ dvmSetFieldObject(ref, pendingNextOffset, referent);
+ clearReference(ref);
+ enqueueReference(ref);
+ doSignal = true;
}
- finRefs->refs.nextEntry = gapRef;
- //TODO: if the table is empty when we're done, free it.
- totalPendCount += newPendCount;
- finRefs = finRefs->next;
- }
- LOGV("scheduleFinalizations(): %zd finalizers triggered.", totalPendCount);
- if (totalPendCount == 0) {
- /* No objects required finalization.
- * Free the empty temporary table.
- */
- dvmClearReferenceTable(&newPendingRefs);
- return;
- }
-
- /* Add the new pending refs to the main list.
- */
- if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
- &newPendingRefs))
- {
- LOGE("scheduleFinalizations(): can't insert new pending finalizations");
- dvmAbort();
}
-
- //TODO: try compacting the main list with a memcpy loop
-
- /* Mark the refs we just moved; we don't want them or their
- * children to get swept yet.
- */
- ref = newPendingRefs.table;
- lastRef = newPendingRefs.nextEntry;
- assert(ref < lastRef);
- while (ref < lastRef) {
- assert(*ref != NULL);
- markObject(*ref, ctx);
- ref++;
+ if (doSignal) {
+ processMarkStack(ctx);
+ dvmSignalHeapWorker(false);
}
- processMarkStack(ctx);
- dvmSignalHeapWorker(false);
+ assert(*list == NULL);
}
/*
@@ -984,15 +921,14 @@ static void scheduleFinalizations(void)
* This is called when Object.<init> completes normally. It's also
* called for clones of finalizable objects.
*/
-void dvmSetFinalizable(Object* obj)
+void dvmSetFinalizable(Object *obj)
{
- dvmLockHeap();
- GcHeap* gcHeap = gDvm.gcHeap;
- if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs, obj)) {
- LOGE_HEAP("No room for any more finalizable objects");
- dvmAbort();
- }
- dvmUnlockHeap();
+ Thread *self = dvmThreadSelf();
+ assert(self != NULL);
+ Method *meth = gDvm.methJavaLangRefFinalizerReferenceAdd;
+ assert(meth != NULL);
+ JValue unused;
+ dvmCallMethod(self, meth, obj, &unused, obj);
}
/*
@@ -1000,10 +936,12 @@ void dvmSetFinalizable(Object* obj)
*/
void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
Object **weakReferences,
+ Object **finalizerReferences,
Object **phantomReferences)
{
assert(softReferences != NULL);
assert(weakReferences != NULL);
+ assert(finalizerReferences != NULL);
assert(phantomReferences != NULL);
/*
* Unless we are in the zygote or required to clear soft
@@ -1023,7 +961,7 @@ void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
* Preserve all white objects with finalize methods and schedule
* them for finalization.
*/
- scheduleFinalizations();
+ enqueueFinalizerReferences(finalizerReferences);
/*
* Clear all f-reachable soft and weak references with white
* referents.
@@ -1039,6 +977,7 @@ void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
*/
assert(*softReferences == NULL);
assert(*weakReferences == NULL);
+ assert(*finalizerReferences == NULL);
assert(*phantomReferences == NULL);
}
diff --git a/vm/alloc/MarkSweep.h b/vm/alloc/MarkSweep.h
index 0672aa800..c9f11e487 100644
--- a/vm/alloc/MarkSweep.h
+++ b/vm/alloc/MarkSweep.h
@@ -53,6 +53,7 @@ void dvmHeapScanMarkedObjects(bool isPartial);
void dvmHeapReScanMarkedObjects(void);
void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
Object **weakReferences,
+ Object **finalizerReferences,
Object **phantomReferences);
void dvmHeapFinishMarkStep(void);
void dvmHeapSweepSystemWeaks(void);
diff --git a/vm/alloc/Visit.c b/vm/alloc/Visit.c
index 1961fc9a7..e11d58378 100644
--- a/vm/alloc/Visit.c
+++ b/vm/alloc/Visit.c
@@ -251,7 +251,6 @@ void dvmVisitRoots(RootVisitor *visitor, void *arg)
visitReferenceTable(visitor, &gDvm.jniPinRefTable, 0, ROOT_VM_INTERNAL, arg);
dvmUnlockMutex(&gDvm.jniPinRefLock);
visitLargeHeapRefTable(visitor, gDvm.gcHeap->referenceOperations, ROOT_REFERENCE_CLEANUP, arg);
- visitLargeHeapRefTable(visitor, gDvm.gcHeap->pendingFinalizationRefs, ROOT_FINALIZING, arg);
visitThreads(visitor, arg);
(*visitor)(&gDvm.outOfMemoryObj, 0, ROOT_VM_INTERNAL, arg);
(*visitor)(&gDvm.internalErrorObj, 0, ROOT_VM_INTERNAL, arg);