| | | |
|---|---|---|
| author | Hans Boehm <hboehm@google.com> | 2016-08-02 18:39:30 -0700 |
| committer | Hans Boehm <hboehm@google.com> | 2016-08-13 11:17:51 -0700 |
| commit | 23c857ebd68fec4296f8483a41a0fff6692c9ac2 (patch) | |
| tree | bee2569fb78084bc6f04e456aefc06dad863b836 | /libutils/tests |
| parent | 07f14c9cdde718bface371d075ccc7f8bf16f098 (diff) | |
| download | system_core-23c857ebd68fec4296f8483a41a0fff6692c9ac2.tar.gz system_core-23c857ebd68fec4296f8483a41a0fff6692c9ac2.tar.bz2 system_core-23c857ebd68fec4296f8483a41a0fff6692c9ac2.zip | |
Make RefBase more robust and debuggable
This prevents two different kinds of client errors from causing
undetected memory corruption, and helps with the detection of others:
1. We no longer deallocate an object when its weak count drops to zero
before any strong reference has been taken. Otherwise a constructor that
allocates and then deallocates a weak pointer to `this` would hand back a
garbage (already freed) object, and we know that clients do allocate such
weak pointers in constructors, where their lifetimes are hard to trace.
2. We abort if a RefBase object is explicitly destroyed while
the weak count is nonzero. Otherwise a subsequent decrement
would cause a write to potentially reallocated memory.
3. We check counter values returned by atomic decrements for
plausibility, and fail immediately if they are not plausible.
We unconditionally log any case in which change 1 alters the previous
behavior. We abort in cases in which change 2 alters behavior, since those
reflect clear bugs.
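To make case 2 concrete, here is a minimal, hypothetical sketch (not part of this change; the class name Foo and the main() driver are illustrative) of the client bug that now aborts instead of silently corrupting memory:

```cpp
#include <utils/RefBase.h>

using android::RefBase;
using android::wp;

// Illustrative client class; any RefBase-derived type would do.
struct Foo : public RefBase {};

int main() {
    Foo* foo = new Foo();   // no strong reference taken yet
    wp<Foo> weak(foo);      // weak count is now nonzero
    delete foo;             // explicit destruction while a wp<> is alive:
                            // with this change the process aborts here,
                            // instead of letting 'weak' later decrement
                            // a count in freed (possibly reused) memory
    return 0;
}
```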
In case 1, a log message now indicates a possible leak. We have
not seen such a message in practice.
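As an illustration of case 1, a hedged sketch (the class name Widget is hypothetical, not from this change) of the constructor pattern that used to hand back freed memory and is now merely logged:

```cpp
#include <utils/RefBase.h>

using android::RefBase;
using android::sp;
using android::wp;

struct Widget : public RefBase {
    Widget() {
        // Temporary weak reference to the object under construction.
        // When it goes out of scope the weak count returns to zero while
        // no strong reference exists yet.  Previously that deallocated
        // the object, so the enclosing `new Widget()` returned garbage;
        // now the object survives and the event is only logged.
        wp<Widget> self(this);
    }
};

int main() {
    sp<Widget> w = new Widget();  // safe with this change
    return 0;
}
```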
The third change introduces a small amount of overhead into the
reference-count decrement path, but this should be negligible compared to
the cost of the decrement itself.
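For intuition only, a rough sketch of what such a plausibility check on an atomic decrement can look like; this is not the libutils implementation, and the names are made up:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// fetch_sub returns the counter value *before* the decrement.  A prior
// value of zero or less means the count was already exhausted, so the
// object (or its count) may have been freed or overwritten: fail fast.
static int32_t checkedDecrement(std::atomic<int32_t>& count) {
    int32_t old = count.fetch_sub(1, std::memory_order_release);
    if (old <= 0) {
        std::fprintf(stderr, "implausible refcount %d before decrement\n", (int)old);
        std::abort();
    }
    return old;
}

int main() {
    std::atomic<int32_t> refs(1);
    checkedDecrement(refs);     // fine: 1 -> 0
    // checkedDecrement(refs);  // would abort: the count is already zero
    return 0;
}
```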
Add a test for promote/attemptIncStrong that exercises both case 1 above
and the correct operation of concurrent attemptIncStrong calls.
Add some additional warnings and explanations to the RefBase
documentation.
Bug: 30503444
Bug: 30292291
Bug: 30292538
Change-Id: Ida92b9a2e247f543a948a75d221fbc0038dea66c
Diffstat (limited to 'libutils/tests')

| mode | file | lines |
|---|---|---|
| -rw-r--r-- | libutils/tests/RefBase_test.cpp | 116 |

1 file changed, 95 insertions, 21 deletions
```diff
diff --git a/libutils/tests/RefBase_test.cpp b/libutils/tests/RefBase_test.cpp
index 224c2ca72..2e0cf6e46 100644
--- a/libutils/tests/RefBase_test.cpp
+++ b/libutils/tests/RefBase_test.cpp
@@ -87,7 +87,7 @@ TEST(RefBase, WeakCopies) {
     EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
     ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
     wp1 = nullptr;
-    ASSERT_TRUE(isDeleted) << "foo2 was leaked!";
+    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
 }
 
@@ -121,8 +121,33 @@ static inline void waitFor(bool val) {
 cpu_set_t otherCpus;
 
+// Divide the cpus we're allowed to run on into myCpus and otherCpus.
+// Set origCpus to the processors we were originally allowed to run on.
+// Return false if origCpus doesn't include at least processors 0 and 1.
+static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
+                             cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
+    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
+        return false;
+    }
+    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
+        return false;
+    }
+    CPU_ZERO(myCpus);
+    CPU_ZERO(otherCpus);
+    CPU_OR(myCpus, myCpus, origCpus);
+    CPU_OR(otherCpus, otherCpus, origCpus);
+    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
+        // I get the even cores, the other thread gets the odd ones.
+        if (i & 1) {
+            CPU_CLR(i, myCpus);
+        } else {
+            CPU_CLR(i, otherCpus);
+        }
+    }
+    return true;
+}
+
 static void visit2AndRemove() {
-    EXPECT_TRUE(CPU_ISSET(1, &otherCpus));
     if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
         FAIL() << "setaffinity returned:" << errno;
     }
@@ -139,27 +164,10 @@ TEST(RefBase, RacingDestructors) {
     cpu_set_t myCpus;
     // Restrict us and the helper thread to disjoint cpu sets.
     // This prevents us from getting scheduled against each other,
-    // which would be atrociously slow. We fail if that's impossible.
-    if (sched_getaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
-        FAIL();
-    }
-    EXPECT_TRUE(CPU_ISSET(0, &origCpus));
-    if (CPU_ISSET(1, &origCpus)) {
-        CPU_ZERO(&myCpus);
-        CPU_ZERO(&otherCpus);
-        CPU_OR(&myCpus, &myCpus, &origCpus);
-        CPU_OR(&otherCpus, &otherCpus, &origCpus);
-        for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
-            // I get the even cores, the other thread gets the odd ones.
-            if (i & 1) {
-                CPU_CLR(i, &myCpus);
-            } else {
-                CPU_CLR(i, &otherCpus);
-            }
-        }
+    // which would be atrociously slow.
+    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
         std::thread t(visit2AndRemove);
         std::atomic<int> deleteCount(0);
-        EXPECT_TRUE(CPU_ISSET(0, &myCpus));
         if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
             FAIL() << "setaffinity returned:" << errno;
         }
@@ -182,3 +190,69 @@ TEST(RefBase, RacingDestructors) {
         ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
     } // Otherwise this is slow and probably pointless on a uniprocessor.
 }
+
+static wp<Bar> wpBuffer;
+static std::atomic<bool> wpBufferFull(false);
+
+// Wait until wpBufferFull has value val.
+static inline void wpWaitFor(bool val) {
+    while (wpBufferFull != val) {}
+}
+
+static void visit3AndRemove() {
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
+        FAIL() << "setaffinity returned:" << errno;
+    }
+    for (int i = 0; i < NITERS; ++i) {
+        wpWaitFor(true);
+        {
+            sp<Bar> sp1 = wpBuffer.promote();
+            // We implicitly check that sp1 != NULL
+            sp1->mVisited2 = true;
+        }
+        wpBuffer = nullptr;
+        wpBufferFull = false;
+    }
+}
+
+TEST(RefBase, RacingPromotions) {
+    cpu_set_t origCpus;
+    cpu_set_t myCpus;
+    // Restrict us and the helper thread to disjoint cpu sets.
+    // This prevents us from getting scheduled against each other,
+    // which would be atrociously slow.
+    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
+        std::thread t(visit3AndRemove);
+        std::atomic<int> deleteCount(0);
+        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
+            FAIL() << "setaffinity returned:" << errno;
+        }
+        for (int i = 0; i < NITERS; ++i) {
+            Bar* bar = new Bar(&deleteCount);
+            wp<Bar> wp1(bar);
+            bar->mVisited1 = true;
+            if (i % (NITERS / 10) == 0) {
+                // Do this rarely, since it generates a log message.
+                wp1 = nullptr;  // No longer destroys the object.
+                wp1 = bar;
+            }
+            wpBuffer = wp1;
+            ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
+            wpBufferFull = true;
+            // Promotion races with that in visit3AndRemove.
+            // This may or may not succeed, but it shouldn't interfere with
+            // the concurrent one.
+            sp<Bar> sp1 = wp1.promote();
+            wpWaitFor(false);  // Waits for other thread to drop strong pointer.
+            sp1 = nullptr;
+            // No strong pointers here.
+            sp1 = wp1.promote();
+            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
+        }
+        t.join();
+        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
+            FAIL();
+        }
+        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
+    } // Otherwise this is slow and probably pointless on a uniprocessor.
+}
```
