| author | Colin Cross <ccross@android.com> | 2016-01-14 15:35:40 -0800 |
|---|---|---|
| committer | Colin Cross <ccross@android.com> | 2016-02-08 17:08:49 -0800 |
| commit | bcb4ed3eaa92d23949d4ab33dbf1b2604bba8a18 | |
| tree | 3fdec871a3f3ca3762df545224971fa403bb23d4 | /libmemunreachable/tests |
| parent | aae1eb2c4f10f3d2c49455eb37c4ae4b38ffa47d | |
imprecise mark and sweep native memory leak detector
libmemunreachable uses an imprecise mark and sweep pass over all memory
allocated by jemalloc in order to find unreachable allocations.
Change-Id: Ia70bbf31f5b40ff71dab28cfd6cd06c5ef01a2d4
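For orientation before the tests themselves: the public interface exercised by MemUnreachable_test.cpp below comes down to two calls, `GetUnreachableMemory()` and `LogUnreachableMemory()`. A minimal sketch of how a debug build might invoke them — the `CheckForLeaks()` wrapper and the error handling are illustrative assumptions, not part of this patch; only the two entry points and `info.leaks` are taken from the code under test:

```cpp
// Hypothetical caller, sketched from the calls used in MemUnreachable_test.cpp.
#include <stdio.h>

#include <memunreachable/memunreachable.h>

void CheckForLeaks() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info)) {  // runs the mark-and-sweep pass over the heap
    fprintf(stderr, "leak check failed\n");
    return;
  }
  fprintf(stderr, "unreachable allocations: %zu\n", info.leaks.size());

  // Or log the leaks directly; the tests call this with a limit of 100.
  LogUnreachableMemory(true, 100);
}
```

`GetUnreachableMemory()` fills `info.leaks` with the allocations the sweep could not reach from any root, which is what the tests assert on.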
Diffstat (limited to 'libmemunreachable/tests')
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | libmemunreachable/tests/Allocator_test.cpp | 273 |
| -rw-r--r-- | libmemunreachable/tests/HeapWalker_test.cpp | 145 |
| -rw-r--r-- | libmemunreachable/tests/MemUnreachable_test.cpp | 218 |
| -rw-r--r-- | libmemunreachable/tests/ThreadCapture_test.cpp | 351 |
4 files changed, 987 insertions, 0 deletions
diff --git a/libmemunreachable/tests/Allocator_test.cpp b/libmemunreachable/tests/Allocator_test.cpp
new file mode 100644
index 000000000..d8e473eba
--- /dev/null
+++ b/libmemunreachable/tests/Allocator_test.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <Allocator.h>
+#include <sys/time.h>
+
+#include <chrono>
+#include <functional>
+#include <list>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <ScopedDisableMalloc.h>
+
+
+std::function<void()> ScopedAlarm::func_;
+
+using namespace std::chrono_literals;
+
+class AllocatorTest : public testing::Test {
+ protected:
+  AllocatorTest() : heap(), disable_malloc_() {}
+  virtual void SetUp() {
+    heap_count = 0;
+  }
+  virtual void TearDown() {
+    ASSERT_EQ(heap_count, 0);
+    ASSERT_TRUE(heap.empty());
+    ASSERT_FALSE(disable_malloc_.timed_out());
+  }
+  Heap heap;
+ private:
+  ScopedDisableMallocTimeout disable_malloc_;
+};
+
+TEST_F(AllocatorTest, simple) {
+  Allocator<char[100]> allocator(heap);
+  void *ptr = allocator.allocate();
+  ASSERT_TRUE(ptr != NULL);
+  allocator.deallocate(ptr);
+}
+
+TEST_F(AllocatorTest, multiple) {
+  Allocator<char[100]> allocator(heap);
+  void *ptr1 = allocator.allocate();
+  ASSERT_TRUE(ptr1 != NULL);
+  void *ptr2 = allocator.allocate();
+  ASSERT_TRUE(ptr2 != NULL);
+  ASSERT_NE(ptr1, ptr2);
+  allocator.deallocate(ptr1);
+  void *ptr3 = allocator.allocate();
+  ASSERT_EQ(ptr1, ptr3);
+  allocator.deallocate(ptr3);
+  allocator.deallocate(ptr2);
+}
+
+TEST_F(AllocatorTest, many) {
+  const int num = 4096;
+  const int size = 128;
+  Allocator<char[size]> allocator(heap);
+  void *ptr[num];
+  for (int i = 0; i < num; i++) {
+    ptr[i] = allocator.allocate();
+    memset(ptr[i], 0xaa, size);
+    *(reinterpret_cast<unsigned char*>(ptr[i])) = i;
+  }
+
+  for (int i = 0; i < num; i++) {
+    for (int j = 0; j < num; j++) {
+      if (i != j) {
+        ASSERT_NE(ptr[i], ptr[j]);
+      }
+    }
+  }
+
+  for (int i = 0; i < num; i++) {
+    ASSERT_EQ(*(reinterpret_cast<unsigned char*>(ptr[i])), i & 0xFF);
+    allocator.deallocate(ptr[i]);
+  }
+}
+
+TEST_F(AllocatorTest, large) {
+  const size_t size = 1024 * 1024;
+  Allocator<char[size]> allocator(heap);
+  void *ptr = allocator.allocate();
+  memset(ptr, 0xaa, size);
+  allocator.deallocate(ptr);
+}
+
+TEST_F(AllocatorTest, many_large) {
+  const int num = 128;
+  const int size = 1024 * 1024;
+  Allocator<char[size]> allocator(heap);
+  void *ptr[num];
+  for (int i = 0; i < num; i++) {
+    ptr[i] = allocator.allocate();
+    memset(ptr[i], 0xaa, size);
+    *(reinterpret_cast<unsigned char*>(ptr[i])) = i;
+  }
+
+  for (int i = 0; i < num; i++) {
+    ASSERT_EQ(*(reinterpret_cast<unsigned char*>(ptr[i])), i & 0xFF);
+    allocator.deallocate(ptr[i]);
+  }
+}
+
+TEST_F(AllocatorTest, copy) {
+  Allocator<char[100]> a(heap);
+  Allocator<char[200]> b = a;
+  Allocator<char[300]> c(b);
+  Allocator<char[100]> d(a);
+  Allocator<char[100]> e(heap);
+
+  ASSERT_EQ(a, b);
+  ASSERT_EQ(a, c);
+  ASSERT_EQ(a, d);
+  ASSERT_EQ(a, e);
+
+  void* ptr1 = a.allocate();
+  void* ptr2 = b.allocate();
+  void* ptr3 = c.allocate();
+  void* ptr4 = d.allocate();
+
+  b.deallocate(ptr1);
+  d.deallocate(ptr2);
+  a.deallocate(ptr3);
+  c.deallocate(ptr4);
+}
+
+TEST_F(AllocatorTest, stl_vector) {
+  auto v = allocator::vector<int>(Allocator<int>(heap));
+  for (int i = 0; i < 1024; i++) {
+    v.push_back(i);
+  }
+  for (int i = 0; i < 1024; i++) {
+    ASSERT_EQ(v[i], i);
+  }
+  v.clear();
+}
+
+TEST_F(AllocatorTest, stl_list) {
+  auto v = allocator::list<int>(Allocator<int>(heap));
+  for (int i = 0; i < 1024; i++) {
+    v.push_back(i);
+  }
+  int i = 0;
+  for (auto iter = v.begin(); iter != v.end(); iter++, i++) {
+    ASSERT_EQ(*iter, i);
+  }
+  v.clear();
+}
+
+TEST_F(AllocatorTest, shared) {
+  Allocator<int> allocator(heap);
+
+  Allocator<int>::shared_ptr ptr = allocator.make_shared(0);
+  {
+    auto ptr2 = ptr;
+  }
+  ASSERT_NE(ptr, nullptr);
+}
+
+TEST_F(AllocatorTest, unique) {
+  Allocator<int> allocator(heap);
+
+  Allocator<int>::unique_ptr ptr = allocator.make_unique(0);
+
+  ASSERT_NE(ptr, nullptr);
+}
+
+class DisableMallocTest : public ::testing::Test {
+ protected:
+  void alarm(std::chrono::microseconds us) {
+    std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
+    itimerval t = itimerval();
+    t.it_value.tv_sec = s.count();
+    t.it_value.tv_usec = (us - s).count();
+    setitimer(ITIMER_REAL, &t, NULL);
+  }
+};
+
+TEST_F(DisableMallocTest, reenable) {
+  ASSERT_EXIT({
+    alarm(100ms);
+    void *ptr1 = malloc(128);
+    ASSERT_NE(ptr1, nullptr);
+    free(ptr1);
+    {
+      ScopedDisableMalloc disable_malloc;
+    }
+    void *ptr2 = malloc(128);
+    ASSERT_NE(ptr2, nullptr);
+    free(ptr2);
+    _exit(1);
+  }, ::testing::ExitedWithCode(1), "");
+}
+
+TEST_F(DisableMallocTest, deadlock_allocate) {
+  ASSERT_DEATH({
+    void *ptr = malloc(128);
+    ASSERT_NE(ptr, nullptr);
+    free(ptr);
+    {
+      alarm(100ms);
+      ScopedDisableMalloc disable_malloc;
+      void* ptr = malloc(128);
+      ASSERT_NE(ptr, nullptr);
+      free(ptr);
+    }
+  }, "");
+}
+
+TEST_F(DisableMallocTest, deadlock_new) {
+  ASSERT_DEATH({
+    char* ptr = new(char);
+    ASSERT_NE(ptr, nullptr);
+    delete(ptr);
+    {
+      alarm(100ms);
+      ScopedDisableMalloc disable_malloc;
+      char* ptr = new(char);
+      ASSERT_NE(ptr, nullptr);
+      delete(ptr);
+    }
+  }, "");
+}
+
+TEST_F(DisableMallocTest, deadlock_delete) {
+  ASSERT_DEATH({
+    char* ptr = new(char);
+    ASSERT_NE(ptr, nullptr);
+    {
+      alarm(250ms);
+      ScopedDisableMalloc disable_malloc;
+      delete(ptr);
+    }
+  }, "");
+}
+
+TEST_F(DisableMallocTest, deadlock_free) {
+  ASSERT_DEATH({
+    void *ptr = malloc(128);
+    ASSERT_NE(ptr, nullptr);
+    {
+      alarm(100ms);
+      ScopedDisableMalloc disable_malloc;
+      free(ptr);
+    }
+  }, "");
}
+
+TEST_F(DisableMallocTest, deadlock_fork) {
+  ASSERT_DEATH({
+    {
+      alarm(100ms);
+      ScopedDisableMalloc disable_malloc;
+      fork();
+    }
+  }, "");
+}
diff --git a/libmemunreachable/tests/HeapWalker_test.cpp b/libmemunreachable/tests/HeapWalker_test.cpp
new file mode 100644
index 000000000..9921eb65e
--- /dev/null
+++ b/libmemunreachable/tests/HeapWalker_test.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "HeapWalker.h"
+
+#include <gtest/gtest.h>
+#include <ScopedDisableMalloc.h>
+#include "Allocator.h"
+
+class HeapWalkerTest : public ::testing::Test {
+ public:
+  HeapWalkerTest() : disable_malloc_(), heap_() {}
+
+  void TearDown() {
+    ASSERT_TRUE(heap_.empty());
+    if (!HasFailure()) {
+      ASSERT_FALSE(disable_malloc_.timed_out());
+    }
+  }
+
+ protected:
+  ScopedDisableMallocTimeout disable_malloc_;
+  Heap heap_;
+};
+
+TEST_F(HeapWalkerTest, allocation) {
+  HeapWalker heap_walker(heap_);
+  ASSERT_TRUE(heap_walker.Allocation(3, 4));
+  ASSERT_TRUE(heap_walker.Allocation(2, 3));
+  ASSERT_TRUE(heap_walker.Allocation(4, 5));
+  ASSERT_TRUE(heap_walker.Allocation(6, 7));
+  ASSERT_TRUE(heap_walker.Allocation(0, 1));
+}
+
+TEST_F(HeapWalkerTest, overlap) {
+  HeapWalker heap_walker(heap_);
+  ASSERT_TRUE(heap_walker.Allocation(2, 3));
+  ASSERT_TRUE(heap_walker.Allocation(3, 4));
+  ASSERT_FALSE(heap_walker.Allocation(2, 3));
+  ASSERT_FALSE(heap_walker.Allocation(1, 3));
+  ASSERT_FALSE(heap_walker.Allocation(1, 4));
+  ASSERT_FALSE(heap_walker.Allocation(1, 5));
+  ASSERT_FALSE(heap_walker.Allocation(3, 4));
+  ASSERT_FALSE(heap_walker.Allocation(3, 5));
+  ASSERT_TRUE(heap_walker.Allocation(4, 5));
+  ASSERT_TRUE(heap_walker.Allocation(1, 2));
+}
+
+TEST_F(HeapWalkerTest, zero) {
+  HeapWalker heap_walker(heap_);
+  ASSERT_TRUE(heap_walker.Allocation(2, 2));
+  ASSERT_FALSE(heap_walker.Allocation(2, 2));
+  ASSERT_TRUE(heap_walker.Allocation(3, 3));
+  ASSERT_TRUE(heap_walker.Allocation(1, 1));
+  ASSERT_FALSE(heap_walker.Allocation(2, 3));
+}
+
+#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(buffer)
+#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(buffer) + sizeof(buffer))
+
+TEST_F(HeapWalkerTest, leak) {
+  void* buffer1[16]{};
+  char buffer2[16]{};
+  buffer1[0] = &buffer2[0] - sizeof(void*);
+  buffer1[1] = &buffer2[15] + sizeof(void*);
+
+  HeapWalker heap_walker(heap_);
+  heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
+
+  allocator::vector<Range> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(1U, num_leaks);
+  EXPECT_EQ(16U, leaked_bytes);
+  ASSERT_EQ(1U, leaked.size());
+  EXPECT_EQ(buffer_begin(buffer2), leaked[0].begin);
+  EXPECT_EQ(buffer_end(buffer2), leaked[0].end);
+}
+
+TEST_F(HeapWalkerTest, live) {
+  const int from_buffer_entries = 4;
+  const int to_buffer_bytes = 16;
+
+  for (int i = 0; i < from_buffer_entries; i++) {
+    for (int j = 0; j < to_buffer_bytes; j++) {
+      void* buffer1[from_buffer_entries]{};
+      char buffer2[to_buffer_bytes]{};
+      buffer1[i] = &buffer2[j];
+
+      HeapWalker heap_walker(heap_);
+      heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
+      heap_walker.Root(buffer_begin(buffer1), buffer_end(buffer1));
+
+      allocator::vector<Range> leaked(heap_);
+      size_t num_leaks = SIZE_T_MAX;
+      size_t leaked_bytes = SIZE_T_MAX;
+      ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
+
+      EXPECT_EQ(0U, num_leaks);
+      EXPECT_EQ(0U, leaked_bytes);
+      EXPECT_EQ(0U, leaked.size());
+    }
+  }
+}
+
+TEST_F(HeapWalkerTest, unaligned) {
+  const int from_buffer_entries = 4;
+  const int to_buffer_bytes = 16;
+  void* buffer1[from_buffer_entries]{};
+  char buffer2[to_buffer_bytes]{};
+
+  buffer1[1] = &buffer2;
+
+  for (unsigned int i = 0; i < sizeof(uintptr_t); i++) {
+    for (unsigned int j = 0; j < sizeof(uintptr_t); j++) {
+      HeapWalker heap_walker(heap_);
+      heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
+      heap_walker.Root(buffer_begin(buffer1) + i, buffer_end(buffer1) - j);
+
+      allocator::vector<Range> leaked(heap_);
+      size_t num_leaks = SIZE_T_MAX;
+      size_t leaked_bytes = SIZE_T_MAX;
+      ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
+
+      EXPECT_EQ(0U, num_leaks);
+      EXPECT_EQ(0U, leaked_bytes);
+      EXPECT_EQ(0U, leaked.size());
+    }
+  }
+}
diff --git a/libmemunreachable/tests/MemUnreachable_test.cpp b/libmemunreachable/tests/MemUnreachable_test.cpp
new file mode 100644
index 000000000..0747b12b6
--- /dev/null
+++ b/libmemunreachable/tests/MemUnreachable_test.cpp
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+
+#include <gtest/gtest.h>
+
+#include <memunreachable/memunreachable.h>
+
+void* ptr;
+
+class HiddenPointer {
+ public:
+  HiddenPointer(size_t size = 256) {
+    Set(malloc(size));
+  }
+  ~HiddenPointer() {
+    Free();
+  }
+  void* Get() {
+    return reinterpret_cast<void*>(~ptr_);
+  }
+  void Free() {
+    free(Get());
+    Set(nullptr);
+  }
+ private:
+  void Set(void* ptr) {
+    ptr_ = ~reinterpret_cast<uintptr_t>(ptr);
+  }
+  volatile uintptr_t ptr_;
+};
+
+static void Ref(void* ptr) {
+  write(0, ptr, 0);
+}
+
+TEST(MemunreachableTest, clean) {
+  UnreachableMemoryInfo info;
+
+  ASSERT_TRUE(LogUnreachableMemory(true, 100));
+
+  ASSERT_TRUE(GetUnreachableMemory(info));
+  ASSERT_EQ(0U, info.leaks.size());
+}
+
+TEST(MemunreachableTest, stack) {
+  HiddenPointer hidden_ptr;
+
+  {
+    void* ptr = hidden_ptr.Get();
+    Ref(ptr);
+
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+
+    Ref(ptr);
+  }
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(1U, info.leaks.size());
+  }
+
+  hidden_ptr.Free();
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+}
+
+TEST(MemunreachableTest, global) {
+  HiddenPointer hidden_ptr;
+
+  ptr = hidden_ptr.Get();
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+
+  ptr = NULL;
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(1U, info.leaks.size());
+  }
+
+  hidden_ptr.Free();
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+}
+
+TEST(MemunreachableTest, tls) {
+  HiddenPointer hidden_ptr;
+  pthread_key_t key;
+  pthread_key_create(&key, NULL);
+
+  pthread_setspecific(key, hidden_ptr.Get());
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+
+  pthread_setspecific(key, nullptr);
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(1U, info.leaks.size());
+  }
+
+  hidden_ptr.Free();
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+
+  pthread_key_delete(key);
+}
+
+TEST(MemunreachableTest, twice) {
+  HiddenPointer hidden_ptr;
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(1U, info.leaks.size());
+  }
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(1U, info.leaks.size());
+  }
+
+  hidden_ptr.Free();
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+}
+
+TEST(MemunreachableTest, log) {
+  HiddenPointer hidden_ptr;
+
+  ASSERT_TRUE(LogUnreachableMemory(true, 100));
+
+  hidden_ptr.Free();
+
+  {
+    UnreachableMemoryInfo info;
+
+    ASSERT_TRUE(GetUnreachableMemory(info));
+    ASSERT_EQ(0U, info.leaks.size());
+  }
+}
+
+TEST(MemunreachableTest, notdumpable) {
+  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0));
+
+  HiddenPointer hidden_ptr;
+
+  ASSERT_TRUE(LogUnreachableMemory(true, 100));
+
+  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 1));
+}
+
+TEST(MemunreachableTest, leak_lots) {
+  std::vector<HiddenPointer> hidden_ptrs;
+  hidden_ptrs.resize(1024);
+
+  ASSERT_TRUE(LogUnreachableMemory(true, 100));
+}
diff --git a/libmemunreachable/tests/ThreadCapture_test.cpp b/libmemunreachable/tests/ThreadCapture_test.cpp
new file mode 100644
index 000000000..cefe94e6a
--- /dev/null
+++ b/libmemunreachable/tests/ThreadCapture_test.cpp
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ThreadCapture.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <thread>
+
+#include <gtest/gtest.h>
+
+#include <android-base/unique_fd.h>
+
+#include "Allocator.h"
+#include "ScopedDisableMalloc.h"
+#include "ScopedPipe.h"
+
+using namespace std::chrono_literals;
+
+class ThreadListTest : public ::testing::TestWithParam<int> {
+ public:
+  ThreadListTest() : stop_(false) {}
+
+  ~ThreadListTest() {
+    // pthread_join may return before the entry in /proc/pid/task/ is gone,
+    // loop until ListThreads only finds the main thread so the next test
+    // doesn't fail.
+    WaitForThreads();
+  }
+
+  virtual void TearDown() {
+    ASSERT_TRUE(heap.empty());
+  }
+
+ protected:
+  template<class Function>
+  void StartThreads(unsigned int threads, Function&& func) {
+    threads_.reserve(threads);
+    tids_.reserve(threads);
+    for (unsigned int i = 0; i < threads; i++) {
+      threads_.emplace_back([&, i, threads, this]() {
+        {
+          std::lock_guard<std::mutex> lk(m_);
+          tids_.push_back(gettid());
+          if (tids_.size() == threads) {
+            cv_start_.notify_one();
+          }
+        }
+
+        func();
+
+        {
+          std::unique_lock<std::mutex> lk(m_);
+          cv_stop_.wait(lk, [&] {return stop_;});
+        }
+      });
+    }
+
+    {
+      std::unique_lock<std::mutex> lk(m_);
+      cv_start_.wait(lk, [&]{ return tids_.size() == threads; });
+    }
+  }
+
+  void StopThreads() {
+    {
+      std::lock_guard<std::mutex> lk(m_);
+      stop_ = true;
+    }
+    cv_stop_.notify_all();
+
+    for (auto i = threads_.begin(); i != threads_.end(); i++) {
+      i->join();
+    }
+    threads_.clear();
+    tids_.clear();
+  }
+
+  std::vector<pid_t>& tids() {
+    return tids_;
+  }
+
+  Heap heap;
+
+ private:
+  void WaitForThreads() {
+    auto tids = TidList{heap};
+    ThreadCapture thread_capture{getpid(), heap};
+
+    for (unsigned int i = 0; i < 100; i++) {
+      EXPECT_TRUE(thread_capture.ListThreads(tids));
+      if (tids.size() == 1) {
+        break;
+      }
+      std::this_thread::sleep_for(10ms);
+    }
+    EXPECT_EQ(1U, tids.size());
+  }
+
+  std::mutex m_;
+  std::condition_variable cv_start_;
+  std::condition_variable cv_stop_;
+  bool stop_;
+  std::vector<pid_t> tids_;
+
+  std::vector<std::thread> threads_;
+};
+
+TEST_F(ThreadListTest, list_one) {
+  ScopedDisableMallocTimeout disable_malloc;
+
+  ThreadCapture thread_capture(getpid(), heap);
+
+  auto expected_tids = allocator::vector<pid_t>(1, getpid(), heap);
+  auto list_tids = allocator::vector<pid_t>(heap);
+
+  ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+
+  ASSERT_EQ(expected_tids, list_tids);
+
+  if (!HasFailure()) {
+    ASSERT_FALSE(disable_malloc.timed_out());
+  }
+}
+
+TEST_P(ThreadListTest, list_some) {
+  const unsigned int threads = GetParam() - 1;
+
+  StartThreads(threads, [](){});
+  std::vector<pid_t> expected_tids = tids();
+  expected_tids.push_back(getpid());
+
+  auto list_tids = allocator::vector<pid_t>(heap);
+
+  {
+    ScopedDisableMallocTimeout disable_malloc;
+
+    ThreadCapture thread_capture(getpid(), heap);
+
+    ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+
+    if (!HasFailure()) {
+      ASSERT_FALSE(disable_malloc.timed_out());
+    }
+  }
+
+  StopThreads();
+
+  std::sort(list_tids.begin(), list_tids.end());
+  std::sort(expected_tids.begin(), expected_tids.end());
+
+  ASSERT_EQ(expected_tids.size(), list_tids.size());
+  EXPECT_TRUE(std::equal(expected_tids.begin(), expected_tids.end(), list_tids.begin()));
+}
+
+INSTANTIATE_TEST_CASE_P(ThreadListTest, ThreadListTest, ::testing::Values(1, 2, 10, 1024));
+
+class ThreadCaptureTest : public ThreadListTest {
+ public:
+  ThreadCaptureTest() {}
+  ~ThreadCaptureTest() {}
+  void Fork(std::function<void()>&& child_init,
+      std::function<void()>&& child_cleanup,
+      std::function<void(pid_t)>&& parent) {
+
+    ScopedPipe start_pipe;
+    ScopedPipe stop_pipe;
+
+    int pid = fork();
+
+    if (pid == 0) {
+      // child
+      child_init();
+      EXPECT_EQ(1, TEMP_FAILURE_RETRY(write(start_pipe.Sender(), "+", 1))) << strerror(errno);
+      char buf;
+      EXPECT_EQ(1, TEMP_FAILURE_RETRY(read(stop_pipe.Receiver(), &buf, 1))) << strerror(errno);
+      child_cleanup();
+      _exit(0);
+    } else {
+      // parent
+      ASSERT_GT(pid, 0);
+      char buf;
+      ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(start_pipe.Receiver(), &buf, 1))) << strerror(errno);
+
+      parent(pid);
+
+      ASSERT_EQ(1, TEMP_FAILURE_RETRY(write(stop_pipe.Sender(), "+", 1))) << strerror(errno);
+      siginfo_t info{};
+      ASSERT_EQ(0, TEMP_FAILURE_RETRY(waitid(P_PID, pid, &info, WEXITED))) << strerror(errno);
+    }
+  }
+};
+
+TEST_P(ThreadCaptureTest, capture_some) {
+  const unsigned int threads = GetParam();
+
+  Fork([&](){
+    // child init
+    StartThreads(threads - 1, [](){});
+  },
+  [&](){
+    // child cleanup
+    StopThreads();
+  },
+  [&](pid_t child){
+    // parent
+    ASSERT_GT(child, 0);
+
+    {
+      ScopedDisableMallocTimeout disable_malloc;
+
+      ThreadCapture thread_capture(child, heap);
+      auto list_tids = allocator::vector<pid_t>(heap);
+
+      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+      ASSERT_EQ(threads, list_tids.size());
+
+      ASSERT_TRUE(thread_capture.CaptureThreads());
+
+      auto thread_info = allocator::vector<ThreadInfo>(heap);
+      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
+      ASSERT_EQ(threads, thread_info.size());
+      ASSERT_TRUE(thread_capture.ReleaseThreads());
+
+      if (!HasFailure()) {
+        ASSERT_FALSE(disable_malloc.timed_out());
+      }
+    }
+  });
+}
+
+INSTANTIATE_TEST_CASE_P(ThreadCaptureTest, ThreadCaptureTest, ::testing::Values(1, 2, 10, 1024));
+
+TEST_F(ThreadCaptureTest, capture_kill) {
+  int ret = fork();
+
+  if (ret == 0) {
+    // child
+    sleep(10);
+  } else {
+    // parent
+    ASSERT_GT(ret, 0);
+
+    {
+      ScopedDisableMallocTimeout disable_malloc;
+
+      ThreadCapture thread_capture(ret, heap);
+      thread_capture.InjectTestFunc([&](pid_t tid){
+        syscall(SYS_tgkill, ret, tid, SIGKILL);
+        usleep(10000);
+      });
+      auto list_tids = allocator::vector<pid_t>(heap);
+
+      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+      ASSERT_EQ(1U, list_tids.size());
+
+      ASSERT_FALSE(thread_capture.CaptureThreads());
+
+      if (!HasFailure()) {
+        ASSERT_FALSE(disable_malloc.timed_out());
+      }
+    }
+  }
+}
+
+TEST_F(ThreadCaptureTest, capture_signal) {
+  const int sig = SIGUSR1;
+
+  ScopedPipe pipe;
+
+  // For signal handler
+  static ScopedPipe* g_pipe;
+
+  Fork([&](){
+    // child init
+    pipe.CloseReceiver();
+
+    g_pipe = &pipe;
+
+    struct sigaction act{};
+    act.sa_handler = [](int){
+      char buf = '+';
+      write(g_pipe->Sender(), &buf, 1);
+      g_pipe->CloseSender();
+    };
+    sigaction(sig, &act, NULL);
+    sigset_t set;
+    sigemptyset(&set);
+    sigaddset(&set, sig);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+  },
+  [&](){
+    // child cleanup
+    g_pipe = nullptr;
+    pipe.Close();
+  },
+  [&](pid_t child){
+    // parent
+    ASSERT_GT(child, 0);
+    pipe.CloseSender();
+
+    {
+      ScopedDisableMallocTimeout disable_malloc;
+
+      ThreadCapture thread_capture(child, heap);
+      thread_capture.InjectTestFunc([&](pid_t tid){
+        syscall(SYS_tgkill, child, tid, sig);
+        usleep(10000);
+      });
+      auto list_tids = allocator::vector<pid_t>(heap);
+
+      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+      ASSERT_EQ(1U, list_tids.size());
+
+      ASSERT_TRUE(thread_capture.CaptureThreads());
+
+      auto thread_info = allocator::vector<ThreadInfo>(heap);
+      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
+      ASSERT_EQ(1U, thread_info.size());
+      ASSERT_TRUE(thread_capture.ReleaseThreads());
+
+      usleep(100000);
+      char buf;
+      ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
+      ASSERT_EQ(buf, '+');
+
+      if (!HasFailure()) {
+        ASSERT_FALSE(disable_malloc.timed_out());
+      }
+    }
+  });
+}
