From 545ebe9f225aec6dca49109516fac02e973a3de2 Mon Sep 17 00:00:00 2001
From: Marat Dukhan
Date: Sat, 5 Dec 2020 20:28:13 -0800
Subject: Implement 6D parallelization with 1D and no tiling

---
 include/pthreadpool.h   | 106 +++++++
 src/fastpath.c          | 157 +++++++++++
 src/portable-api.c      | 271 ++++++++++++++++++
 src/shim.c              |  55 ++++
 src/threadpool-object.h |  72 +++++
 test/pthreadpool.cc     | 731 ++++++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 1392 insertions(+)

diff --git a/include/pthreadpool.h b/include/pthreadpool.h
index 6a7d61f..59c4abf 100644
--- a/include/pthreadpool.h
+++ b/include/pthreadpool.h
@@ -20,6 +20,8 @@ typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, siz
 typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t);
 typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
 typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
 typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
 
 typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t);
@@ -907,6 +909,110 @@ void pthreadpool_parallelize_5d_tile_2d(
 	size_t tile_m,
 	uint32_t flags);
 
+/**
+ * Process items on a 6D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j++)
+ *       for (size_t k = 0; k < range_k; k++)
+ *         for (size_t l = 0; l < range_l; l++)
+ *           for (size_t m = 0; m < range_m; m++)
+ *             for (size_t n = 0; n < range_n; n++)
+ *               function(context, i, j, k, l, m, n);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ *    calls are serialized.
+ *
+ * @param threadpool  the thread pool to use for parallelisation. If threadpool
+ *    is NULL, all items are processed serially on the calling thread.
+ * @param function    the function to call for each item.
+ * @param context     the first argument passed to the specified function.
+ * @param range_i     the number of items to process along the first dimension
+ *    of the 6D grid.
+ * @param range_j     the number of items to process along the second dimension
+ *    of the 6D grid.
+ * @param range_k     the number of items to process along the third dimension
+ *    of the 6D grid.
+ * @param range_l     the number of items to process along the fourth dimension
+ *    of the 6D grid.
+ * @param range_m     the number of items to process along the fifth dimension
+ *    of the 6D grid.
+ * @param range_n     the number of items to process along the sixth dimension
+ *    of the 6D grid.
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d( + pthreadpool_t threadpool, + pthreadpool_task_6d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + uint32_t flags); + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n += tile_n) + * function(context, i, j, k, l, m, n, min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags); + /** * Process items on a 6D grid with the specified maximum tile size along the * last two grid dimensions. 
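The doc comments above fully pin down the calling convention of the new non-tiled entry point, and a minimal usage sketch can make it concrete. Everything named my_* below and all range values are hypothetical; only pthreadpool_create, pthreadpool_parallelize_6d, pthreadpool_destroy, and the pthreadpool_task_6d_t signature are taken from the header added in this patch.

#include <pthreadpool.h>
#include <stddef.h>

/* Matches pthreadpool_task_6d_t: a context pointer plus six grid indices. */
static void my_kernel_6d(void* context, size_t i, size_t j, size_t k,
	size_t l, size_t m, size_t n)
{
	/* Process element (i, j, k, l, m, n) of a hypothetical 6D problem. */
	(void) context;
	(void) i; (void) j; (void) k; (void) l; (void) m; (void) n;
}

int main(void) {
	/* 0 means "create one thread per logical processor". */
	pthreadpool_t threadpool = pthreadpool_create(0);
	/* Parallel equivalent of the six nested loops in the doc comment above. */
	pthreadpool_parallelize_6d(
		threadpool, my_kernel_6d, NULL /* context */,
		2, 3, 5, 7, 11, 13 /* range_i .. range_n, hypothetical */,
		0 /* flags */);
	pthreadpool_destroy(threadpool);
	return 0;
}

Passing a NULL threadpool instead of the created pool would run the same six loops serially on the calling thread, as documented above.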
diff --git a/src/fastpath.c b/src/fastpath.c index 6abbebe..b914ff0 100644 --- a/src/fastpath.c +++ b/src/fastpath.c @@ -1085,6 +1085,163 @@ PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_2d_fastpath( pthreadpool_fence_release(); } +PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_fastpath( + struct pthreadpool* threadpool, + struct thread_info* thread) +{ + assert(threadpool != NULL); + assert(thread != NULL); + + const pthreadpool_task_6d_t task = (pthreadpool_task_6d_t) pthreadpool_load_relaxed_void_p(&threadpool->task); + void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument); + + const size_t threads_count = threadpool->threads_count.value; + const size_t range_threshold = -threads_count; + + /* Process thread's own range of items */ + const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start); + const struct fxdiv_divisor_size_t range_lmn = threadpool->params.parallelize_6d.range_lmn; + const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(range_start, range_lmn); + const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d.range_k; + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k); + const struct fxdiv_divisor_size_t range_n = threadpool->params.parallelize_6d.range_n; + const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n); + const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d.range_j; + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d.range_m; + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m); + size_t i = index_i_j.quotient; + size_t j = index_i_j.remainder; + size_t k = index_ij_k.remainder; + size_t l = index_l_m.quotient; + size_t m = index_l_m.remainder; + size_t n = index_lm_n.remainder; + + const size_t range_l = threadpool->params.parallelize_6d.range_l; + while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) { + task(argument, i, j, k, l, m, n); + if (++n == range_n.value) { + n = 0; + if (++m == range_m.value) { + m = 0; + if (++l == range_l) { + l = 0; + if (++k == range_k.value) { + k = 0; + if (++j == range_j.value) { + j = 0; + i += 1; + } + } + } + } + } + } + + + /* There still may be other threads with work */ + const size_t thread_number = thread->thread_number; + for (size_t tid = modulo_decrement(thread_number, threads_count); + tid != thread_number; + tid = modulo_decrement(tid, threads_count)) + { + struct thread_info* other_thread = &threadpool->threads[tid]; + while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) { + const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end); + const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(linear_index, range_lmn); + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k); + const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n); + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m); + task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, 
index_l_m.quotient, index_l_m.remainder, index_lm_n.remainder); + } + } + + /* Make changes by this thread visible to other threads */ + pthreadpool_fence_release(); +} + +PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_1d_fastpath( + struct pthreadpool* threadpool, + struct thread_info* thread) +{ + assert(threadpool != NULL); + assert(thread != NULL); + + const pthreadpool_task_6d_tile_1d_t task = (pthreadpool_task_6d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task); + void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument); + + const size_t threads_count = threadpool->threads_count.value; + const size_t range_threshold = -threads_count; + + /* Process thread's own range of items */ + const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start); + const struct fxdiv_divisor_size_t tile_range_lmn = threadpool->params.parallelize_6d_tile_1d.tile_range_lmn; + const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(range_start, tile_range_lmn); + const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d_tile_1d.range_k; + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k); + const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_1d.tile_range_n; + const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n); + const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_1d.range_j; + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d_tile_1d.range_m; + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m); + const size_t tile_n = threadpool->params.parallelize_6d_tile_1d.tile_n; + size_t i = index_i_j.quotient; + size_t j = index_i_j.remainder; + size_t k = index_ij_k.remainder; + size_t l = index_l_m.quotient; + size_t m = index_l_m.remainder; + size_t start_n = tile_index_lm_n.remainder * tile_n; + + const size_t range_n = threadpool->params.parallelize_6d_tile_1d.range_n; + const size_t range_l = threadpool->params.parallelize_6d_tile_1d.range_l; + while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) { + task(argument, i, j, k, l, m, start_n, min(range_n - start_n, tile_n)); + start_n += tile_n; + if (start_n >= range_n) { + start_n = 0; + if (++m == range_m.value) { + m = 0; + if (++l == range_l) { + l = 0; + if (++k == range_k.value) { + k = 0; + if (++j == range_j.value) { + j = 0; + i += 1; + } + } + } + } + } + } + + + /* There still may be other threads with work */ + const size_t thread_number = thread->thread_number; + for (size_t tid = modulo_decrement(thread_number, threads_count); + tid != thread_number; + tid = modulo_decrement(tid, threads_count)) + { + struct thread_info* other_thread = &threadpool->threads[tid]; + while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) { + const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end); + const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(linear_index, tile_range_lmn); + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k); + const struct fxdiv_result_size_t tile_index_lm_n = 
fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n); + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m); + const size_t start_n = tile_index_lm_n.remainder * tile_n; + task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder, + start_n, min(range_n - start_n, tile_n)); + } + } + + /* Make changes by this thread visible to other threads */ + pthreadpool_fence_release(); +} + PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_2d_fastpath( struct pthreadpool* threadpool, struct thread_info* thread) diff --git a/src/portable-api.c b/src/portable-api.c index ef36578..42d0369 100644 --- a/src/portable-api.c +++ b/src/portable-api.c @@ -1002,6 +1002,153 @@ static void thread_parallelize_5d_tile_2d(struct pthreadpool* threadpool, struct pthreadpool_fence_release(); } +static void thread_parallelize_6d(struct pthreadpool* threadpool, struct thread_info* thread) { + assert(threadpool != NULL); + assert(thread != NULL); + + const pthreadpool_task_6d_t task = (pthreadpool_task_6d_t) pthreadpool_load_relaxed_void_p(&threadpool->task); + void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument); + + /* Process thread's own range of items */ + const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start); + const struct fxdiv_divisor_size_t range_lmn = threadpool->params.parallelize_6d.range_lmn; + const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(range_start, range_lmn); + const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d.range_k; + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k); + const struct fxdiv_divisor_size_t range_n = threadpool->params.parallelize_6d.range_n; + const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n); + const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d.range_j; + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d.range_m; + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m); + size_t i = index_i_j.quotient; + size_t j = index_i_j.remainder; + size_t k = index_ij_k.remainder; + size_t l = index_l_m.quotient; + size_t m = index_l_m.remainder; + size_t n = index_lm_n.remainder; + + const size_t range_l = threadpool->params.parallelize_6d.range_l; + while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) { + task(argument, i, j, k, l, m, n); + if (++n == range_n.value) { + n = 0; + if (++m == range_m.value) { + m = 0; + if (++l == range_l) { + l = 0; + if (++k == range_k.value) { + k = 0; + if (++j == range_j.value) { + j = 0; + i += 1; + } + } + } + } + } + } + + + /* There still may be other threads with work */ + const size_t thread_number = thread->thread_number; + const size_t threads_count = threadpool->threads_count.value; + for (size_t tid = modulo_decrement(thread_number, threads_count); + tid != thread_number; + tid = modulo_decrement(tid, threads_count)) + { + struct thread_info* other_thread = &threadpool->threads[tid]; + while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) { + const size_t linear_index = 
pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end); + const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(linear_index, range_lmn); + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k); + const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n); + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m); + task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder, index_lm_n.remainder); + } + } + + /* Make changes by this thread visible to other threads */ + pthreadpool_fence_release(); +} + +static void thread_parallelize_6d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) { + assert(threadpool != NULL); + assert(thread != NULL); + + const pthreadpool_task_6d_tile_1d_t task = (pthreadpool_task_6d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task); + void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument); + + /* Process thread's own range of items */ + const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start); + const struct fxdiv_divisor_size_t tile_range_lmn = threadpool->params.parallelize_6d_tile_1d.tile_range_lmn; + const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(range_start, tile_range_lmn); + const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d_tile_1d.range_k; + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k); + const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_1d.tile_range_n; + const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n); + const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_1d.range_j; + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d_tile_1d.range_m; + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m); + const size_t tile_n = threadpool->params.parallelize_6d_tile_1d.tile_n; + size_t i = index_i_j.quotient; + size_t j = index_i_j.remainder; + size_t k = index_ij_k.remainder; + size_t l = index_l_m.quotient; + size_t m = index_l_m.remainder; + size_t start_n = tile_index_lm_n.remainder * tile_n; + + const size_t range_n = threadpool->params.parallelize_6d_tile_1d.range_n; + const size_t range_l = threadpool->params.parallelize_6d_tile_1d.range_l; + while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) { + task(argument, i, j, k, l, m, start_n, min(range_n - start_n, tile_n)); + start_n += tile_n; + if (start_n >= range_n) { + start_n = 0; + if (++m == range_m.value) { + m = 0; + if (++l == range_l) { + l = 0; + if (++k == range_k.value) { + k = 0; + if (++j == range_j.value) { + j = 0; + i += 1; + } + } + } + } + } + } + + + /* There still may be other threads with work */ + const size_t thread_number = thread->thread_number; + const size_t threads_count = threadpool->threads_count.value; + for (size_t tid = modulo_decrement(thread_number, threads_count); + tid != thread_number; + tid = modulo_decrement(tid, threads_count)) + { + struct thread_info* 
other_thread = &threadpool->threads[tid]; + while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) { + const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end); + const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(linear_index, tile_range_lmn); + const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k); + const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n); + const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j); + const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m); + const size_t start_n = tile_index_lm_n.remainder * tile_n; + task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder, + start_n, min(range_n - start_n, tile_n)); + } + } + + /* Make changes by this thread visible to other threads */ + pthreadpool_fence_release(); +} + static void thread_parallelize_6d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) { assert(threadpool != NULL); assert(thread != NULL); @@ -2043,6 +2190,130 @@ void pthreadpool_parallelize_5d_tile_2d( } } +void pthreadpool_parallelize_6d( + pthreadpool_t threadpool, + pthreadpool_task_6d_t task, + void* argument, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + uint32_t flags) +{ + size_t threads_count; + if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l | range_m | range_n) <= 1) { + /* No thread pool used: execute task sequentially on the calling thread */ + struct fpu_state saved_fpu_state = { 0 }; + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + saved_fpu_state = get_fpu_state(); + disable_fpu_denormals(); + } + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j++) { + for (size_t k = 0; k < range_k; k++) { + for (size_t l = 0; l < range_l; l++) { + for (size_t m = 0; m < range_m; m++) { + for (size_t n = 0; n < range_n; n++) { + task(argument, i, j, k, l, m, n); + } + } + } + } + } + } + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + set_fpu_state(saved_fpu_state); + } + } else { + const size_t range_lmn = range_l * range_m * range_n; + const size_t range = range_i * range_j * range_k * range_lmn; + const struct pthreadpool_6d_params params = { + .range_l = range_l, + .range_j = fxdiv_init_size_t(range_j), + .range_k = fxdiv_init_size_t(range_k), + .range_lmn = fxdiv_init_size_t(range_lmn), + .range_m = fxdiv_init_size_t(range_m), + .range_n = fxdiv_init_size_t(range_n), + }; + thread_function_t parallelize_6d = &thread_parallelize_6d; + #if PTHREADPOOL_USE_FASTPATH + const size_t range_threshold = -threads_count; + if (range < range_threshold) { + parallelize_6d = &pthreadpool_thread_parallelize_6d_fastpath; + } + #endif + pthreadpool_parallelize( + threadpool, parallelize_6d, ¶ms, sizeof(params), + task, argument, range, flags); + } +} + +void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_1d_t task, + void* argument, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags) +{ + size_t threads_count; + if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | 
range_j | range_k | range_l | range_m) <= 1 && range_n <= tile_n)) { + /* No thread pool used: execute task sequentially on the calling thread */ + struct fpu_state saved_fpu_state = { 0 }; + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + saved_fpu_state = get_fpu_state(); + disable_fpu_denormals(); + } + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j++) { + for (size_t k = 0; k < range_k; k++) { + for (size_t l = 0; l < range_l; l++) { + for (size_t m = 0; m < range_m; m++) { + for (size_t n = 0; n < range_n; n += tile_n) { + task(argument, i, j, k, l, m, n, min(range_n - n, tile_n)); + } + } + } + } + } + } + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + set_fpu_state(saved_fpu_state); + } + } else { + const size_t tile_range_n = divide_round_up(range_n, tile_n); + const size_t tile_range_lmn = range_l * range_m * tile_range_n; + const size_t tile_range = range_i * range_j * range_k * tile_range_lmn; + const struct pthreadpool_6d_tile_1d_params params = { + .range_l = range_l, + .range_n = range_n, + .tile_n = tile_n, + .range_j = fxdiv_init_size_t(range_j), + .range_k = fxdiv_init_size_t(range_k), + .tile_range_lmn = fxdiv_init_size_t(tile_range_lmn), + .range_m = fxdiv_init_size_t(range_m), + .tile_range_n = fxdiv_init_size_t(tile_range_n), + }; + thread_function_t parallelize_6d_tile_1d = &thread_parallelize_6d_tile_1d; + #if PTHREADPOOL_USE_FASTPATH + const size_t range_threshold = -threads_count; + if (tile_range < range_threshold) { + parallelize_6d_tile_1d = &pthreadpool_thread_parallelize_6d_tile_1d_fastpath; + } + #endif + pthreadpool_parallelize( + threadpool, parallelize_6d_tile_1d, ¶ms, sizeof(params), + task, argument, tile_range, flags); + } +} + void pthreadpool_parallelize_6d_tile_2d( pthreadpool_t threadpool, pthreadpool_task_6d_tile_2d_t task, diff --git a/src/shim.c b/src/shim.c index e90ac45..39ec884 100644 --- a/src/shim.c +++ b/src/shim.c @@ -383,6 +383,61 @@ void pthreadpool_parallelize_5d_tile_2d( } } +void pthreadpool_parallelize_6d( + pthreadpool_t threadpool, + pthreadpool_task_6d_t task, + void* argument, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + uint32_t flags) +{ + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j++) { + for (size_t k = 0; k < range_k; k++) { + for (size_t l = 0; l < range_l; l++) { + for (size_t m = 0; m < range_m; m++) { + for (size_t n = 0; n < range_n; n++) { + task(argument, i, j, k, l, m, n); + } + } + } + } + } + } +} + +void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_1d_t task, + void* argument, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags) +{ + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j++) { + for (size_t k = 0; k < range_k; k++) { + for (size_t l = 0; l < range_l; l++) { + for (size_t m = 0; m < range_m; m++) { + for (size_t n = 0; n < range_n; n += tile_n) { + task(argument, i, j, k, l, m, n, min(range_n - n, tile_n)); + } + } + } + } + } + } +} + void pthreadpool_parallelize_6d_tile_2d( pthreadpool_t threadpool, pthreadpool_task_6d_tile_2d_t task, diff --git a/src/threadpool-object.h b/src/threadpool-object.h index 9870e8a..590dc96 100644 --- a/src/threadpool-object.h +++ b/src/threadpool-object.h @@ -476,6 +476,68 @@ struct pthreadpool_5d_tile_2d_params { struct fxdiv_divisor_size_t tile_range_m; }; +struct 
pthreadpool_6d_params { + /** + * Copy of the range_l argument passed to the pthreadpool_parallelize_6d function. + */ + size_t range_l; + /** + * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_6d function. + */ + struct fxdiv_divisor_size_t range_j; + /** + * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_6d function. + */ + struct fxdiv_divisor_size_t range_k; + /** + * FXdiv divisor for the range_l * range_m * range_n value. + */ + struct fxdiv_divisor_size_t range_lmn; + /** + * FXdiv divisor for the range_m argument passed to the pthreadpool_parallelize_6d function. + */ + struct fxdiv_divisor_size_t range_m; + /** + * FXdiv divisor for the range_n argument passed to the pthreadpool_parallelize_6d function. + */ + struct fxdiv_divisor_size_t range_n; +}; + +struct pthreadpool_6d_tile_1d_params { + /** + * Copy of the range_l argument passed to the pthreadpool_parallelize_6d_tile_1d function. + */ + size_t range_l; + /** + * Copy of the range_n argument passed to the pthreadpool_parallelize_6d_tile_1d function. + */ + size_t range_n; + /** + * Copy of the tile_n argument passed to the pthreadpool_parallelize_6d_tile_1d function. + */ + size_t tile_n; + /** + * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_6d_tile_1d function. + */ + struct fxdiv_divisor_size_t range_j; + /** + * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_6d_tile_1d function. + */ + struct fxdiv_divisor_size_t range_k; + /** + * FXdiv divisor for the range_l * range_m * divide_round_up(range_n, tile_n) value. + */ + struct fxdiv_divisor_size_t tile_range_lmn; + /** + * FXdiv divisor for the range_m argument passed to the pthreadpool_parallelize_6d_tile_1d function. + */ + struct fxdiv_divisor_size_t range_m; + /** + * FXdiv divisor for the divide_round_up(range_n, tile_n) value. + */ + struct fxdiv_divisor_size_t tile_range_n; +}; + struct pthreadpool_6d_tile_2d_params { /** * Copy of the range_k argument passed to the pthreadpool_parallelize_6d_tile_2d function. 
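The FXdiv divisor fields in the two structs above exist so that worker threads, including threads stealing another thread's range, can convert a linear item index back into 6D coordinates without hardware integer division. The sketch below reproduces that decomposition with ordinary division; it is illustrative only (the implementation goes through fxdiv_divide_size_t on these precomputed divisors), and the ranges used in main are hypothetical.

#include <stddef.h>
#include <stdio.h>

/* Split a linear index into (i, j, k, l, m, n), mirroring the order used by
 * thread_parallelize_6d: peel off the l/m/n block first, then k, then j and i. */
static void decompose_6d(size_t linear_index, size_t range_j, size_t range_k,
	size_t range_m, size_t range_n,
	size_t range_lmn /* = range_l * range_m * range_n */,
	size_t coords[6])
{
	const size_t index_ijk = linear_index / range_lmn;
	const size_t index_lmn = linear_index % range_lmn;
	const size_t index_ij = index_ijk / range_k;
	coords[2] = index_ijk % range_k;  /* k */
	const size_t index_lm = index_lmn / range_n;
	coords[5] = index_lmn % range_n;  /* n */
	coords[0] = index_ij / range_j;   /* i */
	coords[1] = index_ij % range_j;   /* j */
	coords[3] = index_lm / range_m;   /* l */
	coords[4] = index_lm % range_m;   /* m */
}

int main(void) {
	/* Hypothetical 3 x 5 x 7 x 11 x 13 x 17 grid. */
	const size_t range_j = 5, range_k = 7, range_l = 11, range_m = 13, range_n = 17;
	const size_t range_lmn = range_l * range_m * range_n;
	size_t coords[6];
	/* 255254 corresponds to (2, 4, 6, 10, 12, 16) in row-major order. */
	decompose_6d(255254, range_j, range_k, range_m, range_n, range_lmn, coords);
	printf("(%zu, %zu, %zu, %zu, %zu, %zu)\n",
		coords[0], coords[1], coords[2], coords[3], coords[4], coords[5]);
	return 0;
}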
@@ -575,6 +637,8 @@ struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool { struct pthreadpool_5d_params parallelize_5d; struct pthreadpool_5d_tile_1d_params parallelize_5d_tile_1d; struct pthreadpool_5d_tile_2d_params parallelize_5d_tile_2d; + struct pthreadpool_6d_params parallelize_6d; + struct pthreadpool_6d_tile_1d_params parallelize_6d_tile_1d; struct pthreadpool_6d_tile_2d_params parallelize_6d_tile_2d; } params; /** @@ -735,6 +799,14 @@ PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_2d_fastpath( struct pthreadpool* threadpool, struct thread_info* thread); +PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_fastpath( + struct pthreadpool* threadpool, + struct thread_info* thread); + +PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_1d_fastpath( + struct pthreadpool* threadpool, + struct thread_info* thread); + PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_2d_fastpath( struct pthreadpool* threadpool, struct thread_info* thread); diff --git a/test/pthreadpool.cc b/test/pthreadpool.cc index f822506..c9592ec 100644 --- a/test/pthreadpool.cc +++ b/test/pthreadpool.cc @@ -68,6 +68,19 @@ const size_t kParallelize5DTile2DRangeL = 23; const size_t kParallelize5DTile2DRangeM = 29; const size_t kParallelize5DTile2DTileL = 3; const size_t kParallelize5DTile2DTileM = 2; +const size_t kParallelize6DRangeI = 3; +const size_t kParallelize6DRangeJ = 5; +const size_t kParallelize6DRangeK = 7; +const size_t kParallelize6DRangeL = 11; +const size_t kParallelize6DRangeM = 13; +const size_t kParallelize6DRangeN = 17; +const size_t kParallelize6DTile1DRangeI = 5; +const size_t kParallelize6DTile1DRangeJ = 7; +const size_t kParallelize6DTile1DRangeK = 11; +const size_t kParallelize6DTile1DRangeL = 13; +const size_t kParallelize6DTile1DRangeM = 17; +const size_t kParallelize6DTile1DRangeN = 19; +const size_t kParallelize6DTile1DTileN = 5; const size_t kParallelize6DTile2DRangeI = 7; const size_t kParallelize6DTile2DRangeJ = 11; const size_t kParallelize6DTile2DRangeK = 13; @@ -6267,6 +6280,724 @@ TEST(Parallelize5DTile2D, MultiThreadPoolWorkStealing) { EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile2DRangeI * kParallelize5DTile2DRangeJ * kParallelize5DTile2DRangeK * kParallelize5DTile2DRangeL * kParallelize5DTile2DRangeM); } +static void ComputeNothing6D(void*, size_t, size_t, size_t, size_t, size_t, size_t) { +} + +TEST(Parallelize6D, SingleThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d(threadpool.get(), + ComputeNothing6D, + nullptr, + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); +} + +TEST(Parallelize6D, MultiThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d( + threadpool.get(), + ComputeNothing6D, + nullptr, + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); +} + +static void CheckBounds6D(void*, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + EXPECT_LT(i, kParallelize6DRangeI); + EXPECT_LT(j, kParallelize6DRangeJ); + EXPECT_LT(k, kParallelize6DRangeK); + EXPECT_LT(l, 
kParallelize6DRangeL); + EXPECT_LT(m, kParallelize6DRangeM); + EXPECT_LT(n, kParallelize6DRangeN); +} + +TEST(Parallelize6D, SingleThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d( + threadpool.get(), + CheckBounds6D, + nullptr, + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); +} + +TEST(Parallelize6D, MultiThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d( + threadpool.get(), + CheckBounds6D, + nullptr, + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); +} + +static void SetTrue6D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + processed_indicators[linear_idx].store(true, std::memory_order_relaxed); +} + +TEST(Parallelize6D, SingleThreadPoolAllItemsProcessed) { + std::vector indicators(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(SetTrue6D), + static_cast(indicators.data()), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed"; + } + } + } + } + } + } +} + +TEST(Parallelize6D, MultiThreadPoolAllItemsProcessed) { + std::vector indicators(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(SetTrue6D), + static_cast(indicators.data()), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DRangeK; k++) { + for (size_t l = 0; 
l < kParallelize6DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed"; + } + } + } + } + } + } +} + +static void Increment6D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed); +} + +TEST(Parallelize6D, SingleThreadPoolEachItemProcessedOnce) { + std::vector counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(Increment6D), + static_cast(counters.data()), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } + } + } + } +} + +TEST(Parallelize6D, MultiThreadPoolEachItemProcessedOnce) { + std::vector counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(Increment6D), + static_cast(counters.data()), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + 
<< "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } + } + } + } +} + +TEST(Parallelize6D, SingleThreadPoolEachItemProcessedMultipleTimes) { + std::vector counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) { + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(Increment6D), + static_cast(counters.data()), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize6DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations6D << ")"; + } + } + } + } + } + } +} + +TEST(Parallelize6D, MultiThreadPoolEachItemProcessedMultipleTimes) { + std::vector counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) { + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(Increment6D), + static_cast(counters.data()), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize6DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations6D << ")"; + } + } + } + } + } + } +} + +static void IncrementSame6D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + 
num_processed_items->fetch_add(1, std::memory_order_relaxed); +} + +TEST(Parallelize6D, MultiThreadPoolHighContention) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(IncrementSame6D), + static_cast(&num_processed_items), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); +} + +static void WorkImbalance6D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); + if (i == 0 && j == 0 && k == 0 && l == 0 && m == 0 && n == 0) { + /* Spin-wait until all items are computed */ + while (num_processed_items->load(std::memory_order_relaxed) != kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN) { + std::atomic_thread_fence(std::memory_order_acquire); + } + } +} + +TEST(Parallelize6D, MultiThreadPoolWorkStealing) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d( + threadpool.get(), + reinterpret_cast(WorkImbalance6D), + static_cast(&num_processed_items), + kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN); +} + +static void ComputeNothing6DTile1D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) { +} + +TEST(Parallelize6DTile1D, SingleThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d_tile_1d(threadpool.get(), + ComputeNothing6DTile1D, + nullptr, + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); +} + +TEST(Parallelize6DTile1D, MultiThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + ComputeNothing6DTile1D, + nullptr, + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); +} + +static void CheckBounds6DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) { + EXPECT_LT(i, 
kParallelize6DTile1DRangeI); + EXPECT_LT(j, kParallelize6DTile1DRangeJ); + EXPECT_LT(k, kParallelize6DTile1DRangeK); + EXPECT_LT(l, kParallelize6DTile1DRangeL); + EXPECT_LT(m, kParallelize6DTile1DRangeM); + EXPECT_LT(start_n, kParallelize6DTile1DRangeN); + EXPECT_LE(start_n + tile_n, kParallelize6DTile1DRangeN); +} + +TEST(Parallelize6DTile1D, SingleThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + CheckBounds6DTile1D, + nullptr, + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); +} + +TEST(Parallelize6DTile1D, MultiThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + CheckBounds6DTile1D, + nullptr, + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); +} + +static void CheckTiling6DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) { + EXPECT_GT(tile_n, 0); + EXPECT_LE(tile_n, kParallelize6DTile1DTileN); + EXPECT_EQ(start_n % kParallelize6DTile1DTileN, 0); + EXPECT_EQ(tile_n, std::min(kParallelize6DTile1DTileN, kParallelize6DTile1DRangeN - start_n)); +} + +TEST(Parallelize6DTile1D, SingleThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + CheckTiling6DTile1D, + nullptr, + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); +} + +TEST(Parallelize6DTile1D, MultiThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + CheckTiling6DTile1D, + nullptr, + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); +} + +static void SetTrue6DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) { + for (size_t n = start_n; n < start_n + tile_n; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + processed_indicators[linear_idx].store(true, std::memory_order_relaxed); + } +} + +TEST(Parallelize6DTile1D, SingleThreadPoolAllItemsProcessed) { + std::vector indicators(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); + + 
auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(SetTrue6DTile1D), + static_cast(indicators.data()), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed"; + } + } + } + } + } + } +} + +TEST(Parallelize6DTile1D, MultiThreadPoolAllItemsProcessed) { + std::vector indicators(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(SetTrue6DTile1D), + static_cast(indicators.data()), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed"; + } + } + } + } + } + } +} + +static void Increment6DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) { + for (size_t n = start_n; n < start_n + tile_n; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed); + } +} + +TEST(Parallelize6DTile1D, SingleThreadPoolEachItemProcessedOnce) { + std::vector counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * 
kParallelize6DTile1DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(Increment6DTile1D), + static_cast(counters.data()), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } + } + } + } +} + +TEST(Parallelize6DTile1D, MultiThreadPoolEachItemProcessedOnce) { + std::vector counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(Increment6DTile1D), + static_cast(counters.data()), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } + } + } + } +} + +TEST(Parallelize6DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) { + std::vector counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) { + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + 
reinterpret_cast(Increment6DTile1D), + static_cast(counters.data()), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations6D << ")"; + } + } + } + } + } + } +} + +TEST(Parallelize6DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) { + std::vector counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) { + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(Increment6DTile1D), + static_cast(counters.data()), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) { + for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) { + for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) { + for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) { + for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) { + for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) { + const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations6D << ")"; + } + } + } + } + } + } +} + +static void IncrementSame6DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) { + for (size_t n = start_n; n < start_n + tile_n; n++) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); + } +} + +TEST(Parallelize6DTile1D, MultiThreadPoolHighContention) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if 
(pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(IncrementSame6DTile1D), + static_cast(&num_processed_items), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); +} + +static void WorkImbalance6DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) { + num_processed_items->fetch_add(tile_n, std::memory_order_relaxed); + if (i == 0 && j == 0 && k == 0 && l == 0 && m == 0 && start_n == 0) { + /* Spin-wait until all items are computed */ + while (num_processed_items->load(std::memory_order_relaxed) != kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN) { + std::atomic_thread_fence(std::memory_order_acquire); + } + } +} + +TEST(Parallelize6DTile1D, MultiThreadPoolWorkStealing) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_6d_tile_1d( + threadpool.get(), + reinterpret_cast(WorkImbalance6DTile1D), + static_cast(&num_processed_items), + kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN, + kParallelize6DTile1DTileN, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN); +} + static void ComputeNothing6DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t) { } -- cgit v1.2.3
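As the Parallelize6DTile1D tests above verify, the tiled variant always hands the callback a slice [start_n, start_n + tile_n) whose start is a multiple of the tile size passed to the function and whose length is min(tile size, range_n - start_n), so only the last tile along the sixth dimension may be short. A minimal caller sketch follows; the kernel name and the ranges are hypothetical, and only the pthreadpool_* calls and the pthreadpool_task_6d_tile_1d_t signature come from this patch.

#include <pthreadpool.h>
#include <stddef.h>

/* Matches pthreadpool_task_6d_tile_1d_t: five indices plus a tile of the sixth dimension. */
static void my_tile_kernel_6d(void* context, size_t i, size_t j, size_t k,
	size_t l, size_t m, size_t start_n, size_t tile_n)
{
	(void) context; (void) i; (void) j; (void) k; (void) l; (void) m;
	for (size_t n = start_n; n < start_n + tile_n; n++) {
		/* Process element (i, j, k, l, m, n); with range_n = 19 and a
		 * requested tile of 4, the final call receives tile_n == 3. */
	}
}

int main(void) {
	pthreadpool_t threadpool = pthreadpool_create(0);
	pthreadpool_parallelize_6d_tile_1d(
		threadpool, my_tile_kernel_6d, NULL /* context */,
		2, 3, 5, 7, 11, 19 /* range_i .. range_n, hypothetical */,
		4 /* tile_n */,
		0 /* flags */);
	pthreadpool_destroy(threadpool);
	return 0;
}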