Diffstat (limited to 'src/portable-api.c')
-rw-r--r-- | src/portable-api.c | 271 |
1 file changed, 271 insertions, 0 deletions
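The patch below adds a plain 6D variant (pthreadpool_parallelize_6d) and a 6D variant tiled along the innermost dimension (pthreadpool_parallelize_6d_tile_1d), together with their per-thread worker functions. Both workers recover the six loop indices from a flat work-item index by mixed-radix division, using fxdiv with precomputed divisors. As a rough orientation only, the decomposition in thread_parallelize_6d is equivalent to the following sketch, which uses plain '/' and '%' instead of fxdiv; the helper name decode_6d_index is illustrative and not part of the patch:

#include <stddef.h>

/* Sketch: map a flat index back to (i, j, k, l, m, n), mirroring the
 * order of divisions performed in thread_parallelize_6d below. */
static void decode_6d_index(size_t linear_index,
	size_t range_j, size_t range_k, size_t range_l, size_t range_m, size_t range_n,
	size_t* i, size_t* j, size_t* k, size_t* l, size_t* m, size_t* n)
{
	const size_t range_lmn = range_l * range_m * range_n;
	const size_t index_ijk = linear_index / range_lmn;  /* combined (i, j, k) */
	const size_t index_lmn = linear_index % range_lmn;  /* combined (l, m, n) */
	*k = index_ijk % range_k;
	*j = (index_ijk / range_k) % range_j;
	*i = (index_ijk / range_k) / range_j;
	*n = index_lmn % range_n;
	*m = (index_lmn / range_n) % range_m;
	*l = (index_lmn / range_n) / range_m;
}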
diff --git a/src/portable-api.c b/src/portable-api.c
index ef36578..42d0369 100644
--- a/src/portable-api.c
+++ b/src/portable-api.c
@@ -1002,6 +1002,153 @@ static void thread_parallelize_5d_tile_2d(struct pthreadpool* threadpool, struct
 	pthreadpool_fence_release();
 }
 
+static void thread_parallelize_6d(struct pthreadpool* threadpool, struct thread_info* thread) {
+	assert(threadpool != NULL);
+	assert(thread != NULL);
+
+	const pthreadpool_task_6d_t task = (pthreadpool_task_6d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+	void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+	/* Process thread's own range of items */
+	const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+	const struct fxdiv_divisor_size_t range_lmn = threadpool->params.parallelize_6d.range_lmn;
+	const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(range_start, range_lmn);
+	const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d.range_k;
+	const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k);
+	const struct fxdiv_divisor_size_t range_n = threadpool->params.parallelize_6d.range_n;
+	const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n);
+	const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d.range_j;
+	const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+	const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d.range_m;
+	const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m);
+	size_t i = index_i_j.quotient;
+	size_t j = index_i_j.remainder;
+	size_t k = index_ij_k.remainder;
+	size_t l = index_l_m.quotient;
+	size_t m = index_l_m.remainder;
+	size_t n = index_lm_n.remainder;
+
+	const size_t range_l = threadpool->params.parallelize_6d.range_l;
+	while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+		task(argument, i, j, k, l, m, n);
+		if (++n == range_n.value) {
+			n = 0;
+			if (++m == range_m.value) {
+				m = 0;
+				if (++l == range_l) {
+					l = 0;
+					if (++k == range_k.value) {
+						k = 0;
+						if (++j == range_j.value) {
+							j = 0;
+							i += 1;
+						}
+					}
+				}
+			}
+		}
+	}
+
+
+	/* There still may be other threads with work */
+	const size_t thread_number = thread->thread_number;
+	const size_t threads_count = threadpool->threads_count.value;
+	for (size_t tid = modulo_decrement(thread_number, threads_count);
+		tid != thread_number;
+		tid = modulo_decrement(tid, threads_count))
+	{
+		struct thread_info* other_thread = &threadpool->threads[tid];
+		while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+			const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+			const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(linear_index, range_lmn);
+			const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k);
+			const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n);
+			const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+			const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m);
+			task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder, index_lm_n.remainder);
+		}
+	}
+
+	/* Make changes by this thread visible to other threads */
+	pthreadpool_fence_release();
+}
+
+static void thread_parallelize_6d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+	assert(threadpool != NULL);
+	assert(thread != NULL);
+
+	const pthreadpool_task_6d_tile_1d_t task = (pthreadpool_task_6d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+	void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+	/* Process thread's own range of items */
+	const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+	const struct fxdiv_divisor_size_t tile_range_lmn = threadpool->params.parallelize_6d_tile_1d.tile_range_lmn;
+	const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(range_start, tile_range_lmn);
+	const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d_tile_1d.range_k;
+	const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
+	const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_1d.tile_range_n;
+	const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
+	const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_1d.range_j;
+	const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+	const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d_tile_1d.range_m;
+	const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m);
+	const size_t tile_n = threadpool->params.parallelize_6d_tile_1d.tile_n;
+	size_t i = index_i_j.quotient;
+	size_t j = index_i_j.remainder;
+	size_t k = index_ij_k.remainder;
+	size_t l = index_l_m.quotient;
+	size_t m = index_l_m.remainder;
+	size_t start_n = tile_index_lm_n.remainder * tile_n;
+
+	const size_t range_n = threadpool->params.parallelize_6d_tile_1d.range_n;
+	const size_t range_l = threadpool->params.parallelize_6d_tile_1d.range_l;
+	while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+		task(argument, i, j, k, l, m, start_n, min(range_n - start_n, tile_n));
+		start_n += tile_n;
+		if (start_n >= range_n) {
+			start_n = 0;
+			if (++m == range_m.value) {
+				m = 0;
+				if (++l == range_l) {
+					l = 0;
+					if (++k == range_k.value) {
+						k = 0;
+						if (++j == range_j.value) {
+							j = 0;
+							i += 1;
+						}
+					}
+				}
+			}
+		}
+	}
+
+
+	/* There still may be other threads with work */
+	const size_t thread_number = thread->thread_number;
+	const size_t threads_count = threadpool->threads_count.value;
+	for (size_t tid = modulo_decrement(thread_number, threads_count);
+		tid != thread_number;
+		tid = modulo_decrement(tid, threads_count))
+	{
+		struct thread_info* other_thread = &threadpool->threads[tid];
+		while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+			const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+			const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(linear_index, tile_range_lmn);
+			const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
+			const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
+			const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+			const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m);
+			const size_t start_n = tile_index_lm_n.remainder * tile_n;
+			task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder,
+				start_n, min(range_n - start_n, tile_n));
+		}
+	}
+
+	/* Make changes by this thread visible to other threads */
+	pthreadpool_fence_release();
+}
+
 static void thread_parallelize_6d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
 	assert(threadpool != NULL);
 	assert(thread != NULL);
@@ -2043,6 +2190,130 @@ void pthreadpool_parallelize_5d_tile_2d(
 	}
 }
 
+void pthreadpool_parallelize_6d(
+	pthreadpool_t threadpool,
+	pthreadpool_task_6d_t task,
+	void* argument,
+	size_t range_i,
+	size_t range_j,
+	size_t range_k,
+	size_t range_l,
+	size_t range_m,
+	size_t range_n,
+	uint32_t flags)
+{
+	size_t threads_count;
+	if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l | range_m | range_n) <= 1) {
+		/* No thread pool used: execute task sequentially on the calling thread */
+		struct fpu_state saved_fpu_state = { 0 };
+		if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+			saved_fpu_state = get_fpu_state();
+			disable_fpu_denormals();
+		}
+		for (size_t i = 0; i < range_i; i++) {
+			for (size_t j = 0; j < range_j; j++) {
+				for (size_t k = 0; k < range_k; k++) {
+					for (size_t l = 0; l < range_l; l++) {
+						for (size_t m = 0; m < range_m; m++) {
+							for (size_t n = 0; n < range_n; n++) {
+								task(argument, i, j, k, l, m, n);
+							}
+						}
+					}
+				}
+			}
+		}
+		if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+			set_fpu_state(saved_fpu_state);
+		}
+	} else {
+		const size_t range_lmn = range_l * range_m * range_n;
+		const size_t range = range_i * range_j * range_k * range_lmn;
+		const struct pthreadpool_6d_params params = {
+			.range_l = range_l,
+			.range_j = fxdiv_init_size_t(range_j),
+			.range_k = fxdiv_init_size_t(range_k),
+			.range_lmn = fxdiv_init_size_t(range_lmn),
+			.range_m = fxdiv_init_size_t(range_m),
+			.range_n = fxdiv_init_size_t(range_n),
+		};
+		thread_function_t parallelize_6d = &thread_parallelize_6d;
+		#if PTHREADPOOL_USE_FASTPATH
+			const size_t range_threshold = -threads_count;
+			if (range < range_threshold) {
+				parallelize_6d = &pthreadpool_thread_parallelize_6d_fastpath;
+			}
+		#endif
+		pthreadpool_parallelize(
+			threadpool, parallelize_6d, &params, sizeof(params),
+			task, argument, range, flags);
+	}
+}
+
+void pthreadpool_parallelize_6d_tile_1d(
+	pthreadpool_t threadpool,
+	pthreadpool_task_6d_tile_1d_t task,
+	void* argument,
+	size_t range_i,
+	size_t range_j,
+	size_t range_k,
+	size_t range_l,
+	size_t range_m,
+	size_t range_n,
+	size_t tile_n,
+	uint32_t flags)
+{
+	size_t threads_count;
+	if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k | range_l | range_m) <= 1 && range_n <= tile_n)) {
+		/* No thread pool used: execute task sequentially on the calling thread */
+		struct fpu_state saved_fpu_state = { 0 };
+		if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+			saved_fpu_state = get_fpu_state();
+			disable_fpu_denormals();
+		}
+		for (size_t i = 0; i < range_i; i++) {
+			for (size_t j = 0; j < range_j; j++) {
+				for (size_t k = 0; k < range_k; k++) {
+					for (size_t l = 0; l < range_l; l++) {
+						for (size_t m = 0; m < range_m; m++) {
+							for (size_t n = 0; n < range_n; n += tile_n) {
+								task(argument, i, j, k, l, m, n, min(range_n - n, tile_n));
+							}
+						}
+					}
+				}
+			}
+		}
+		if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+			set_fpu_state(saved_fpu_state);
+		}
+	} else {
+		const size_t tile_range_n = divide_round_up(range_n, tile_n);
+		const size_t tile_range_lmn = range_l * range_m * tile_range_n;
+		const size_t tile_range = range_i * range_j * range_k * tile_range_lmn;
+		const struct pthreadpool_6d_tile_1d_params params = {
+			.range_l = range_l,
+			.range_n = range_n,
+			.tile_n = tile_n,
+			.range_j = fxdiv_init_size_t(range_j),
+			.range_k = fxdiv_init_size_t(range_k),
+			.tile_range_lmn = fxdiv_init_size_t(tile_range_lmn),
+			.range_m = fxdiv_init_size_t(range_m),
+			.tile_range_n = fxdiv_init_size_t(tile_range_n),
+		};
+		thread_function_t parallelize_6d_tile_1d = &thread_parallelize_6d_tile_1d;
+		#if PTHREADPOOL_USE_FASTPATH
			const size_t range_threshold = -threads_count;
+			if (tile_range < range_threshold) {
+				parallelize_6d_tile_1d = &pthreadpool_thread_parallelize_6d_tile_1d_fastpath;
+			}
+		#endif
+		pthreadpool_parallelize(
+			threadpool, parallelize_6d_tile_1d, &params, sizeof(params),
+			task, argument, tile_range, flags);
+	}
+}
+
 void pthreadpool_parallelize_6d_tile_2d(
 	pthreadpool_t threadpool,
 	pthreadpool_task_6d_tile_2d_t task,
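For context, a minimal hypothetical caller of the two new entry points could look like the sketch below. The callback shapes follow the way the workers above invoke the tasks (task(argument, i, j, k, l, m, n) and task(argument, i, j, k, l, m, start_n, tile_n)); the my_kernel_* names and the ranges are placeholders, while pthreadpool_create and pthreadpool_destroy are the existing pool lifecycle calls:

#include <stddef.h>
#include <pthreadpool.h>

/* Hypothetical task with the pthreadpool_task_6d_t shape: one call per point. */
static void my_kernel_6d(void* context, size_t i, size_t j, size_t k,
	size_t l, size_t m, size_t n)
{
	/* ... process element (i, j, k, l, m, n) ... */
}

/* Hypothetical task with the pthreadpool_task_6d_tile_1d_t shape:
 * processes tile_n consecutive items starting at start_n along the last dimension. */
static void my_kernel_6d_tile_1d(void* context, size_t i, size_t j, size_t k,
	size_t l, size_t m, size_t start_n, size_t tile_n)
{
	/* ... process elements (i, j, k, l, m, start_n .. start_n + tile_n - 1) ... */
}

int main(void) {
	pthreadpool_t threadpool = pthreadpool_create(0 /* one thread per core */);

	/* One task invocation per point of a 2x3x4x5x6x7 iteration space. */
	pthreadpool_parallelize_6d(threadpool, my_kernel_6d, NULL,
		2, 3, 4, 5, 6, 7, 0 /* flags */);

	/* Same space, but the innermost dimension is handed out in tiles of 2. */
	pthreadpool_parallelize_6d_tile_1d(threadpool, my_kernel_6d_tile_1d, NULL,
		2, 3, 4, 5, 6, 7, 2 /* tile_n */, 0 /* flags */);

	pthreadpool_destroy(threadpool);
	return 0;
}

Tiling only the innermost dimension, as the _tile_1d variant does, reduces per-item scheduling overhead when range_n is large and the work per element is small, while still exposing the outer five dimensions for load balancing and work stealing.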