author     Haibo Huang <hhb@google.com>  2020-04-03 19:40:05 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2020-04-03 19:40:05 +0000
commit     3e87b1089e1a3b48e99fe57be895752333b73eb8 (patch)
tree       9d2475b6e71d04d43b684326efe4b5dd235617b2
parent     90984e8dd4cd2df62b8b5b95d51825f022395666 (diff)
parent     797ab0c0c583d50a616f05ef0850feef60148496 (diff)
Upgrade pthreadpool to 76042155a8b1e189c8f141429fd72219472c32e1 am: 597e92a6f0 am: 797ab0c0c5
Change-Id: I22fd730182bbf24b82078c0a9a68cab76899a904
-rw-r--r--  CMakeLists.txt                  32
-rw-r--r--  METADATA                        14
-rw-r--r--  README.md                        8
-rw-r--r--  bench/latency.cc                 5
-rw-r--r--  cmake/DownloadCpuinfo.cmake     15
-rw-r--r--  include/pthreadpool.h          594
-rw-r--r--  src/threadpool-atomics.h       178
-rw-r--r--  src/threadpool-pthreads.c      883
-rw-r--r--  src/threadpool-shim.c           83
-rw-r--r--  test/pthreadpool.cc           1790
10 files changed, 3368 insertions, 234 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 714325a..79a17a1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,6 +39,21 @@ IF(NOT DEFINED FXDIV_SOURCE_DIR)
SET(FXDIV_SOURCE_DIR "${CMAKE_BINARY_DIR}/FXdiv-source" CACHE STRING "FXdiv source directory")
ENDIF()
+IF(CMAKE_SYSTEM_NAME MATCHES "^(Linux|Android)$" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv[5-8].*|aarch64)$")
+ IF(NOT DEFINED CPUINFO_SOURCE_DIR)
+ MESSAGE(STATUS "Downloading cpuinfo to ${CMAKE_BINARY_DIR}/cpuinfo-source (define CPUINFO_SOURCE_DIR to avoid it)")
+ CONFIGURE_FILE(cmake/DownloadCpuinfo.cmake "${CMAKE_BINARY_DIR}/cpuinfo-download/CMakeLists.txt")
+ EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
+ WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/cpuinfo-download")
+ EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" --build .
+ WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/cpuinfo-download")
+ SET(CPUINFO_SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source" CACHE STRING "cpuinfo source directory")
+ ENDIF()
+ SET(PTHREADPOOL_USE_CPUINFO ON)
+ELSE()
+ SET(PTHREADPOOL_USE_CPUINFO OFF)
+ENDIF()
+
IF(PTHREADPOOL_BUILD_TESTS AND NOT DEFINED GOOGLETEST_SOURCE_DIR)
MESSAGE(STATUS "Downloading Google Test to ${CMAKE_BINARY_DIR}/googletest-source (define GOOGLETEST_SOURCE_DIR to avoid it)")
CONFIGURE_FILE(cmake/DownloadGoogleTest.cmake "${CMAKE_BINARY_DIR}/googletest-download/CMakeLists.txt")
@@ -122,6 +137,23 @@ IF(NOT TARGET fxdiv)
ENDIF()
TARGET_LINK_LIBRARIES(pthreadpool PRIVATE fxdiv)
+# ---[ Configure cpuinfo
+IF(PTHREADPOOL_USE_CPUINFO)
+ IF(NOT TARGET cpuinfo)
+ SET(CPUINFO_BUILD_TOOLS OFF CACHE BOOL "")
+ SET(CPUINFO_BUILD_UNIT_TESTS OFF CACHE BOOL "")
+ SET(CPUINFO_BUILD_MOCK_TESTS OFF CACHE BOOL "")
+ SET(CPUINFO_BUILD_BENCHMARKS OFF CACHE BOOL "")
+ ADD_SUBDIRECTORY(
+ "${CPUINFO_SOURCE_DIR}"
+ "${CMAKE_BINARY_DIR}/cpuinfo")
+ ENDIF()
+ TARGET_LINK_LIBRARIES(pthreadpool PRIVATE cpuinfo)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_CPUINFO=1)
+ELSE()
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_CPUINFO=0)
+ENDIF()
+
INSTALL(TARGETS pthreadpool
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
diff --git a/METADATA b/METADATA
index 4df42c6..ed792c8 100644
--- a/METADATA
+++ b/METADATA
@@ -1,9 +1,5 @@
name: "pthreadpool"
-description:
- "pthreadpool is a portable and efficient thread pool implementation. It "
- "provides similar functionality to #pragma omp parallel for, but with "
- "additional features."
-
+description: "pthreadpool is a portable and efficient thread pool implementation. It provides similar functionality to #pragma omp parallel for, but with additional features."
third_party {
url {
type: HOMEPAGE
@@ -13,7 +9,11 @@ third_party {
type: GIT
value: "https://github.com/Maratyszcza/pthreadpool"
}
- version: "d465747660ecf9ebbaddf8c3db37e4a13d0c9103"
- last_upgrade_date { year: 2020 month: 2 day: 3 }
+ version: "76042155a8b1e189c8f141429fd72219472c32e1"
license_type: NOTICE
+ last_upgrade_date {
+ year: 2020
+ month: 4
+ day: 1
+ }
}
diff --git a/README.md b/README.md
index 3faafaa..164aab5 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ It provides similar functionality to `#pragma omp parallel for`, but with additi
* Run on user-specified or auto-detected number of threads.
* Work-stealing scheduling for efficient work balancing.
* Wait-free synchronization of work items.
-* Compatible with Linux (including Android), macOS, iOS, Emscripten, Native Client environments.
+* Compatible with Linux (including Android), macOS, iOS, MinGW, Emscripten environments.
* 100% unit tests coverage.
* Throughput and latency microbenchmarks.
@@ -35,17 +35,17 @@ int main() {
pthreadpool_t threadpool = pthreadpool_create(0);
assert(threadpool != NULL);
-
+
const size_t threads_count = pthreadpool_get_threads_count(threadpool);
printf("Created thread pool with %zu threads\n", threads_count);
struct array_addition_context context = { augend, addend, sum };
pthreadpool_parallelize_1d(threadpool,
(pthreadpool_task_1d_t) add_arrays,
- (void**) &context,
+ (void*) &context,
ARRAY_SIZE,
PTHREADPOOL_FLAG_DISABLE_DENORMALS /* flags */);
-
+
pthreadpool_destroy(threadpool);
threadpool = NULL;
diff --git a/bench/latency.cc b/bench/latency.cc
index f500cdf..4fb59ee 100644
--- a/bench/latency.cc
+++ b/bench/latency.cc
@@ -1,12 +1,11 @@
#include <benchmark/benchmark.h>
-#include <unistd.h>
-
#include <pthreadpool.h>
+#include <thread>
static void SetNumberOfThreads(benchmark::internal::Benchmark* benchmark) {
- const int max_threads = sysconf(_SC_NPROCESSORS_ONLN);
+ const int max_threads = std::thread::hardware_concurrency();
for (int t = 1; t <= max_threads; t++) {
benchmark->Arg(t);
}
diff --git a/cmake/DownloadCpuinfo.cmake b/cmake/DownloadCpuinfo.cmake
new file mode 100644
index 0000000..25213a0
--- /dev/null
+++ b/cmake/DownloadCpuinfo.cmake
@@ -0,0 +1,15 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR)
+
+PROJECT(cpuinfo-download NONE)
+
+INCLUDE(ExternalProject)
+ExternalProject_Add(cpuinfo
+ URL https://github.com/pytorch/cpuinfo/archive/0cc563acb9baac39f2c1349bc42098c4a1da59e3.tar.gz
+ URL_HASH SHA256=80625d0b69a3d69b70c2236f30db2c542d0922ccf9bb51a61bc39c49fac91a35
+ SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source"
+ BINARY_DIR "${CMAKE_BINARY_DIR}/cpuinfo"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
+ TEST_COMMAND ""
+)
diff --git a/include/pthreadpool.h b/include/pthreadpool.h
index 2443285..de4016b 100644
--- a/include/pthreadpool.h
+++ b/include/pthreadpool.h
@@ -16,95 +16,450 @@ typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, siz
typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t);
+typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t);
+
+/**
+ * Disable support for denormalized numbers to the maximum extent possible for
+ * the duration of the computation.
+ *
+ * Handling denormalized floating-point numbers is often implemented in
+ * microcode, and incurs significant performance degradation. This hint
+ * instructs the thread pool to disable support for denormalized numbers before
+ * running the computation by manipulating architecture-specific control
+ * registers, and restore the initial value of control registers after the
+ * computation is complete. The thread pool temporarily disables denormalized
+ * numbers on all threads involved in the computation (i.e. the calling thread,
+ * and potentially worker threads).
+ *
+ * Disabling denormalized numbers may have a small negative effect on results'
+ * accuracy. As architectures differ in their capabilities to control the
+ * processing of denormalized numbers, using this flag may also hurt results'
+ * reproducibility across different instruction set architectures.
+ */
#define PTHREADPOOL_FLAG_DISABLE_DENORMALS 0x00000001
+/**
+ * Yield worker threads to the system scheduler after the operation is finished.
+ *
+ * Forces workers to use kernel wait (instead of the default active spin-wait)
+ * for new commands after this command is processed. This flag affects only the
+ * immediate next operation on this thread pool. To make the thread pool always
+ * use kernel wait, pass this flag to all parallelization functions.
+ */
+#define PTHREADPOOL_FLAG_YIELD_WORKERS 0x00000002
+
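
A rough usage sketch (the task, data, and count names here are hypothetical,
not part of this header): flags combine bitwise, and passing
PTHREADPOOL_FLAG_YIELD_WORKERS on the last call of a batch makes the workers
block in the kernel until the next batch arrives:

    /* Hypothetical task: squares one element of a float array. */
    static void square_item(void* context, size_t i) {
        float* data = (float*) context;
        data[i] *= data[i];
    }

    /* Denormals are flushed while the task runs; afterwards the workers
       block in the kernel instead of spin-waiting for the next command. */
    pthreadpool_parallelize_1d(
        threadpool,
        square_item,
        (void*) data,
        item_count,
        PTHREADPOOL_FLAG_DISABLE_DENORMALS | PTHREADPOOL_FLAG_YIELD_WORKERS);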
#ifdef __cplusplus
extern "C" {
#endif
/**
- * Creates a thread pool with the specified number of threads.
+ * Create a thread pool with the specified number of threads.
*
- * @param[in] threads_count The number of threads in the thread pool.
- * A value of 0 has special interpretation: it creates a thread for each
- * processor core available in the system.
+ * @param threads_count the number of threads in the thread pool.
+ * A value of 0 has special interpretation: it creates a thread pool with as
+ * many threads as there are logical processors in the system.
*
- * @returns A pointer to an opaque thread pool object.
- * On error the function returns NULL and sets errno accordingly.
+ * @returns A pointer to an opaque thread pool object if the call is
+ * successful, or a NULL pointer if the call failed.
*/
pthreadpool_t pthreadpool_create(size_t threads_count);
/**
- * Queries the number of threads in a thread pool.
+ * Query the number of threads in a thread pool.
*
- * @param[in] threadpool The thread pool to query.
+ * @param threadpool the thread pool to query.
*
* @returns The number of threads in the thread pool.
*/
size_t pthreadpool_get_threads_count(pthreadpool_t threadpool);
/**
- * Processes items in parallel using threads from a thread pool.
+ * Process items on a 1D grid.
*
- * When the call returns, all items have been processed and the thread pool is
- * ready for a new task.
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range; i++)
+ * function(context, i);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
- * @param[in] threadpool The thread pool to use for parallelisation.
- * @param[in] function The function to call for each item.
- * @param[in] argument The first argument passed to the @a function.
- * @param[in] items The number of items to process. The @a function
- * will be called once for each item.
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range the number of items on the 1D grid to process. The
+ * specified function will be called once for each item.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_t function,
- void* argument,
+ void* context,
+ size_t range,
+ uint32_t flags);
+
+/**
+ * Process items on a 1D grid using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range; i++)
+ * function(context, uarch_index, i);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or the index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range the number of items on the 1D grid to process.
+ * The specified function will be called once for each item.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_1d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
size_t range,
uint32_t flags);
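
A minimal sketch of a microarchitecture-aware task (the kernels and their
index assignments are hypothetical): uarch_index selects a code path tuned
for the core the worker happens to run on, e.g. big versus little cores on
heterogeneous ARM systems:

    /* Hypothetical kernels: index 0 is the generic path, index 1 is tuned
       for a specific big core, so max_uarch_index below is 1. */
    static void compute_item(void* context, uint32_t uarch_index, size_t i) {
        struct compute_context* ctx = (struct compute_context*) context;
        if (uarch_index == 1) {
            compute_item_big_core(ctx, i);
        } else {
            compute_item_generic(ctx, i);
        }
    }

    pthreadpool_parallelize_1d_with_uarch(
        threadpool, compute_item, (void*) &ctx,
        0 /* default_uarch_index */, 1 /* max_uarch_index */,
        range, 0 /* flags */);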
+/**
+ * Process items on a 1D grid with specified maximum tile size.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range; i += tile)
+ * function(context, i, min(range - i, tile));
+ *
+ * When the call returns, all items have been processed and the thread pool is
+ * ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool,
+ * the calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range the number of items on the 1D grid to process.
+ * @param tile the maximum number of items on the 1D grid to process in
+ * one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_1d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_tile_1d_t function,
- void* argument,
+ void* context,
size_t range,
size_t tile,
uint32_t flags);
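
A sketch of a tiled task (names hypothetical): each call receives a start
index and the actual number of items in its tile, so only the last call can
receive fewer than tile items:

    /* Processes items [start, start + count), where count <= tile. */
    static void scale_block(void* context, size_t start, size_t count) {
        float* data = (float*) context;
        for (size_t i = start; i < start + count; i++) {
            data[i] *= 0.5f;
        }
    }

    /* range = 1000, tile = 64: 15 calls with 64 items, one call with 40. */
    pthreadpool_parallelize_1d_tile_1d(
        threadpool, scale_block, (void*) data,
        1000 /* range */, 64 /* tile */, 0 /* flags */);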
+/**
+ * Process items on a 2D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * function(context, i, j);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_2d(
pthreadpool_t threadpool,
pthreadpool_task_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
uint32_t flags);
+/**
+ * Process items on a 2D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, i, j, min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_2d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_1d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t tile_j,
uint32_t flags);
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_i the maximum number of items along the first dimension of
+ * the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_2d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_2d_t function,
- void* argument,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags);
+
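
A sketch for the blocked 2D variant (the transpose context is hypothetical):
the task receives the tile origin (i, j) plus the actual tile extents, which
shrink at the right and bottom edges of the grid:

    struct transpose_context { const float* in; float* out; size_t rows, cols; };

    /* Transposes one count_i x count_j block of a rows x cols matrix. */
    static void transpose_tile(void* context, size_t i, size_t j,
                               size_t count_i, size_t count_j) {
        struct transpose_context* ctx = (struct transpose_context*) context;
        for (size_t ii = i; ii < i + count_i; ii++) {
            for (size_t jj = j; jj < j + count_j; jj++) {
                ctx->out[jj * ctx->rows + ii] = ctx->in[ii * ctx->cols + jj];
            }
        }
    }

    pthreadpool_parallelize_2d_tile_2d(
        threadpool, transpose_tile, (void*) &ctx,
        ctx.rows, ctx.cols, 32 /* tile_i */, 32 /* tile_j */, 0 /* flags */);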
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, uarch_index, i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo,
+ * cpuinfo initialization failed, or the index returned
+ * by cpuinfo_get_current_uarch_index() exceeds
+ * the max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected
+ * by the specified function. If the index returned
+ * by cpuinfo_get_current_uarch_index() exceeds this
+ * value, default_uarch_index will be used instead.
+ * default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 2D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 2D grid.
+ * @param tile_i the maximum number of items along the first
+ * dimension of the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second
+ * dimension of the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j,
uint32_t flags);
+/**
+ * Process items on a 3D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * function(context, i, j, k,
+ * min(range_j - j, tile_j), min(range_k - k, tile_k));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 3D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 3D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 3D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 3D grid to process in one function call.
+ * @param tile_k the maximum number of items along the third dimension of
+ * the 3D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_3d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_2d_t function,
- void* argument,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags);
+
+/**
+ * Process items on a 3D grid with the specified maximum tile size along the
+ * last two grid dimensions using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * function(context, uarch_index, i, j, k,
+ * min(range_j - j, tile_j), min(range_k - k, tile_k));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or the index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 3D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 3D grid.
+ * @param range_k the number of items to process along the third
+ * dimension of the 3D grid.
+ * @param tile_j the maximum number of items along the second
+ * dimension of the 3D grid to process in one function call.
+ * @param tile_k the maximum number of items along the third
+ * dimension of the 3D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
@@ -112,10 +467,114 @@ void pthreadpool_parallelize_3d_tile_2d(
size_t tile_k,
uint32_t flags);
+/**
+ * Process items on a 4D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * function(context, i, j, k, l,
+ * min(range_k - k, tile_k), min(range_l - l, tile_l));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 4D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 4D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 4D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 4D grid.
+ * @param tile_k the maximum number of items along the third dimension of
+ * the 4D grid to process in one function call.
+ * @param tile_l the maximum number of items along the fourth dimension of
+ * the 4D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_4d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_2d_t function,
- void* argument,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags);
+
+/**
+ * Process items on a 4D grid with the specified maximum tile size along the
+ * last two grid dimensions using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * function(context, uarch_index, i, j, k, l,
+ * min(range_k - k, tile_k), min(range_l - l, tile_l));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or the index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 4D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 4D grid.
+ * @param range_k the number of items to process along the third
+ * dimension of the 4D grid.
+ * @param range_l the number of items to process along the fourth
+ * dimension of the 4D grid.
+ * @param tile_k the maximum number of items along the third
+ * dimension of the 4D grid to process in one function call.
+ * @param tile_l the maximum number of items along the fourth
+ * dimension of the 4D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
@@ -124,10 +583,51 @@ void pthreadpool_parallelize_4d_tile_2d(
size_t tile_l,
uint32_t flags);
+/**
+ * Process items on a 5D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * function(context, i, j, k, l, m,
+ * min(range_l - l, tile_l), min(range_m - m, tile_m));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 5D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 5D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 5D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 5D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 5D grid.
+ * @param tile_l the maximum number of items along the fourth dimension of
+ * the 5D grid to process in one function call.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 5D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_5d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_5d_tile_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t range_k,
@@ -137,10 +637,54 @@ void pthreadpool_parallelize_5d_tile_2d(
size_t tile_m,
uint32_t flags);
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * for (size_t n = 0; n < range_n; n += tile_n)
+ * function(context, i, j, k, l, m, n,
+ * min(range_m - m, tile_m), min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 6D grid to process in one function call.
+ * @param tile_n the maximum number of items along the sixth dimension of
+ * the 6D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_6d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_6d_tile_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t range_k,
diff --git a/src/threadpool-atomics.h b/src/threadpool-atomics.h
new file mode 100644
index 0000000..92fcd8d
--- /dev/null
+++ b/src/threadpool-atomics.h
@@ -0,0 +1,178 @@
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#if defined(__wasm__) && defined(__EMSCRIPTEN_PTHREADS__) && defined(__clang__)
+ /*
+ * Clang for WebAssembly target lacks stdatomic.h header,
+ * even though it supports the necessary low-level intrinsics.
+ * Thus, we implement pthreadpool atomic functions on top of
+ * low-level Clang-specific interfaces for this target.
+ */
+
+ typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
+ typedef _Atomic(size_t) pthreadpool_atomic_size_t;
+ typedef _Atomic(void*) pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_RELAXED);
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_RELAXED);
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELEASE);
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELEASE);
+ }
+
+ static inline size_t pthreadpool_fetch_sub_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t decrement)
+ {
+ return __c11_atomic_fetch_sub(address, decrement, __ATOMIC_RELAXED);
+ }
+
+ static inline bool pthreadpool_compare_exchange_weak_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t* expected_value,
+ size_t new_value)
+ {
+ return __c11_atomic_compare_exchange_weak(
+ address, expected_value, new_value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ }
+
+ static inline void pthreadpool_fence_release() {
+ __c11_atomic_thread_fence(__ATOMIC_RELEASE);
+ }
+#else
+ #include <stdatomic.h>
+
+ typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
+ typedef _Atomic(size_t) pthreadpool_atomic_size_t;
+ typedef _Atomic(void*) pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return atomic_load_explicit(address, memory_order_relaxed);
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return atomic_load_explicit(address, memory_order_relaxed);
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return atomic_load_explicit(address, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ atomic_store_explicit(address, value, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_release);
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_release);
+ }
+
+ static inline size_t pthreadpool_fetch_sub_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t decrement)
+ {
+ return atomic_fetch_sub_explicit(address, decrement, memory_order_relaxed);
+ }
+
+ static inline bool pthreadpool_compare_exchange_weak_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t* expected_value,
+ size_t new_value)
+ {
+ return atomic_compare_exchange_weak_explicit(
+ address, expected_value, new_value, memory_order_relaxed, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ atomic_thread_fence(memory_order_acquire);
+ }
+
+ static inline void pthreadpool_fence_release() {
+ atomic_thread_fence(memory_order_release);
+ }
+#endif
diff --git a/src/threadpool-pthreads.c b/src/threadpool-pthreads.c
index 6c6a6d4..0a9c06d 100644
--- a/src/threadpool-pthreads.c
+++ b/src/threadpool-pthreads.c
@@ -1,5 +1,5 @@
/* Standard C headers */
-#include <stdatomic.h>
+#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
@@ -9,10 +9,27 @@
#include <pthread.h>
#include <unistd.h>
-/* Futex-specific headers */
+#ifndef PTHREADPOOL_USE_CPUINFO
+ #define PTHREADPOOL_USE_CPUINFO 0
+#endif
+
#ifndef PTHREADPOOL_USE_FUTEX
#if defined(__linux__)
#define PTHREADPOOL_USE_FUTEX 1
+ #elif defined(__EMSCRIPTEN__)
+ #define PTHREADPOOL_USE_FUTEX 1
+ #else
+ #define PTHREADPOOL_USE_FUTEX 0
+ #endif
+#endif
+
+#if PTHREADPOOL_USE_CPUINFO
+ #include <cpuinfo.h>
+#endif
+
+/* Futex-specific headers */
+#if PTHREADPOOL_USE_FUTEX
+ #if defined(__linux__)
#include <sys/syscall.h>
#include <linux/futex.h>
@@ -23,14 +40,22 @@
#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif
- #elif defined(__native_client__)
- #define PTHREADPOOL_USE_FUTEX 1
- #include <irt.h>
+ #elif defined(__EMSCRIPTEN__)
+ /* math.h for INFINITY constant */
+ #include <math.h>
+
+ #include <emscripten/threading.h>
#else
- #define PTHREADPOOL_USE_FUTEX 0
+ #error "Platform-specific implementation of futex_wait and futex_wake_all required"
#endif
#endif
+#ifdef _WIN32
+ #define NOMINMAX
+ #include <malloc.h>
+ #include <sysinfoapi.h>
+#endif
+
/* Dependencies */
#include <fxdiv.h>
@@ -39,6 +64,7 @@
/* Internal headers */
#include "threadpool-utils.h"
+#include "threadpool-atomics.h"
/* Number of iterations in spin-wait loop before going into futex/mutex wait */
#define PTHREADPOOL_SPIN_WAIT_ITERATIONS 1000000
@@ -83,27 +109,20 @@ static inline size_t min(size_t a, size_t b) {
#if PTHREADPOOL_USE_FUTEX
#if defined(__linux__)
- static int futex_wait(_Atomic uint32_t* address, uint32_t value) {
+ static int futex_wait(pthreadpool_atomic_uint32_t* address, uint32_t value) {
return syscall(SYS_futex, address, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, NULL);
}
- static int futex_wake_all(_Atomic uint32_t* address) {
+ static int futex_wake_all(pthreadpool_atomic_uint32_t* address) {
return syscall(SYS_futex, address, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT_MAX);
}
- #elif defined(__native_client__)
- static struct nacl_irt_futex nacl_irt_futex = { 0 };
- static pthread_once_t nacl_init_guard = PTHREAD_ONCE_INIT;
- static void nacl_init(void) {
- nacl_interface_query(NACL_IRT_FUTEX_v0_1, &nacl_irt_futex, sizeof(nacl_irt_futex));
+ #elif defined(__EMSCRIPTEN__)
+ static int futex_wait(pthreadpool_atomic_uint32_t* address, uint32_t value) {
+ return emscripten_futex_wait((volatile void*) address, value, INFINITY);
}
- static int futex_wait(_Atomic uint32_t* address, uint32_t value) {
- return nacl_irt_futex.futex_wait_abs((_Atomic int*) address, (int) value, NULL);
- }
-
- static int futex_wake_all(_Atomic uint32_t* address) {
- int count;
- return nacl_irt_futex.futex_wake((_Atomic int*) address, INT_MAX, &count);
+ static int futex_wake_all(pthreadpool_atomic_uint32_t* address) {
+ return emscripten_futex_wake((volatile void*) address, INT_MAX);
}
#else
#error "Platform-specific implementation of futex_wait and futex_wake_all required"
@@ -114,7 +133,7 @@ static inline size_t min(size_t a, size_t b) {
enum threadpool_command {
threadpool_command_init,
- threadpool_command_compute_1d,
+ threadpool_command_parallelize,
threadpool_command_shutdown,
};
@@ -123,19 +142,19 @@ struct PTHREADPOOL_CACHELINE_ALIGNED thread_info {
* Index of the first element in the work range.
* Before processing a new element the owning worker thread increments this value.
*/
- atomic_size_t range_start;
+ pthreadpool_atomic_size_t range_start;
/**
* Index of the element after the last element of the work range.
* Before processing a new element the stealing worker thread decrements this value.
*/
- atomic_size_t range_end;
+ pthreadpool_atomic_size_t range_end;
/**
* The number of elements in the work range.
* Due to race conditions range_length <= range_end - range_start.
* The owning worker thread must decrement this value before incrementing @a range_start.
* The stealing worker thread must decrement this value before decrementing @a range_end.
*/
- atomic_size_t range_length;
+ pthreadpool_atomic_size_t range_length;
/**
* Thread number in the 0..threads_count-1 range.
*/
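
Condensed from thread_parallelize_1d below, the protocol these three counters
implement: the owner consumes items from the front of its range, stealers take
them from the back, and the shared range_length decrement arbitrates between
the two so that the ends never cross:

    /* Owner: claim the next item from the front of its own range. */
    while (atomic_decrement(&thread->range_length)) {
        task(argument, range_start++);  /* range_start was loaded once, relaxed */
    }
    /* Stealer: claim the next item from the back of another thread's range. */
    while (atomic_decrement(&other_thread->range_length)) {
        const size_t item_id =
            pthreadpool_fetch_sub_relaxed_size_t(&other_thread->range_end, 1) - 1;
        task(argument, item_id);
    }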
@@ -153,11 +172,22 @@ struct PTHREADPOOL_CACHELINE_ALIGNED thread_info {
PTHREADPOOL_STATIC_ASSERT(sizeof(struct thread_info) % PTHREADPOOL_CACHELINE_SIZE == 0, "thread_info structure must occupy an integer number of cache lines (64 bytes)");
+struct pthreadpool_1d_with_uarch_params {
+ /**
+ * Copy of the default uarch index argument passed to a microarchitecture-aware parallelization function.
+ */
+ uint32_t default_uarch_index;
+ /**
+ * Copy of the max uarch index argument passed to a microarchitecture-aware parallelization function.
+ */
+ uint32_t max_uarch_index;
+};
+
struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
/**
* The number of threads that are processing an operation.
*/
- atomic_size_t active_threads;
+ pthreadpool_atomic_size_t active_threads;
#if PTHREADPOOL_USE_FUTEX
/**
* Indicates if there are active threads.
@@ -165,24 +195,35 @@ struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
* - has_active_threads == 0 if active_threads == 0
* - has_active_threads == 1 if active_threads != 0
*/
- _Atomic uint32_t has_active_threads;
+ pthreadpool_atomic_uint32_t has_active_threads;
#endif
/**
* The last command submitted to the thread pool.
*/
- _Atomic uint32_t command;
+ pthreadpool_atomic_uint32_t command;
+ /**
+ * The entry point function to call for each thread in the thread pool for parallelization tasks.
+ */
+ pthreadpool_atomic_void_p thread_function;
/**
* The function to call for each item.
*/
- void *_Atomic task;
+ pthreadpool_atomic_void_p task;
/**
* The first argument to the item processing function.
*/
- void *_Atomic argument;
+ pthreadpool_atomic_void_p argument;
+ /**
+ * Additional parallelization parameters.
+ * These parameters are specific to each thread_function.
+ */
+ union {
+ struct pthreadpool_1d_with_uarch_params parallelize_1d_with_uarch;
+ } params;
/**
- * Copy of the flags passed to parallelization function.
+ * Copy of the flags passed to a parallelization function.
*/
- _Atomic uint32_t flags;
+ pthreadpool_atomic_uint32_t flags;
/**
* Serializes concurrent calls to @a pthreadpool_parallelize_* from different threads.
*/
@@ -205,8 +246,14 @@ struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
*/
pthread_cond_t command_condvar;
#endif
+#if PTHREADPOOL_USE_CPUINFO
+ /**
+ * Indicates whether the cpuinfo library initialized successfully. Never changes after pthreadpool_create.
+ */
+ bool cpuinfo_is_initialized;
+#endif
/**
- * The number of threads in the thread pool. Never changes after initialization.
+ * The number of threads in the thread pool. Never changes after pthreadpool_create.
*/
size_t threads_count;
/**
@@ -219,13 +266,13 @@ PTHREADPOOL_STATIC_ASSERT(sizeof(struct pthreadpool) % PTHREADPOOL_CACHELINE_SIZ
static void checkin_worker_thread(struct pthreadpool* threadpool) {
#if PTHREADPOOL_USE_FUTEX
- if (atomic_fetch_sub_explicit(&threadpool->active_threads, 1, memory_order_relaxed) == 1) {
- atomic_store_explicit(&threadpool->has_active_threads, 0, memory_order_release);
+ if (pthreadpool_fetch_sub_relaxed_size_t(&threadpool->active_threads, 1) == 1) {
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 0);
futex_wake_all(&threadpool->has_active_threads);
}
#else
pthread_mutex_lock(&threadpool->completion_mutex);
- if (atomic_fetch_sub_explicit(&threadpool->active_threads, 1, memory_order_relaxed) == 1) {
+ if (pthreadpool_fetch_sub_relaxed_size_t(&threadpool->active_threads, 1) == 1) {
pthread_cond_signal(&threadpool->completion_condvar);
}
pthread_mutex_unlock(&threadpool->completion_mutex);
@@ -235,12 +282,12 @@ static void checkin_worker_thread(struct pthreadpool* threadpool) {
static void wait_worker_threads(struct pthreadpool* threadpool) {
/* Initial check */
#if PTHREADPOOL_USE_FUTEX
- uint32_t has_active_threads = atomic_load_explicit(&threadpool->has_active_threads, memory_order_relaxed);
+ uint32_t has_active_threads = pthreadpool_load_relaxed_uint32_t(&threadpool->has_active_threads);
if (has_active_threads == 0) {
return;
}
#else
- size_t active_threads = atomic_load_explicit(&threadpool->active_threads, memory_order_relaxed);
+ size_t active_threads = pthreadpool_load_relaxed_size_t(&threadpool->active_threads);
if (active_threads == 0) {
return;
}
@@ -249,15 +296,15 @@ static void wait_worker_threads(struct pthreadpool* threadpool) {
/* Spin-wait */
for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
/* This fence serves as a sleep instruction */
- atomic_thread_fence(memory_order_acquire);
+ pthreadpool_fence_acquire();
#if PTHREADPOOL_USE_FUTEX
- has_active_threads = atomic_load_explicit(&threadpool->has_active_threads, memory_order_relaxed);
+ has_active_threads = pthreadpool_load_relaxed_uint32_t(&threadpool->has_active_threads);
if (has_active_threads == 0) {
return;
}
#else
- active_threads = atomic_load_explicit(&threadpool->active_threads, memory_order_relaxed);
+ active_threads = pthreadpool_load_relaxed_size_t(&threadpool->active_threads);
if (active_threads == 0) {
return;
}
@@ -266,31 +313,41 @@ static void wait_worker_threads(struct pthreadpool* threadpool) {
/* Fall-back to mutex/futex wait */
#if PTHREADPOOL_USE_FUTEX
- while ((has_active_threads = atomic_load(&threadpool->has_active_threads)) != 0) {
+ while ((has_active_threads = pthreadpool_load_relaxed_uint32_t(&threadpool->has_active_threads)) != 0) {
futex_wait(&threadpool->has_active_threads, 1);
}
#else
pthread_mutex_lock(&threadpool->completion_mutex);
- while (atomic_load_explicit(&threadpool->active_threads, memory_order_relaxed) != 0) {
+ while (pthreadpool_load_relaxed_size_t(&threadpool->active_threads) != 0) {
pthread_cond_wait(&threadpool->completion_condvar, &threadpool->completion_mutex);
};
pthread_mutex_unlock(&threadpool->completion_mutex);
#endif
}
-inline static bool atomic_decrement(atomic_size_t* value) {
- size_t actual_value = atomic_load_explicit(value, memory_order_relaxed);
- if (actual_value == 0) {
- return false;
- }
- while (!atomic_compare_exchange_weak_explicit(
- value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
- {
+inline static bool atomic_decrement(pthreadpool_atomic_size_t* value) {
+ #if defined(__clang__) && (defined(__arm__) || defined(__aarch64__))
+ size_t actual_value;
+ do {
+ actual_value = __builtin_arm_ldrex((const volatile size_t*) value);
+ if (actual_value == 0) {
+ __builtin_arm_clrex();
+ return false;
+ }
+ } while (__builtin_arm_strex(actual_value - 1, (volatile size_t*) value) != 0);
+ return true;
+ #else
+ size_t actual_value = pthreadpool_load_relaxed_size_t(value);
if (actual_value == 0) {
return false;
}
- }
- return true;
+ while (!pthreadpool_compare_exchange_weak_relaxed_size_t(value, &actual_value, actual_value - 1)) {
+ if (actual_value == 0) {
+ return false;
+ }
+ }
+ return true;
+ #endif
}
inline static size_t modulo_decrement(uint32_t i, uint32_t n) {
@@ -302,11 +359,13 @@ inline static size_t modulo_decrement(uint32_t i, uint32_t n) {
return i - 1;
}
+typedef void (*thread_function_t)(struct pthreadpool* threadpool, struct thread_info* thread);
+
static void thread_parallelize_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
- const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) atomic_load_explicit(&threadpool->task, memory_order_relaxed);
- void *const argument = atomic_load_explicit(&threadpool->argument, memory_order_relaxed);
+ const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
/* Process thread's own range of items */
- size_t range_start = atomic_load_explicit(&thread->range_start, memory_order_relaxed);
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
while (atomic_decrement(&thread->range_length)) {
task(argument, range_start++);
}
@@ -320,53 +379,94 @@ static void thread_parallelize_1d(struct pthreadpool* threadpool, struct thread_
{
struct thread_info* other_thread = &threadpool->threads[tid];
while (atomic_decrement(&other_thread->range_length)) {
- const size_t item_id = atomic_fetch_sub_explicit(&other_thread->range_end, 1, memory_order_relaxed) - 1;
+ const size_t item_id = pthreadpool_fetch_sub_relaxed_size_t(&other_thread->range_end, 1) - 1;
task(argument, item_id);
}
}
- atomic_thread_fence(memory_order_release);
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_1d_with_uarch(struct pthreadpool* threadpool, struct thread_info* thread) {
+ const pthreadpool_task_1d_with_id_t task = (pthreadpool_task_1d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_1d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ if (threadpool && threadpool->cpuinfo_is_initialized) {
+ uarch_index = cpuinfo_get_current_uarch_index();
+ if (uarch_index > threadpool->params.parallelize_1d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ }
+ #endif
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (atomic_decrement(&thread->range_length)) {
+ task(argument, uarch_index, range_start++);
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (atomic_decrement(&other_thread->range_length)) {
+ const size_t item_id = pthreadpool_fetch_sub_relaxed_size_t(&other_thread->range_end, 1) - 1;
+ task(argument, uarch_index, item_id);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
}
static uint32_t wait_for_new_command(
struct pthreadpool* threadpool,
- uint32_t last_command)
+ uint32_t last_command,
+ uint32_t last_flags)
{
- uint32_t command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
+ uint32_t command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
if (command != last_command) {
- atomic_thread_fence(memory_order_acquire);
return command;
}
- /* Spin-wait loop */
- for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- atomic_thread_fence(memory_order_acquire);
+ if ((last_flags & PTHREADPOOL_FLAG_YIELD_WORKERS) == 0) {
+ /* Spin-wait loop */
+ for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
+ /* This fence serves as a sleep instruction */
+ pthreadpool_fence_acquire();
- command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
- if (command != last_command) {
- atomic_thread_fence(memory_order_acquire);
- return command;
+ command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
+ if (command != last_command) {
+ return command;
+ }
}
}
- /* Spin-wait timed out, fall back to mutex/futex wait */
+ /* Spin-wait disabled or timed out, fall back to mutex/futex wait */
#if PTHREADPOOL_USE_FUTEX
do {
futex_wait(&threadpool->command, last_command);
- command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
+ command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
} while (command == last_command);
#else
/* Lock the command mutex */
pthread_mutex_lock(&threadpool->command_mutex);
/* Read the command */
- while ((command = atomic_load_explicit(&threadpool->command, memory_order_relaxed)) == last_command) {
+ while ((command = pthreadpool_load_relaxed_uint32_t(&threadpool->command)) == last_command) {
/* Wait for new command */
pthread_cond_wait(&threadpool->command_condvar, &threadpool->command_mutex);
}
/* Read a new command */
pthread_mutex_unlock(&threadpool->command_mutex);
#endif
- atomic_thread_fence(memory_order_acquire);
return command;
}
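
futex_wait() and futex_wake_all() are thin wrappers over the Linux futex system call; the real wrappers, defined near the top of this file, also cover the Emscripten case. A hedged sketch of the Linux variant, assuming the command word is a 32-bit atomic:

#include <limits.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: block while *address still holds the expected value;
 * returns once it may have changed (spurious wakeups are possible). */
static int futex_wait(_Atomic uint32_t* address, uint32_t value) {
	return syscall(SYS_futex, address, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
		value, NULL, NULL, 0);
}

/* Sketch: wake every thread currently blocked on the address. */
static int futex_wake_all(_Atomic uint32_t* address) {
	return syscall(SYS_futex, address, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
		INT_MAX, NULL, NULL, 0);
}

The kernel re-checks that *address still equals the expected value before sleeping, which is what makes the load-then-wait sequence in the loop above race-free.
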
@@ -375,24 +475,30 @@ static void* thread_main(void* arg) {
struct pthreadpool* threadpool = ((struct pthreadpool*) (thread - thread->thread_number)) - 1;
uint32_t last_command = threadpool_command_init;
struct fpu_state saved_fpu_state = { 0 };
+ uint32_t flags = 0;
/* Check in */
checkin_worker_thread(threadpool);
/* Monitor new commands and act accordingly */
for (;;) {
- uint32_t command = wait_for_new_command(threadpool, last_command);
- const uint32_t flags = atomic_load_explicit(&threadpool->flags, memory_order_relaxed);
+ uint32_t command = wait_for_new_command(threadpool, last_command, flags);
+ pthreadpool_fence_acquire();
+
+ flags = pthreadpool_load_relaxed_uint32_t(&threadpool->flags);
/* Process command */
switch (command & THREADPOOL_COMMAND_MASK) {
- case threadpool_command_compute_1d:
+ case threadpool_command_parallelize:
{
+ const thread_function_t thread_function =
+ (thread_function_t) pthreadpool_load_relaxed_void_p(&threadpool->thread_function);
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
saved_fpu_state = get_fpu_state();
disable_fpu_denormals();
}
- thread_parallelize_1d(threadpool, thread);
+
+ thread_function(threadpool, thread);
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
set_fpu_state(saved_fpu_state);
}
@@ -424,6 +530,11 @@ static struct pthreadpool* pthreadpool_allocate(size_t threads_count) {
if (threadpool == NULL) {
return NULL;
}
+ #elif defined(_WIN32)
+ threadpool = _aligned_malloc(threadpool_size, PTHREADPOOL_CACHELINE_SIZE);
+ if (threadpool == NULL) {
+ return NULL;
+ }
#else
if (posix_memalign((void**) &threadpool, PTHREADPOOL_CACHELINE_SIZE, threadpool_size) != 0) {
return NULL;
@@ -434,13 +545,25 @@ static struct pthreadpool* pthreadpool_allocate(size_t threads_count) {
}
struct pthreadpool* pthreadpool_create(size_t threads_count) {
-#if defined(__native_client__)
- pthread_once(&nacl_init_guard, nacl_init);
-#endif
-
if (threads_count == 0) {
- threads_count = (size_t) sysconf(_SC_NPROCESSORS_ONLN);
+ #if defined(_SC_NPROCESSORS_ONLN)
+ threads_count = (size_t) sysconf(_SC_NPROCESSORS_ONLN);
+ #if defined(__EMSCRIPTEN_PTHREADS__)
+ /* Limit the number of threads to 8 to match the link-time PTHREAD_POOL_SIZE option */
+ if (threads_count >= 8) {
+ threads_count = 8;
+ }
+ #endif
+ #elif defined(_WIN32)
+ SYSTEM_INFO system_info;
+ ZeroMemory(&system_info, sizeof(system_info));
+ GetSystemInfo(&system_info);
+ threads_count = (size_t) system_info.dwNumberOfProcessors;
+ #else
+ #error "Unsupported platform"
+ #endif
}
+
struct pthreadpool* threadpool = pthreadpool_allocate(threads_count);
if (threadpool == NULL) {
return NULL;
@@ -449,6 +572,9 @@ struct pthreadpool* pthreadpool_create(size_t threads_count) {
for (size_t tid = 0; tid < threads_count; tid++) {
threadpool->threads[tid].thread_number = tid;
}
+ #if PTHREADPOOL_USE_CPUINFO
+ threadpool->cpuinfo_is_initialized = cpuinfo_initialize();
+ #endif
/* Thread pool with a single thread computes everything on the caller thread. */
if (threads_count > 1) {
@@ -461,10 +587,9 @@ struct pthreadpool* pthreadpool_create(size_t threads_count) {
#endif
#if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(&threadpool->has_active_threads, 1, memory_order_relaxed);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1);
#endif
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_release);
+ pthreadpool_store_release_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
/* Caller thread serves as worker #0. Thus, we create system threads starting with worker #1. */
for (size_t tid = 1; tid < threads_count; tid++) {
@@ -485,6 +610,114 @@ size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
}
}
+static void pthreadpool_parallelize(
+ struct pthreadpool* threadpool,
+ thread_function_t thread_function,
+ const void* params,
+ size_t params_size,
+ void* task,
+ void* context,
+ size_t linear_range,
+ uint32_t flags)
+{
+ assert(threadpool != NULL);
+ assert(thread_function != NULL);
+ assert(task != NULL);
+ assert(linear_range > 1);
+
+ /* Protect the global threadpool structures */
+ pthread_mutex_lock(&threadpool->execution_mutex);
+
+ #if !PTHREADPOOL_USE_FUTEX
+ /* Lock the command variables to ensure that threads don't start processing before they observe the complete command with all arguments */
+ pthread_mutex_lock(&threadpool->command_mutex);
+ #endif
+
+ /* Set up global arguments */
+ pthreadpool_store_relaxed_void_p(&threadpool->thread_function, (void*) thread_function);
+ pthreadpool_store_relaxed_void_p(&threadpool->task, task);
+ pthreadpool_store_relaxed_void_p(&threadpool->argument, context);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->flags, flags);
+
+ /* Locking of completion_mutex not needed: readers are sleeping on command_condvar */
+ const size_t threads_count = threadpool->threads_count;
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+ #if PTHREADPOOL_USE_FUTEX
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1);
+ #endif
+
+ if (params_size != 0) {
+ memcpy(&threadpool->params, params, params_size);
+ pthreadpool_fence_release();
+ }
+
+ /* Spread the work between threads */
+ size_t range_start = 0;
+ for (size_t tid = 0; tid < threads_count; tid++) {
+ struct thread_info* thread = &threadpool->threads[tid];
+ const size_t range_end = multiply_divide(linear_range, tid + 1, threads_count);
+ pthreadpool_store_relaxed_size_t(&thread->range_start, range_start);
+ pthreadpool_store_relaxed_size_t(&thread->range_end, range_end);
+ pthreadpool_store_relaxed_size_t(&thread->range_length, range_end - range_start);
+
+ /* The next subrange starts where the previous ended */
+ range_start = range_end;
+ }
+
+ /*
+ * Update the threadpool command.
+  * Importantly, do it after initializing the command parameters (range, task, argument, flags).
+  * ~(threadpool->command | THREADPOOL_COMMAND_MASK) flips the bits not in the command mask
+  * to ensure the unmasked command is different from the last command, because worker threads
+  * monitor for a change in the unmasked command.
+ */
+ const uint32_t old_command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
+ const uint32_t new_command = ~(old_command | THREADPOOL_COMMAND_MASK) | threadpool_command_parallelize;
+
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated command parameters.
+ *
+  * Note: release semantics are necessary even with a condition variable, because the workers might
+  * be waiting in a spin-loop rather than on the condition variable.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, new_command);
+ #if PTHREADPOOL_USE_FUTEX
+ /* Wake up the threads */
+ futex_wake_all(&threadpool->command);
+ #else
+ /* Unlock the command variables before waking up the threads for better performance */
+ pthread_mutex_unlock(&threadpool->command_mutex);
+
+ /* Wake up the threads */
+ pthread_cond_broadcast(&threadpool->command_condvar);
+ #endif
+
+ /* Save and modify FPU denormals control, if needed */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+
+ /* Do computations as worker #0 */
+ thread_function(threadpool, &threadpool->threads[0]);
+
+ /* Restore FPU denormals control, if needed */
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+
+ /* Wait until the threads finish computation */
+ wait_worker_threads(threadpool);
+
+ /* Make changes by other threads visible to this thread */
+ pthreadpool_fence_acquire();
+
+ /* Unprotect the global threadpool structures */
+ pthread_mutex_unlock(&threadpool->execution_mutex);
+}
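
The effect of ~(old_command | THREADPOOL_COMMAND_MASK) is easiest to see with concrete numbers. A worked example, assuming a hypothetical 8-bit command mask and command value (the real constants are defined elsewhere in this file):

#include <assert.h>
#include <stdint.h>

int main(void) {
	const uint32_t mask = UINT32_C(0x000000FF);        /* assumed THREADPOOL_COMMAND_MASK */
	const uint32_t parallelize = UINT32_C(0x00000001); /* assumed threadpool_command_parallelize */

	/* First dispatch: the bits above the mask flip to all-ones. */
	uint32_t old_command = UINT32_C(0x00000001);
	uint32_t new_command = ~(old_command | mask) | parallelize;
	assert(new_command == UINT32_C(0xFFFFFF01));

	/* Dispatching the same command again flips the high bits back to
	 * zeros, so the full 32-bit word always differs from its previous
	 * value and spinning workers reliably observe the change. */
	old_command = new_command;
	new_command = ~(old_command | mask) | parallelize;
	assert(new_command == UINT32_C(0x00000001));
	return 0;
}
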
+
void pthreadpool_parallelize_1d(
struct pthreadpool* threadpool,
pthreadpool_task_1d_t task,
@@ -492,7 +725,7 @@ void pthreadpool_parallelize_1d(
size_t range,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || range <= 1) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -506,92 +739,53 @@ void pthreadpool_parallelize_1d(
set_fpu_state(saved_fpu_state);
}
} else {
- /* Protect the global threadpool structures */
- pthread_mutex_lock(&threadpool->execution_mutex);
-
- #if !PTHREADPOOL_USE_FUTEX
- /* Lock the command variables to ensure that threads don't start processing before they observe complete command with all arguments */
- pthread_mutex_lock(&threadpool->command_mutex);
- #endif
-
- /* Setup global arguments */
- atomic_store_explicit(&threadpool->task, task, memory_order_relaxed);
- atomic_store_explicit(&threadpool->argument, argument, memory_order_relaxed);
- atomic_store_explicit(&threadpool->flags, flags, memory_order_relaxed);
-
- /* Locking of completion_mutex not needed: readers are sleeping on command_condvar */
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_relaxed);
- #if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(&threadpool->has_active_threads, 1, memory_order_relaxed);
- #endif
-
- /* Spread the work between threads */
- for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
- struct thread_info* thread = &threadpool->threads[tid];
- const size_t range_start = multiply_divide(range, tid, threadpool->threads_count);
- const size_t range_end = multiply_divide(range, tid + 1, threadpool->threads_count);
- atomic_store_explicit(&thread->range_start, range_start, memory_order_relaxed);
- atomic_store_explicit(&thread->range_end, range_end, memory_order_relaxed);
- atomic_store_explicit(&thread->range_length, range_end - range_start, memory_order_relaxed);
- }
-
- #if PTHREADPOOL_USE_FUTEX
- /*
- * Make new command parameters globally visible. Having this fence before updating the command is imporatnt: it
- * guarantees that if a worker thread observes new command value, it also observes the updated command parameters.
- */
- atomic_thread_fence(memory_order_release);
- #endif
-
- /*
- * Update the threadpool command.
- * Imporantly, do it after initializing command parameters (range, task, argument)
- * ~(threadpool->command | THREADPOOL_COMMAND_MASK) flips the bits not in command mask
- * to ensure the unmasked command is different then the last command, because worker threads
- * monitor for change in the unmasked command.
- */
- const uint32_t old_command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
- const uint32_t new_command = ~(old_command | THREADPOOL_COMMAND_MASK) | threadpool_command_compute_1d;
-
- #if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(&threadpool->command, new_command, memory_order_release);
-
- /* Wake up the threads */
- futex_wake_all(&threadpool->command);
- #else
- atomic_store_explicit(&threadpool->command, new_command, memory_order_relaxed);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) task, argument, range, flags);
+ }
+}
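
For reference, a minimal end-to-end use of this entry point; the task and data are illustrative, but the calls match the signatures in this diff:

#include <assert.h>
#include <stddef.h>
#include <pthreadpool.h>

static void square_item(void* context, size_t i) {
	int* data = (int*) context;
	data[i] *= data[i];
}

int main(void) {
	int data[1000];
	for (size_t i = 0; i < 1000; i++) {
		data[i] = (int) i;
	}

	/* threads_count == 0 requests one worker per online processor */
	pthreadpool_t threadpool = pthreadpool_create(0);
	pthreadpool_parallelize_1d(
		threadpool, square_item, data,
		1000 /* range */, 0 /* flags */);
	pthreadpool_destroy(threadpool);

	assert(data[7] == 49);
	return 0;
}

A NULL threadpool, a single-thread pool, or range <= 1 takes the sequential branch above, so callers do not need to special-case small workloads.
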
- /* Unlock the command variables before waking up the threads for better performance */
- pthread_mutex_unlock(&threadpool->command_mutex);
+void pthreadpool_parallelize_1d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range,
+ uint32_t flags)
+{
+ if (threadpool == NULL || threadpool->threads_count <= 1 || range <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
- /* Wake up the threads */
- pthread_cond_broadcast(&threadpool->command_condvar);
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ if (threadpool && threadpool->cpuinfo_is_initialized) {
+ uarch_index = cpuinfo_get_current_uarch_index();
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ }
#endif
- /* Save and modify FPU denormals control, if needed */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
saved_fpu_state = get_fpu_state();
disable_fpu_denormals();
}
-
- /* Do computations as worker #0 */
- thread_parallelize_1d(threadpool, &threadpool->threads[0]);
-
- /* Restore FPU denormals control, if needed */
+ for (size_t i = 0; i < range; i++) {
+ task(argument, uarch_index, i);
+ }
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
set_fpu_state(saved_fpu_state);
}
-
- /* Wait until the threads finish computation */
- wait_worker_threads(threadpool);
-
- /* Make changes by other threads visible to this thread */
- atomic_thread_fence(memory_order_acquire);
-
- /* Unprotect the global threadpool structures */
- pthread_mutex_unlock(&threadpool->execution_mutex);
+ } else {
+ const struct pthreadpool_1d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ };
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params),
+ task, argument, range, flags);
}
}
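
A caller can use the reported index to select a per-microarchitecture kernel. A sketch under assumed names (UARCH_COUNT and the microkernels table are hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <pthreadpool.h>

#define UARCH_COUNT 2 /* assumption: index 0 = generic kernel, 1 = fast path */

typedef void (*microkernel_fn)(float* item);
extern microkernel_fn microkernels[UARCH_COUNT]; /* hypothetical table */

static void process_item(void* context, uint32_t uarch_index, size_t i) {
	float* items = (float*) context;
	/* The callback only ever sees default_uarch_index or a value
	 * no greater than max_uarch_index, so indexing is safe. */
	microkernels[uarch_index](&items[i]);
}

void process_all(pthreadpool_t threadpool, float* items, size_t count) {
	pthreadpool_parallelize_1d_with_uarch(
		threadpool, process_item, items,
		0 /* default_uarch_index */,
		UARCH_COUNT - 1 /* max_uarch_index */,
		count, 0 /* flags */);
}
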
@@ -617,7 +811,7 @@ void pthreadpool_parallelize_1d_tile_1d(
size_t tile,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || range <= tile) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -639,7 +833,9 @@ void pthreadpool_parallelize_1d_tile_1d(
.range = range,
.tile = tile
};
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_1d_tile_1d, &context, tile_range, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_1d_tile_1d, &context, tile_range, flags);
}
}
@@ -663,7 +859,7 @@ void pthreadpool_parallelize_2d(
size_t range_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i | range_j) <= 1) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -685,7 +881,9 @@ void pthreadpool_parallelize_2d(
.argument = argument,
.range_j = fxdiv_init_size_t(range_j)
};
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_2d, &context, range_i * range_j, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_2d, &context, range_i * range_j, flags);
}
}
@@ -717,7 +915,7 @@ void pthreadpool_parallelize_2d_tile_1d(
size_t tile_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= 1 && range_j <= tile_j)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -743,7 +941,9 @@ void pthreadpool_parallelize_2d_tile_1d(
.range_j = range_j,
.tile_j = tile_j
};
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_2d_tile_1d, &context, range_i * tile_range_j, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_2d_tile_1d, &context, range_i * tile_range_j, flags);
}
}
@@ -779,7 +979,7 @@ void pthreadpool_parallelize_2d_tile_2d(
size_t tile_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -807,7 +1007,92 @@ void pthreadpool_parallelize_2d_tile_2d(
.tile_i = tile_i,
.tile_j = tile_j
};
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_2d_tile_2d, &context, tile_range_i * tile_range_j, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_2d_tile_2d, &context, tile_range_i * tile_range_j, flags);
+ }
+}
+
+struct compute_2d_tile_2d_with_uarch_context {
+ pthreadpool_task_2d_tile_2d_with_id_t task;
+ void* argument;
+ struct fxdiv_divisor_size_t tile_range_j;
+ size_t range_i;
+ size_t range_j;
+ size_t tile_i;
+ size_t tile_j;
+};
+
+static void compute_2d_tile_2d_with_uarch(const struct compute_2d_tile_2d_with_uarch_context* context, uint32_t uarch_index, size_t linear_index) {
+ const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
+ const struct fxdiv_result_size_t tile_index = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t max_tile_i = context->tile_i;
+ const size_t max_tile_j = context->tile_j;
+ const size_t index_i = tile_index.quotient * max_tile_i;
+ const size_t index_j = tile_index.remainder * max_tile_j;
+ const size_t tile_i = min(max_tile_i, context->range_i - index_i);
+ const size_t tile_j = min(max_tile_j, context->range_j - index_j);
+ context->task(context->argument, uarch_index, index_i, index_j, tile_i, tile_j);
+}
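
fxdiv_divide_size_t() yields the same quotient/remainder pair as / and % by tile_range_j, only without a hardware divide. A runnable trace of the index math with assumed shapes range_i = 5, range_j = 7, tile_i = 2, tile_j = 3:

#include <assert.h>
#include <stddef.h>

static size_t divide_round_up(size_t n, size_t d) { return (n + d - 1) / d; }
static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void) {
	const size_t range_i = 5, range_j = 7, tile_i = 2, tile_j = 3;
	const size_t tile_range_j = divide_round_up(range_j, tile_j); /* = 3 */

	/* Tile origin for linear index 4, using plain / and % where the
	 * library uses the precomputed fxdiv divisor. */
	const size_t linear_index = 4;
	const size_t index_i = (linear_index / tile_range_j) * tile_i; /* = 2 */
	const size_t index_j = (linear_index % tile_range_j) * tile_j; /* = 3 */
	assert(index_i == 2 && index_j == 3);

	/* Edge tiles get clipped: the last tile column (remainder 2) has tile_j = 1. */
	assert(min_size(tile_i, range_i - index_i) == 2);
	assert(min_size(tile_j, range_j - 2 * tile_j) == 1);
	return 0;
}
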
+
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags)
+{
+ if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ if (threadpool && threadpool->cpuinfo_is_initialized) {
+ uarch_index = cpuinfo_get_current_uarch_index();
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i += tile_i) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, uarch_index, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ /* Execute in parallel on the thread pool using linearized index */
+ const size_t tile_range_i = divide_round_up(range_i, tile_i);
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const struct pthreadpool_1d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ };
+ struct compute_2d_tile_2d_with_uarch_context context = {
+ .task = task,
+ .argument = argument,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ .range_i = range_i,
+ .range_j = range_j,
+ .tile_i = tile_i,
+ .tile_j = tile_j
+ };
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params),
+ (void*) compute_2d_tile_2d_with_uarch, &context, tile_range_i * tile_range_j, flags);
}
}
@@ -848,7 +1133,7 @@ void pthreadpool_parallelize_3d_tile_2d(
size_t tile_k,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -879,9 +1164,100 @@ void pthreadpool_parallelize_3d_tile_2d(
.tile_j = tile_j,
.tile_k = tile_k
};
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_3d_tile_2d, &context,
- range_i * tile_range_j * tile_range_k, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_3d_tile_2d, &context, range_i * tile_range_j * tile_range_k, flags);
+ }
+}
+
+struct compute_3d_tile_2d_with_uarch_context {
+ pthreadpool_task_3d_tile_2d_with_id_t task;
+ void* argument;
+ struct fxdiv_divisor_size_t tile_range_j;
+ struct fxdiv_divisor_size_t tile_range_k;
+ size_t range_j;
+ size_t range_k;
+ size_t tile_j;
+ size_t tile_k;
+};
+
+static void compute_3d_tile_2d_with_uarch(const struct compute_3d_tile_2d_with_uarch_context* context, uint32_t uarch_index, size_t linear_index) {
+ const struct fxdiv_divisor_size_t tile_range_k = context->tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t max_tile_j = context->tile_j;
+ const size_t max_tile_k = context->tile_k;
+ const size_t index_i = tile_index_i_j.quotient;
+ const size_t index_j = tile_index_i_j.remainder * max_tile_j;
+ const size_t index_k = tile_index_ij_k.remainder * max_tile_k;
+ const size_t tile_j = min(max_tile_j, context->range_j - index_j);
+ const size_t tile_k = min(max_tile_k, context->range_k - index_k);
+ context->task(context->argument, uarch_index, index_i, index_j, index_k, tile_j, tile_k);
+}
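
The 3D variant peels the linear index in two steps, innermost dimension first. A runnable trace with assumed shapes range_i = 2, range_j = 5, range_k = 7, tile_j = 2, tile_k = 3 (so tile_range_j = 3, tile_range_k = 3, and 18 linear tiles):

#include <assert.h>
#include <stddef.h>

int main(void) {
	const size_t tile_j = 2, tile_k = 3;
	const size_t tile_range_j = 3, tile_range_k = 3; /* ceil(5/2), ceil(7/3) */

	const size_t linear_index = 16;
	const size_t index_k = (linear_index % tile_range_k) * tile_k; /* = 3 */
	const size_t ij = linear_index / tile_range_k;                 /* = 5 */
	const size_t index_i = ij / tile_range_j;                      /* = 1 */
	const size_t index_j = (ij % tile_range_j) * tile_j;           /* = 4 */

	assert(index_i == 1 && index_j == 4 && index_k == 3);
	/* Clipped extents: tile_j = min(2, 5 - 4) = 1, tile_k = min(3, 7 - 3) = 3,
	 * so the task is invoked as task(argument, uarch_index, 1, 4, 3, 1, 3). */
	return 0;
}
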
+
+void pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags)
+{
+ if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ if (threadpool && threadpool->cpuinfo_is_initialized) {
+ uarch_index = cpuinfo_get_current_uarch_index();
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, uarch_index, i, j, k, min(range_j - j, tile_j), min(range_k - k, tile_k));
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ /* Execute in parallel on the thread pool using linearized index */
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const struct pthreadpool_1d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ };
+ struct compute_3d_tile_2d_with_uarch_context context = {
+ .task = task,
+ .argument = argument,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ .tile_range_k = fxdiv_init_size_t(tile_range_k),
+ .range_j = range_j,
+ .range_k = range_k,
+ .tile_j = tile_j,
+ .tile_k = tile_k
+ };
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params),
+ (void*) compute_3d_tile_2d_with_uarch, &context, range_i * tile_range_j * tile_range_k, flags);
}
}
@@ -927,7 +1303,7 @@ void pthreadpool_parallelize_4d_tile_2d(
size_t tile_l,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -962,9 +1338,109 @@ void pthreadpool_parallelize_4d_tile_2d(
.tile_k = tile_k,
.tile_l = tile_l
};
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_4d_tile_2d, &context,
- range_i * range_j * tile_range_k * tile_range_l, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_4d_tile_2d, &context, range_i * range_j * tile_range_k * tile_range_l, flags);
+ }
+}
+
+struct compute_4d_tile_2d_with_uarch_context {
+ pthreadpool_task_4d_tile_2d_with_id_t task;
+ void* argument;
+ struct fxdiv_divisor_size_t tile_range_kl;
+ struct fxdiv_divisor_size_t range_j;
+ struct fxdiv_divisor_size_t tile_range_l;
+ size_t range_k;
+ size_t range_l;
+ size_t tile_k;
+ size_t tile_l;
+};
+
+static void compute_4d_tile_2d_with_uarch(const struct compute_4d_tile_2d_with_uarch_context* context, uint32_t uarch_index, size_t linear_index) {
+ const struct fxdiv_divisor_size_t tile_range_kl = context->tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = context->range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = context->tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t max_tile_k = context->tile_k;
+ const size_t max_tile_l = context->tile_l;
+ const size_t index_i = tile_index_i_j.quotient;
+ const size_t index_j = tile_index_i_j.remainder;
+ const size_t index_k = tile_index_k_l.quotient * max_tile_k;
+ const size_t index_l = tile_index_k_l.remainder * max_tile_l;
+ const size_t tile_k = min(max_tile_k, context->range_k - index_k);
+ const size_t tile_l = min(max_tile_l, context->range_l - index_l);
+ context->task(context->argument, uarch_index, index_i, index_j, index_k, index_l, tile_k, tile_l);
+}
+
+void pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags)
+{
+ if (threadpool == NULL || threadpool->threads_count <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ if (threadpool && threadpool->cpuinfo_is_initialized) {
+ uarch_index = cpuinfo_get_current_uarch_index();
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, uarch_index, i, j, k, l,
+ min(range_k - k, tile_k), min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ /* Execute in parallel on the thread pool using linearized index */
+ const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range_l = divide_round_up(range_l, tile_l);
+ const struct pthreadpool_1d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ };
+ struct compute_4d_tile_2d_with_uarch_context context = {
+ .task = task,
+ .argument = argument,
+ .tile_range_kl = fxdiv_init_size_t(tile_range_k * tile_range_l),
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_l = fxdiv_init_size_t(tile_range_l),
+ .range_k = range_k,
+ .range_l = range_l,
+ .tile_k = tile_k,
+ .tile_l = tile_l
+ };
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params),
+ (void*) compute_4d_tile_2d_with_uarch, &context, range_i * range_j * tile_range_k * tile_range_l, flags);
}
}
@@ -1016,7 +1492,7 @@ void pthreadpool_parallelize_5d_tile_2d(
size_t tile_m,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || ((range_i | range_j | range_k) <= 1 && range_l <= tile_l && range_m <= tile_m)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -1054,9 +1530,9 @@ void pthreadpool_parallelize_5d_tile_2d(
.tile_l = tile_l,
.tile_m = tile_m,
};
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_5d_tile_2d, &context,
- range_i * range_j * range_k * tile_range_l * tile_range_m, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_5d_tile_2d, &context, range_i * range_j * range_k * tile_range_l * tile_range_m, flags);
}
}
@@ -1113,7 +1589,7 @@ void pthreadpool_parallelize_6d_tile_2d(
size_t tile_n,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
+ if (threadpool == NULL || threadpool->threads_count <= 1 || ((range_i | range_j | range_k | range_l) <= 1 && range_m <= tile_m && range_n <= tile_n)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -1154,21 +1630,25 @@ void pthreadpool_parallelize_6d_tile_2d(
.tile_m = tile_m,
.tile_n = tile_n,
};
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_6d_tile_2d, &context,
- range_i * range_j * range_k * range_l * tile_range_m * tile_range_n, flags);
+ pthreadpool_parallelize(
+ threadpool, &thread_parallelize_1d, NULL, 0,
+ (void*) compute_6d_tile_2d, &context, range_i * range_j * range_k * range_l * tile_range_m * tile_range_n, flags);
}
}
void pthreadpool_destroy(struct pthreadpool* threadpool) {
if (threadpool != NULL) {
- if (threadpool->threads_count > 1) {
+ const size_t threads_count = threadpool->threads_count;
+ if (threads_count > 1) {
#if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_relaxed);
- atomic_store_explicit(&threadpool->has_active_threads, 1, memory_order_release);
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1);
- atomic_store_explicit(&threadpool->command, threadpool_command_shutdown, memory_order_release);
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated active_threads/has_active_threads values.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, threadpool_command_shutdown);
/* Wake up worker threads */
futex_wake_all(&threadpool->command);
@@ -1176,12 +1656,16 @@ void pthreadpool_destroy(struct pthreadpool* threadpool) {
/* Lock the command variable to ensure that threads don't shut down until both command and active_threads are updated */
pthread_mutex_lock(&threadpool->command_mutex);
- /* Locking of completion_mutex not needed: readers are sleeping on command_condvar */
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_release);
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
- /* Update the threadpool command. */
- atomic_store_explicit(&threadpool->command, threadpool_command_shutdown, memory_order_release);
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated active_threads value.
+ *
+ * Note: the release fence inside pthread_mutex_unlock is insufficient,
+  * because the workers might be waiting in a spin-loop rather than on the condition variable.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, threadpool_command_shutdown);
/* Wake up worker threads */
pthread_cond_broadcast(&threadpool->command_condvar);
@@ -1191,7 +1675,7 @@ void pthreadpool_destroy(struct pthreadpool* threadpool) {
#endif
/* Wait until all threads return */
- for (size_t thread = 1; thread < threadpool->threads_count; thread++) {
+ for (size_t thread = 1; thread < threads_count; thread++) {
pthread_join(threadpool->threads[thread].thread_object, NULL);
}
@@ -1204,6 +1688,15 @@ void pthreadpool_destroy(struct pthreadpool* threadpool) {
pthread_cond_destroy(&threadpool->command_condvar);
#endif
}
- free(threadpool);
+ #if PTHREADPOOL_USE_CPUINFO
+ if (threadpool->cpuinfo_is_initialized) {
+ cpuinfo_deinitialize();
+ }
+ #endif
+ #ifdef _WIN32
+ _aligned_free(threadpool);
+ #else
+ free(threadpool);
+ #endif
}
}
diff --git a/src/threadpool-shim.c b/src/threadpool-shim.c
index c8ef51d..b5670ea 100644
--- a/src/threadpool-shim.c
+++ b/src/threadpool-shim.c
@@ -28,6 +28,20 @@ void pthreadpool_parallelize_1d(
}
}
+void pthreadpool_parallelize_1d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range; i++) {
+ task(argument, default_uarch_index, i);
+ }
+}
+
void pthreadpool_parallelize_1d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_tile_1d_t task,
@@ -89,6 +103,26 @@ void pthreadpool_parallelize_2d_tile_2d(
}
}
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i += tile_i) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, default_uarch_index, i, j,
+ min(range_i - i, tile_i), min(range_j - j, tile_j));
+ }
+ }
+}
+
void pthreadpool_parallelize_3d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_2d_t task,
@@ -110,6 +144,29 @@ void pthreadpool_parallelize_3d_tile_2d(
}
}
+void pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, default_uarch_index, i, j, k,
+ min(range_j - j, tile_j), min(range_k - k, tile_k));
+ }
+ }
+ }
+}
+
void pthreadpool_parallelize_4d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_2d_t task,
@@ -134,6 +191,32 @@ void pthreadpool_parallelize_4d_tile_2d(
}
}
+void pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, default_uarch_index, i, j, k, l,
+ min(range_k - k, tile_k), min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+}
+
void pthreadpool_parallelize_5d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_5d_tile_2d_t task,
diff --git a/test/pthreadpool.cc b/test/pthreadpool.cc
index 4faf3be..b8a6803 100644
--- a/test/pthreadpool.cc
+++ b/test/pthreadpool.cc
@@ -54,6 +54,9 @@ const size_t kIncrementIterations = 101;
const size_t kIncrementIterations5D = 7;
const size_t kIncrementIterations6D = 3;
+const uint32_t kMaxUArchIndex = 0;
+const uint32_t kDefaultUArchIndex = 42;
+
TEST(CreateAndDestroy, NullThreadPool) {
pthreadpool* threadpool = nullptr;
@@ -274,6 +277,29 @@ TEST(Parallelize1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame1D(std::atomic_int* num_processed_items, size_t i) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_t>(IncrementSame1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize1DRange,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
+}
+
static void WorkImbalance1D(std::atomic_int* num_processed_items, size_t i) {
num_processed_items->fetch_add(1, std::memory_order_relaxed);
if (i == 0) {
@@ -303,6 +329,321 @@ TEST(Parallelize1D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
}
+static void ComputeNothing1DWithUArch(void*, uint32_t, size_t) {
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(threadpool.get(),
+ ComputeNothing1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ ComputeNothing1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+static void CheckUArch1DWithUArch(void*, uint32_t uarch_index, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(threadpool.get(),
+ CheckUArch1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ CheckUArch1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+static void CheckBounds1DWithUArch(void*, uint32_t, size_t i) {
+ EXPECT_LT(i, kParallelize1DRange);
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ CheckBounds1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ CheckBounds1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+static void SetTrue1DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i) {
+ processed_indicators[i].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(SetTrue1DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_TRUE(indicators[i].load(std::memory_order_relaxed))
+ << "Element " << i << " not processed";
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(SetTrue1DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_TRUE(indicators[i].load(std::memory_order_relaxed))
+ << "Element " << i << " not processed";
+ }
+}
+
+static void Increment1DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i) {
+ processed_counters[i].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), 1)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), 1)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+}
+
+static void IncrementSame1DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(IncrementSame1DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
+}
+
+static void WorkImbalance1DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize1DRange) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(WorkImbalance1DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
+}
+
static void ComputeNothing1DTile1D(void*, size_t, size_t) {
}
@@ -545,6 +886,31 @@ TEST(Parallelize1DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame1DTile1D(std::atomic_int* num_processed_items, size_t start_i, size_t tile_i) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize1DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(IncrementSame1DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize1DTile1DRange, kParallelize1DTile1DTile,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DTile1DRange);
+}
+
static void WorkImbalance1DTile1D(std::atomic_int* num_processed_items, size_t start_i, size_t tile_i) {
num_processed_items->fetch_add(tile_i, std::memory_order_relaxed);
if (start_i == 0) {
@@ -801,6 +1167,29 @@ TEST(Parallelize2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame2D(std::atomic_int* num_processed_items, size_t i, size_t j) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_t>(IncrementSame2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize2DRangeI, kParallelize2DRangeJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DRangeI * kParallelize2DRangeJ);
+}
+
static void WorkImbalance2D(std::atomic_int* num_processed_items, size_t i, size_t j) {
num_processed_items->fetch_add(1, std::memory_order_relaxed);
if (i == 0 && j == 0) {
@@ -1097,6 +1486,31 @@ TEST(Parallelize2DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame2DTile1D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t tile_j) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize2DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_1d_t>(IncrementSame2DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize2DTile1DRangeI, kParallelize2DTile1DRangeJ, kParallelize2DTile1DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile1DRangeI * kParallelize2DTile1DRangeJ);
+}
+
static void WorkImbalance2DTile1D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t tile_j) {
num_processed_items->fetch_add(tile_j, std::memory_order_relaxed);
if (i == 0 && start_j == 0) {
@@ -1415,6 +1829,34 @@ TEST(Parallelize2DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame2DTile2D(std::atomic_int* num_processed_items, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_t>(IncrementSame2DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+}
+
static void WorkImbalance2DTile2D(std::atomic_int* num_processed_items, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
num_processed_items->fetch_add(tile_i * tile_j, std::memory_order_relaxed);
if (start_i == 0 && start_j == 0) {
@@ -1445,6 +1887,405 @@ TEST(Parallelize2DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
}
+static void ComputeNothing2DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(threadpool.get(),
+ ComputeNothing2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+static void CheckUArch2DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+static void CheckBounds2DTile2DWithUArch(void*, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ EXPECT_LT(start_i, kParallelize2DTile2DRangeI);
+ EXPECT_LT(start_j, kParallelize2DTile2DRangeJ);
+ EXPECT_LE(start_i + tile_i, kParallelize2DTile2DRangeI);
+ EXPECT_LE(start_j + tile_j, kParallelize2DTile2DRangeJ);
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
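+/*
+ * Tiles must be non-empty, no larger than the requested tile size, aligned on
+ * tile-size boundaries, and full-sized except for the last tile in each
+ * dimension.
+ */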
+static void CheckTiling2DTile2DWithUArch(void*, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ EXPECT_GT(tile_i, 0);
+ EXPECT_LE(tile_i, kParallelize2DTile2DTileI);
+ EXPECT_EQ(start_i % kParallelize2DTile2DTileI, 0);
+ EXPECT_EQ(tile_i, std::min<size_t>(kParallelize2DTile2DTileI, kParallelize2DTile2DRangeI - start_i));
+
+ EXPECT_GT(tile_j, 0);
+ EXPECT_LE(tile_j, kParallelize2DTile2DTileJ);
+ EXPECT_EQ(start_j % kParallelize2DTile2DTileJ, 0);
+ EXPECT_EQ(tile_j, std::min<size_t>(kParallelize2DTile2DTileJ, kParallelize2DTile2DRangeJ - start_j));
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
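+/*
+ * Marks every item of the tile as processed. The helper takes a typed context
+ * pointer, so call sites reinterpret_cast it to the generic
+ * pthreadpool_task_2d_tile_2d_with_id_t signature.
+ */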
+static void SetTrue2DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(SetTrue2DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ") not processed";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(SetTrue2DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ") not processed";
+ }
+ }
+}
+
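+/* Counts how many times each (i, j) item is processed, one counter per item. */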
+static void Increment2DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+}
+
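+/*
+ * Unlike Increment2DTile2DWithUArch, all tiles bump a single shared counter,
+ * deliberately maximizing contention between worker threads.
+ */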
+static void IncrementSame2DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(IncrementSame2DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+}
+
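+/*
+ * The thread that processes tile (0, 0) spins until every item is accounted
+ * for, so the parallelization only finishes if the other threads steal the
+ * remainder of its work.
+ */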
+static void WorkImbalance2DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ num_processed_items->fetch_add(tile_i * tile_j, std::memory_order_relaxed);
+ if (start_i == 0 && start_j == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(WorkImbalance2DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+}
+
static void ComputeNothing3DTile2D(void*, size_t, size_t, size_t, size_t, size_t) {
}
@@ -1747,6 +2588,34 @@ TEST(Parallelize3DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
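+
+/* High-contention variant for the 3D/2D-tile case: all tiles share one counter. */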
+static void IncrementSame3DTile2D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_t>(IncrementSame3DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+}
+
static void WorkImbalance3DTile2D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
num_processed_items->fetch_add(tile_j * tile_k, std::memory_order_relaxed);
if (i == 0 && start_j == 0 && start_k == 0) {
@@ -1777,6 +2646,418 @@ TEST(Parallelize3DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
}
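+
+/*
+ * Tests for pthreadpool_parallelize_3d_tile_2d_with_uarch: the same coverage
+ * as Parallelize2DTile2DWithUArch, extended with an untiled i dimension.
+ */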
+static void ComputeNothing3DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void CheckUArch3DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void CheckBounds3DTile2DWithUArch(void*, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ EXPECT_LT(i, kParallelize3DTile2DRangeI);
+ EXPECT_LT(start_j, kParallelize3DTile2DRangeJ);
+ EXPECT_LT(start_k, kParallelize3DTile2DRangeK);
+ EXPECT_LE(start_j + tile_j, kParallelize3DTile2DRangeJ);
+ EXPECT_LE(start_k + tile_k, kParallelize3DTile2DRangeK);
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void CheckTiling3DTile2DWithUArch(void*, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ EXPECT_GT(tile_j, 0);
+ EXPECT_LE(tile_j, kParallelize3DTile2DTileJ);
+ EXPECT_EQ(start_j % kParallelize3DTile2DTileJ, 0);
+ EXPECT_EQ(tile_j, std::min<size_t>(kParallelize3DTile2DTileJ, kParallelize3DTile2DRangeJ - start_j));
+
+ EXPECT_GT(tile_k, 0);
+ EXPECT_LE(tile_k, kParallelize3DTile2DTileK);
+ EXPECT_EQ(start_k % kParallelize3DTile2DTileK, 0);
+ EXPECT_EQ(tile_k, std::min<size_t>(kParallelize3DTile2DTileK, kParallelize3DTile2DRangeK - start_k));
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void SetTrue3DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(SetTrue3DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(SetTrue3DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+static void Increment3DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+static void IncrementSame3DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(IncrementSame3DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+}
+
+static void WorkImbalance3DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ num_processed_items->fetch_add(tile_j * tile_k, std::memory_order_relaxed);
+ if (i == 0 && start_j == 0 && start_k == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(WorkImbalance3DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+}
+
static void ComputeNothing4DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
}
@@ -2092,6 +3373,34 @@ TEST(Parallelize4DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
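+
+/* High-contention variant for the 4D/2D-tile case: all tiles share one counter. */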
+static void IncrementSame4DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_t>(IncrementSame4DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+}
+
static void WorkImbalance4DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
num_processed_items->fetch_add(tile_k * tile_l, std::memory_order_relaxed);
if (i == 0 && j == 0 && start_k == 0 && start_l == 0) {
@@ -2122,6 +3431,431 @@ TEST(Parallelize4DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
}
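+
+/*
+ * Tests for pthreadpool_parallelize_4d_tile_2d_with_uarch: the same coverage
+ * as the 3D variant, with untiled i and j dimensions.
+ */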
+static void ComputeNothing4DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void CheckUArch4DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t, size_t, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void CheckBounds4DTile2DWithUArch(void*, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ EXPECT_LT(i, kParallelize4DTile2DRangeI);
+ EXPECT_LT(j, kParallelize4DTile2DRangeJ);
+ EXPECT_LT(start_k, kParallelize4DTile2DRangeK);
+ EXPECT_LT(start_l, kParallelize4DTile2DRangeL);
+ EXPECT_LE(start_k + tile_k, kParallelize4DTile2DRangeK);
+ EXPECT_LE(start_l + tile_l, kParallelize4DTile2DRangeL);
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void CheckTiling4DTile2DWithUArch(void*, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ EXPECT_GT(tile_k, 0);
+ EXPECT_LE(tile_k, kParallelize4DTile2DTileK);
+ EXPECT_EQ(start_k % kParallelize4DTile2DTileK, 0);
+ EXPECT_EQ(tile_k, std::min<size_t>(kParallelize4DTile2DTileK, kParallelize4DTile2DRangeK - start_k));
+
+ EXPECT_GT(tile_l, 0);
+ EXPECT_LE(tile_l, kParallelize4DTile2DTileL);
+ EXPECT_EQ(start_l % kParallelize4DTile2DTileL, 0);
+ EXPECT_EQ(tile_l, std::min<size_t>(kParallelize4DTile2DTileL, kParallelize4DTile2DRangeL - start_l));
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void SetTrue4DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(SetTrue4DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(SetTrue4DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+static void Increment4DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame4DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(IncrementSame4DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+}
+
+static void WorkImbalance4DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ num_processed_items->fetch_add(tile_k * tile_l, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && start_k == 0 && start_l == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(WorkImbalance4DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+}
+
static void ComputeNothing5DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
}
@@ -2450,6 +4184,34 @@ TEST(Parallelize5DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
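+
+/* High-contention variant for the 5D/2D-tile case: all tiles share one counter. */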
+static void IncrementSame5DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t start_m, size_t tile_l, size_t tile_m) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize5DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_2d_t>(IncrementSame5DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DTile2DRangeI, kParallelize5DTile2DRangeJ, kParallelize5DTile2DRangeK, kParallelize5DTile2DRangeL, kParallelize5DTile2DRangeM,
+ kParallelize5DTile2DTileL, kParallelize5DTile2DTileM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile2DRangeI * kParallelize5DTile2DRangeJ * kParallelize5DTile2DRangeK * kParallelize5DTile2DRangeL * kParallelize5DTile2DRangeM);
+}
+
static void WorkImbalance5DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t start_m, size_t tile_l, size_t tile_m) {
num_processed_items->fetch_add(tile_l * tile_m, std::memory_order_relaxed);
if (i == 0 && j == 0 && k == 0 && start_l == 0 && start_m == 0) {
@@ -2821,6 +4583,34 @@ TEST(Parallelize6DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
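+
+/* High-contention variant for the 6D/2D-tile case: all tiles share one counter. */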
+static void IncrementSame6DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t start_n, size_t tile_m, size_t tile_n) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ for (size_t n = start_n; n < start_n + tile_n; n++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize6DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_2d_t>(IncrementSame6DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize6DTile2DRangeI, kParallelize6DTile2DRangeJ, kParallelize6DTile2DRangeK, kParallelize6DTile2DRangeL, kParallelize6DTile2DRangeM, kParallelize6DTile2DRangeN,
+ kParallelize6DTile2DTileM, kParallelize6DTile2DTileN,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DTile2DRangeI * kParallelize6DTile2DRangeJ * kParallelize6DTile2DRangeK * kParallelize6DTile2DRangeL * kParallelize6DTile2DRangeM * kParallelize6DTile2DRangeN);
+}
+
static void WorkImbalance6DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t start_n, size_t tile_m, size_t tile_n) {
num_processed_items->fetch_add(tile_m * tile_n, std::memory_order_relaxed);
if (i == 0 && j == 0 && k == 0 && l == 0 && start_m == 0 && start_n == 0) {