author    Haibo Huang <hhb@google.com>  2020-07-10 20:22:01 -0700
committer Haibo Huang <hhb@google.com>  2020-08-07 20:05:14 +0000
commit    a85203c348b589bfd282decff8a9525794284881
tree      bdff5f4a93033f04736f1ff8780f11f96c44e31f
parent    94500f81562850b9b727e3426d241fddcdfe0a60
parent    029c88620802e1361ccf41d1970bd5b07fd6b7bb
Upgrade pthreadpool to 029c88620802e1361ccf41d1970bd5b07fd6b7bb
Exempt-From-Owner-Approval: upgrade
Change-Id: I1c68c335e82551cd3aaa76f54cfde86077d09479
-rw-r--r--  BUILD.bazel                  |  141
-rw-r--r--  CMakeLists.txt               |   13
-rw-r--r--  METADATA                     |    6
-rw-r--r--  cmake/DownloadCpuinfo.cmake  |    4
-rw-r--r--  include/pthreadpool.h        |  270
-rw-r--r--  src/fastpath.c               | 1170
-rw-r--r--  src/portable-api.c           |  873
-rw-r--r--  src/pthreads.c               |    6
-rw-r--r--  src/shim.c                   |  129
-rw-r--r--  src/threadpool-atomics.h     |  193
-rw-r--r--  src/threadpool-object.h      |  212
-rw-r--r--  src/threadpool-utils.h       |   67
-rw-r--r--  src/windows.c                |    6
-rw-r--r--  test/pthreadpool.cc          | 2025
14 files changed, 4929 insertions(+), 186 deletions(-)
diff --git a/BUILD.bazel b/BUILD.bazel
index af1401b..0b832cf 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -16,22 +16,8 @@ PORTABLE_SRCS = [
"src/portable-api.c",
]
-PTHREADS_IMPL_SRCS = PORTABLE_SRCS + ["src/pthreads.c"]
-
-GCD_IMPL_SRCS = PORTABLE_SRCS + ["src/gcd.c"]
-
-SHIM_IMPL_SRCS = ["src/shim.c"]
-
-INTERNAL_HDRS = [
- "src/threadpool-atomics.h",
- "src/threadpool-common.h",
- "src/threadpool-object.h",
- "src/threadpool-utils.h",
-]
-
-PORTABLE_SRCS = [
- "src/memory.c",
- "src/portable-api.c",
+ARCH_SPECIFIC_SRCS = [
+ "src/fastpath.c",
]
PTHREADS_IMPL_SRCS = PORTABLE_SRCS + ["src/pthreads.c"]
@@ -50,13 +36,27 @@ cc_library(
":pthreadpool_sync_primitive_explicit_gcd": INTERNAL_HDRS + GCD_IMPL_SRCS,
":pthreadpool_sync_primitive_explicit_event": INTERNAL_HDRS + WINDOWS_IMPL_SRCS,
":emscripten_with_threads": INTERNAL_HDRS + PTHREADS_IMPL_SRCS,
- ":emscripten": SHIM_IMPL_SRCS,
+ ":emscripten": INTERNAL_HDRS + SHIM_IMPL_SRCS,
":macos_x86": INTERNAL_HDRS + GCD_IMPL_SRCS,
":macos_x86_64": INTERNAL_HDRS + GCD_IMPL_SRCS,
":ios": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":watchos": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":tvos": INTERNAL_HDRS + GCD_IMPL_SRCS,
":windows_x86_64": INTERNAL_HDRS + WINDOWS_IMPL_SRCS,
- ":windows_x86_64_msvc": INTERNAL_HDRS + WINDOWS_IMPL_SRCS,
"//conditions:default": INTERNAL_HDRS + PTHREADS_IMPL_SRCS,
+ }) + select({
+ ":linux_x86_64": ARCH_SPECIFIC_SRCS,
+ ":android_x86": ARCH_SPECIFIC_SRCS,
+ ":android_x86_64": ARCH_SPECIFIC_SRCS,
+ ":windows_x86_64": ARCH_SPECIFIC_SRCS,
+ ":macos_x86": ARCH_SPECIFIC_SRCS,
+ ":macos_x86_64": ARCH_SPECIFIC_SRCS,
+ ":ios_x86": ARCH_SPECIFIC_SRCS,
+ ":ios_x86_64": ARCH_SPECIFIC_SRCS,
+ ":watchos_x86": ARCH_SPECIFIC_SRCS,
+ ":watchos_x86_64": ARCH_SPECIFIC_SRCS,
+ ":tvos_x86_64": ARCH_SPECIFIC_SRCS,
+ "//conditions:default": [],
}),
copts = [
"-std=gnu11",
@@ -65,7 +65,9 @@ cc_library(
"//conditions:default": [],
}) + select({
":linux_arm": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":linux_armeabi": ["-DPTHREADPOOL_USE_CPUINFO=1"],
":linux_armhf": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":linux_armv7a": ["-DPTHREADPOOL_USE_CPUINFO=1"],
":linux_aarch64": ["-DPTHREADPOOL_USE_CPUINFO=1"],
":android_armv7": ["-DPTHREADPOOL_USE_CPUINFO=1"],
":android_arm64": ["-DPTHREADPOOL_USE_CPUINFO=1"],
@@ -96,6 +98,19 @@ cc_library(
"-DPTHREADPOOL_USE_EVENT=1",
],
"//conditions:default": [],
+ }) + select({
+ ":linux_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":android_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":android_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":windows_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":macos_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":macos_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":ios_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":ios_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":watchos_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":watchos_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":tvos_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ "//conditions:default": ["-DPTHREADPOOL_USE_FASTPATH=0"],
}),
hdrs = [
"include/pthreadpool.h",
@@ -118,7 +133,9 @@ cc_library(
"@FXdiv",
] + select({
":linux_arm": ["@cpuinfo"],
+ ":linux_armeabi": ["@cpuinfo"],
":linux_armhf": ["@cpuinfo"],
+ ":linux_armv7a": ["@cpuinfo"],
":linux_aarch64": ["@cpuinfo"],
":android_armv7": ["@cpuinfo"],
":android_arm64": ["@cpuinfo"],
@@ -220,21 +237,52 @@ config_setting(
)
config_setting(
+ name = "linux_x86_64",
+ values = {"cpu": "k8"},
+)
+
+config_setting(
name = "linux_arm",
values = {"cpu": "arm"},
)
config_setting(
+ name = "linux_armeabi",
+ values = {"cpu": "armeabi"},
+)
+
+config_setting(
name = "linux_armhf",
values = {"cpu": "armhf"},
)
config_setting(
+ name = "linux_armv7a",
+ values = {"cpu": "armv7a"},
+)
+
+config_setting(
name = "linux_aarch64",
values = {"cpu": "aarch64"},
)
config_setting(
+ name = "android_x86",
+ values = {
+ "crosstool_top": "//external:android/crosstool",
+ "cpu": "x86",
+ },
+)
+
+config_setting(
+ name = "android_x86_64",
+ values = {
+ "crosstool_top": "//external:android/crosstool",
+ "cpu": "x86_64",
+ },
+)
+
+config_setting(
name = "android_armv7",
values = {
"crosstool_top": "//external:android/crosstool",
@@ -278,16 +326,65 @@ config_setting(
)
config_setting(
- name = "windows_x86_64",
+ name = "ios_x86",
values = {
- "cpu": "x64_windows",
+ "apple_platform_type": "ios",
+ "cpu": "ios_i386",
+ },
+)
+
+config_setting(
+ name = "ios_x86_64",
+ values = {
+ "apple_platform_type": "ios",
+ "cpu": "ios_x86_64",
},
)
config_setting(
- name = "windows_x86_64_msvc",
+ name = "watchos",
values = {
- "cpu": "x64_windows_msvc",
+ "crosstool_top": "@bazel_tools//tools/cpp:toolchain",
+ "apple_platform_type": "watchos",
+ },
+)
+
+config_setting(
+ name = "watchos_x86",
+ values = {
+ "apple_platform_type": "watchos",
+ "cpu": "watchos_i386",
+ },
+)
+
+config_setting(
+ name = "watchos_x86_64",
+ values = {
+ "apple_platform_type": "watchos",
+ "cpu": "watchos_x86_64",
+ },
+)
+
+config_setting(
+ name = "tvos",
+ values = {
+ "crosstool_top": "@bazel_tools//tools/cpp:toolchain",
+ "apple_platform_type": "tvos",
+ },
+)
+
+config_setting(
+ name = "tvos_x86_64",
+ values = {
+ "apple_platform_type": "tvos",
+ "cpu": "tvos_x86_64",
+ },
+)
+
+config_setting(
+ name = "windows_x86_64",
+ values = {
+ "cpu": "x64_windows",
},
)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 51b0105..0db3264 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -9,6 +9,11 @@ SET_PROPERTY(CACHE PTHREADPOOL_LIBRARY_TYPE PROPERTY STRINGS default static shar
OPTION(PTHREADPOOL_ALLOW_DEPRECATED_API "Enable deprecated API functions" ON)
SET(PTHREADPOOL_SYNC_PRIMITIVE "default" CACHE STRING "Synchronization primitive (condvar, futex, gcd, event, or default) for worker threads")
SET_PROPERTY(CACHE PTHREADPOOL_SYNC_PRIMITIVE PROPERTY STRINGS default condvar futex gcd event)
+IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
+ OPTION(PTHREADPOOL_ENABLE_FASTPATH "Enable fast path using atomic decrement instead of atomic compare-and-swap" ON)
+ELSE()
+ OPTION(PTHREADPOOL_ENABLE_FASTPATH "Enable fast path using atomic decrement instead of atomic compare-and-swap" OFF)
+ENDIF()
IF("${CMAKE_SOURCE_DIR}" STREQUAL "${PROJECT_SOURCE_DIR}")
OPTION(PTHREADPOOL_BUILD_TESTS "Build pthreadpool unit tests" ON)
OPTION(PTHREADPOOL_BUILD_BENCHMARKS "Build pthreadpool micro-benchmarks" ON)
@@ -76,6 +81,9 @@ ELSE()
ELSE()
LIST(APPEND PTHREADPOOL_SRCS src/pthreads.c)
ENDIF()
+ IF(PTHREADPOOL_ENABLE_FASTPATH)
+ LIST(APPEND PTHREADPOOL_SRCS src/fastpath.c)
+ ENDIF()
ENDIF()
ADD_LIBRARY(pthreadpool_interface INTERFACE)
@@ -114,6 +122,11 @@ ELSEIF(PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "event")
ELSEIF(NOT PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "default")
MESSAGE(FATAL_ERROR "Unsupported synchronization primitive ${PTHREADPOOL_SYNC_PRIMITIVE}")
ENDIF()
+IF(PTHREADPOOL_ENABLE_FASTPATH)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_ENABLE_FASTPATH=1)
+ELSE()
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_ENABLE_FASTPATH=0)
+ENDIF()
SET_TARGET_PROPERTIES(pthreadpool PROPERTIES
C_STANDARD 11
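[Editor's note: the definition above is always emitted as either PTHREADPOOL_ENABLE_FASTPATH=1 or =0, never left undefined, so code consuming it can gate on plain #if rather than #ifdef. A minimal sketch of that pattern in C; the two run_* functions are illustrative stand-ins, not the library's actual internal dispatch:

#include <stdio.h>

/* Normally injected by the build system as =0 or =1; the default here
 * exists only so this sketch compiles standalone. */
#ifndef PTHREADPOOL_ENABLE_FASTPATH
#define PTHREADPOOL_ENABLE_FASTPATH 0
#endif

static void run_fastpath(void) { puts("atomic-decrement path"); }
static void run_portable(void) { puts("compare-and-swap path"); }

int main(void) {
#if PTHREADPOOL_ENABLE_FASTPATH
  run_fastpath();
#else
  run_portable();
#endif
  return 0;
}
]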
diff --git a/METADATA b/METADATA
index f63d162..29ec4e3 100644
--- a/METADATA
+++ b/METADATA
@@ -9,11 +9,11 @@ third_party {
type: GIT
value: "https://github.com/Maratyszcza/pthreadpool"
}
- version: "9b2c0caf7d9843f25709178b0cd7030892a1ff88"
+ version: "029c88620802e1361ccf41d1970bd5b07fd6b7bb"
license_type: NOTICE
last_upgrade_date {
year: 2020
- month: 5
- day: 1
+ month: 7
+ day: 10
}
}
diff --git a/cmake/DownloadCpuinfo.cmake b/cmake/DownloadCpuinfo.cmake
index 25213a0..e6f2893 100644
--- a/cmake/DownloadCpuinfo.cmake
+++ b/cmake/DownloadCpuinfo.cmake
@@ -4,8 +4,8 @@ PROJECT(cpuinfo-download NONE)
INCLUDE(ExternalProject)
ExternalProject_Add(cpuinfo
- URL https://github.com/pytorch/cpuinfo/archive/0cc563acb9baac39f2c1349bc42098c4a1da59e3.tar.gz
- URL_HASH SHA256=80625d0b69a3d69b70c2236f30db2c542d0922ccf9bb51a61bc39c49fac91a35
+ URL https://github.com/pytorch/cpuinfo/archive/19b9316c71e4e45b170a664bf62ddefd7ac9feb5.zip
+ URL_HASH SHA256=e0a485c072de957668eb324c49d726dc0fd736cfb9436b334325f20d93085003
SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source"
BINARY_DIR "${CMAKE_BINARY_DIR}/cpuinfo"
CONFIGURE_COMMAND ""
diff --git a/include/pthreadpool.h b/include/pthreadpool.h
index de4016b..6a7d61f 100644
--- a/include/pthreadpool.h
+++ b/include/pthreadpool.h
@@ -11,8 +11,14 @@ typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_1d_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
@@ -360,6 +366,86 @@ void pthreadpool_parallelize_2d_tile_2d_with_uarch(
uint32_t flags);
/**
+ * Process items on a 3D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * function(context, i, j, k);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 3D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 3D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 3D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_3d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ uint32_t flags);
+
+/**
+ * Process items on a 3D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * function(context, i, j, k, min(range_k - k, tile_k));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 3D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 3D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 3D grid.
+ * @param tile_k the maximum number of items along the third dimension of
+ * the 3D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_3d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_k,
+ uint32_t flags);
+
+/**
* Process items on a 3D grid with the specified maximum tile size along the
* last two grid dimensions.
*
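[Editor's note: as a usage reference for the new pthreadpool_parallelize_3d entry point added above, a minimal caller could look like the following sketch. The context struct, kernel, and sizes are illustrative; only the pthreadpool_* calls are library API:

#include <stddef.h>
#include <pthreadpool.h>

/* Illustrative context and kernel (not library code): scale every
 * element of an ni x nj x nk volume stored in row-major order. */
struct scale_ctx { float* data; size_t nj, nk; };

static void scale_item(void* context, size_t i, size_t j, size_t k) {
  struct scale_ctx* ctx = (struct scale_ctx*) context;
  ctx->data[(i * ctx->nj + j) * ctx->nk + k] *= 2.0f;
}

void scale_volume(float* data, size_t ni, size_t nj, size_t nk) {
  pthreadpool_t pool = pthreadpool_create(0 /* 0 = one thread per core */);
  struct scale_ctx ctx = { data, nj, nk };
  pthreadpool_parallelize_3d(pool, scale_item, &ctx,
    ni, nj, nk, 0 /* flags */);
  pthreadpool_destroy(pool);
}
]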
@@ -468,6 +554,94 @@ void pthreadpool_parallelize_3d_tile_2d_with_uarch(
uint32_t flags);
/**
+ * Process items on a 4D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * function(context, i, j, k, l);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 4D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 4D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 4D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 4D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_4d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ uint32_t flags);
+
+/**
+ * Process items on a 4D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * function(context, i, j, k, l, min(range_l - l, tile_l));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 4D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 4D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 4D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 4D grid.
+ * @param tile_l the maximum number of items along the fourth dimension of
+ * the 4D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_4d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_l,
+ uint32_t flags);
+
+/**
* Process items on a 4D grid with the specified maximum tile size along the
* last two grid dimensions.
*
@@ -584,6 +758,102 @@ void pthreadpool_parallelize_4d_tile_2d_with_uarch(
uint32_t flags);
/**
+ * Process items on a 5D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * function(context, i, j, k, l, m);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 5D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 5D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 5D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 5D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 5D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_5d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ uint32_t flags);
+
+/**
+ * Process items on a 5D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * function(context, i, j, k, l, m, min(range_m - m, tile_m));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 5D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 5D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 5D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 5D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 5D grid.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 5D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_5d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_m,
+ uint32_t flags);
+
+/**
* Process items on a 5D grid with the specified maximum tile size along the
* last two grid dimensions.
*
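[Editor's note: as the doc comments above state, a NULL threadpool is valid and the grid is then processed serially on the calling thread. A self-contained sketch with an illustrative counting kernel:

#include <stddef.h>
#include <pthreadpool.h>

/* Illustrative kernel (not library code): counts invocations. */
static void count_item(void* context,
  size_t i, size_t j, size_t k, size_t l, size_t m) {
  (void) i; (void) j; (void) k; (void) l; (void) m;
  ++*(size_t*) context;
}

size_t count_5d(void) {
  size_t calls = 0;
  /* NULL pool: runs the whole loop nest on this thread. */
  pthreadpool_parallelize_5d(NULL, count_item, &calls,
    2, 3, 4, 5, 6, 0 /* flags */);
  return calls; /* 2*3*4*5*6 = 720 */
}
]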
diff --git a/src/fastpath.c b/src/fastpath.c
new file mode 100644
index 0000000..6abbebe
--- /dev/null
+++ b/src/fastpath.c
@@ -0,0 +1,1170 @@
+/* Standard C headers */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if PTHREADPOOL_USE_CPUINFO
+ #include <cpuinfo.h>
+#endif
+
+/* Dependencies */
+#include <fxdiv.h>
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-atomics.h"
+#include "threadpool-common.h"
+#include "threadpool-object.h"
+#include "threadpool-utils.h"
+
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, range_start++);
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ task(argument, index);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
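[Editor's note: the loop condition above is the heart of the fast path. range_length is decremented unconditionally, and because the counter is an unsigned size_t, a wrapped-around result signals exhaustion: each of the threads_count workers over-decrements an empty range at most once, so wrapped values stay in [-threads_count, SIZE_MAX] and compare >= range_threshold. A standalone sketch of the same idea using C11 atomics instead of the library's threadpool-atomics.h wrappers:

#include <stdatomic.h>
#include <stddef.h>

/* Sketch (not library code): claim work items by atomic decrement.
 * `remaining` starts at the item count; once it has hit zero, each of
 * the `threads` workers decrements at most once more, so wrapped
 * values never drop below -(size_t)threads. */
static void drain(atomic_size_t* remaining, size_t threads) {
  const size_t threshold = -threads; /* == SIZE_MAX - threads + 1 */
  for (;;) {
    const size_t left =
      atomic_fetch_sub_explicit(remaining, 1, memory_order_relaxed) - 1;
    if (left >= threshold) {
      break; /* counter had already reached zero: range exhausted */
    }
    /* ... process one claimed item here ... */
  }
}
]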
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_with_id_t task = (pthreadpool_task_1d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_1d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_1d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, range_start++);
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ task(argument, uarch_index, index);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
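[Editor's note: the uarch_index computed above is passed through to the task so that callers can pick per-microarchitecture kernel variants. A hedged usage sketch for the corresponding public entry point, pthreadpool_parallelize_1d_with_uarch; the context, kernels, and dispatch are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <pthreadpool.h>

/* Illustrative (not library code): uarch_index selects a kernel
 * variant, e.g. index 0 tuned for big cores, index 1 for little. */
typedef void (*row_kernel_t)(float* row, size_t n);

struct dispatch_ctx { float* rows; size_t n; row_kernel_t kernels[2]; };

static void run_row(void* context, uint32_t uarch_index, size_t i) {
  struct dispatch_ctx* ctx = (struct dispatch_ctx*) context;
  ctx->kernels[uarch_index](ctx->rows + i * ctx->n, ctx->n);
}

void run_rows(pthreadpool_t pool, struct dispatch_ctx* ctx, size_t rows) {
  /* default_uarch_index = 0, max_uarch_index = 1: any index reported
   * above 1 falls back to 0, matching the clamping in the code above. */
  pthreadpool_parallelize_1d_with_uarch(pool, run_row, ctx,
    0, 1, rows, 0 /* flags */);
}
]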
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_tile_1d_t task = (pthreadpool_task_1d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const size_t tile = threadpool->params.parallelize_1d_tile_1d.tile;
+ size_t tile_start = range_start * tile;
+
+ const size_t range = threadpool->params.parallelize_1d_tile_1d.range;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, tile_start, min(range - tile_start, tile));
+ tile_start += tile;
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t tile_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const size_t tile_start = tile_index * tile;
+ task(argument, tile_start, min(range - tile_start, tile));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_t task = (pthreadpool_task_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(range_start, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j);
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(linear_index, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
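[Editor's note: the index decomposition above is why the hot loop carries struct fxdiv_divisor_size_t values: FXdiv replaces division by a loop-invariant divisor with a precomputed multiply-and-shift, avoiding a hardware divide per stolen item. A standalone sketch of the same decomposition, assuming FXdiv is available as <fxdiv.h> per this library's dependencies:

#include <stddef.h>
#include <fxdiv.h>

/* Decompose a linear index into (i, j) over a range_i x range_j grid
 * using FXdiv's precomputed divisor, as the fastpath loops do. */
static void decompose(size_t linear, size_t range_j_value,
                      size_t* i, size_t* j) {
  const struct fxdiv_divisor_size_t range_j =
    fxdiv_init_size_t(range_j_value);       /* precompute once per task */
  const struct fxdiv_result_size_t d =
    fxdiv_divide_size_t(linear, range_j);   /* multiply+shift, no divide */
  *i = d.quotient;
  *j = d.remainder;
}
]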
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_1d_t task = (pthreadpool_task_2d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_1d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_1d.tile_j;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+
+ const size_t range_j = threadpool->params.parallelize_2d_tile_1d.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, start_j, min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, tile_index_i_j.quotient, start_j, min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_2d_t task = (pthreadpool_task_2d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_2d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t tile_i = threadpool->params.parallelize_2d_tile_2d.tile_i;
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_2d.tile_j;
+ size_t start_i = tile_index_i_j.quotient * tile_i;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+
+ const size_t range_i = threadpool->params.parallelize_2d_tile_2d.range_i;
+ const size_t range_j = threadpool->params.parallelize_2d_tile_2d.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ start_i += tile_i;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_i = tile_index_i_j.quotient * tile_i;
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_2d_with_id_t task = (pthreadpool_task_2d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_2d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_2d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_range_j;
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_result_size_t index = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t range_i = threadpool->params.parallelize_2d_tile_2d_with_uarch.range_i;
+ const size_t tile_i = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_i;
+ const size_t range_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.range_j;
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_j;
+ size_t start_i = index.quotient * tile_i;
+ size_t start_j = index.remainder * tile_j;
+
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ start_i += tile_i;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_i = tile_index_i_j.quotient * tile_i;
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, uarch_index, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_t task = (pthreadpool_task_3d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_3d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(range_start, range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k);
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(linear_index, range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_1d_t task = (pthreadpool_task_3d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_1d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_1d.tile_k;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_1d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, start_k, min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_2d_t task = (pthreadpool_task_3d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_2d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_3d_tile_2d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_3d_tile_2d.tile_j;
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_2d.tile_k;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_2d.range_k;
+ const size_t range_j = threadpool->params.parallelize_3d_tile_2d.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, tile_index_i_j.quotient, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_2d_with_id_t task = (pthreadpool_task_3d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_3d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_3d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_j;
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_k;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.range_k;
+ const size_t range_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, i, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, uarch_index, tile_index_i_j.quotient, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_t task = (pthreadpool_task_4d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_4d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(range_start, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_4d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+
+ const size_t range_k = threadpool->params.parallelize_4d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l);
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(linear_index, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_1d_t task = (pthreadpool_task_4d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_1d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_1d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_1d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = tile_index_k_l.quotient;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_1d.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_1d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, start_l, min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, tile_index_k_l.quotient, start_l, min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_2d_t task = (pthreadpool_task_4d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_2d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_2d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_k = threadpool->params.parallelize_4d_tile_2d.tile_k;
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_2d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_k_l.quotient * tile_k;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_2d.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_2d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_k = tile_index_k_l.quotient * tile_k;
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_2d_with_id_t task = (pthreadpool_task_4d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_4d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_4d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
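+ /*
+ * uarch_index identifies the microarchitecture of the core this thread is
+ * currently running on (when cpuinfo is available); an index above
+ * max_uarch_index falls back to the default so the task never observes a
+ * value the caller did not prepare for.
+ */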
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_k = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_k;
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_k_l.quotient * tile_k;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, i, j, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_k = tile_index_k_l.quotient * tile_k;
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, uarch_index, index_i_j.quotient, index_i_j.remainder, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_t task = (pthreadpool_task_5d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_lm = threadpool->params.parallelize_5d.range_lm;
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(range_start, range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_5d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+
+ const size_t range_l = threadpool->params.parallelize_5d.range_l;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, m);
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(linear_index, range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_1d_t task = (pthreadpool_task_5d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_1d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(range_start, tile_range_m);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_5d_tile_1d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_5d_tile_1d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_1d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_1d.range_m;
+ const size_t range_k = threadpool->params.parallelize_5d_tile_1d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, start_m, min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(linear_index, tile_range_m);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder, start_m,
+ min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_2d_t task = (pthreadpool_task_5d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_lm = threadpool->params.parallelize_5d_tile_2d.tile_range_lm;
+ const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(range_start, tile_range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d_tile_2d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_2d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const size_t tile_l = threadpool->params.parallelize_5d_tile_2d.tile_l;
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_2d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t start_l = tile_index_l_m.quotient * tile_l;
+ size_t start_m = tile_index_l_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_2d.range_m;
+ const size_t range_l = threadpool->params.parallelize_5d_tile_2d.range_l;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, start_l, start_m, min(range_l - start_l, tile_l), min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(linear_index, tile_range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const size_t start_l = tile_index_l_m.quotient * tile_l;
+ const size_t start_m = tile_index_l_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder,
+ start_l, start_m, min(range_l - start_l, tile_l), min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_tile_2d_t task = (pthreadpool_task_6d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_mn = threadpool->params.parallelize_6d_tile_2d.tile_range_mn;
+ const struct fxdiv_result_size_t tile_index_ijkl_mn = fxdiv_divide_size_t(range_start, tile_range_mn);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_6d_tile_2d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_mn.quotient, range_kl);
+ const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_2d.tile_range_n;
+ const struct fxdiv_result_size_t tile_index_m_n = fxdiv_divide_size_t(tile_index_ijkl_mn.remainder, tile_range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_6d_tile_2d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_6d_tile_2d.tile_m;
+ const size_t tile_n = threadpool->params.parallelize_6d_tile_2d.tile_n;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_m_n.quotient * tile_m;
+ size_t start_n = tile_index_m_n.remainder * tile_n;
+
+ const size_t range_n = threadpool->params.parallelize_6d_tile_2d.range_n;
+ const size_t range_m = threadpool->params.parallelize_6d_tile_2d.range_m;
+ const size_t range_k = threadpool->params.parallelize_6d_tile_2d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, start_m, start_n, min(range_m - start_m, tile_m), min(range_n - start_n, tile_n));
+ start_n += tile_n;
+ if (start_n >= range_n) {
+ start_n = 0;
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_mn = fxdiv_divide_size_t(linear_index, tile_range_mn);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_mn.quotient, range_kl);
+ const struct fxdiv_result_size_t tile_index_m_n = fxdiv_divide_size_t(tile_index_ijkl_mn.remainder, tile_range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t start_m = tile_index_m_n.quotient * tile_m;
+ const size_t start_n = tile_index_m_n.remainder * tile_n;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder,
+ start_m, start_n, min(range_m - start_m, tile_m), min(range_n - start_n, tile_n));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
diff --git a/src/portable-api.c b/src/portable-api.c
index 84d6eda..ef36578 100644
--- a/src/portable-api.c
+++ b/src/portable-api.c
@@ -35,6 +35,7 @@ static void thread_parallelize_1d(struct pthreadpool* threadpool, struct thread_
const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
/* Process thread's own range of items */
size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
@@ -69,7 +70,7 @@ static void thread_parallelize_1d_with_uarch(struct pthreadpool* threadpool, str
const uint32_t default_uarch_index = threadpool->params.parallelize_1d_with_uarch.default_uarch_index;
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > threadpool->params.parallelize_1d_with_uarch.max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -280,7 +281,7 @@ static void thread_parallelize_2d_tile_2d_with_uarch(struct pthreadpool* threadp
const uint32_t default_uarch_index = threadpool->params.parallelize_2d_tile_2d_with_uarch.default_uarch_index;
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > threadpool->params.parallelize_2d_tile_2d_with_uarch.max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -327,6 +328,106 @@ static void thread_parallelize_2d_tile_2d_with_uarch(struct pthreadpool* threadp
pthreadpool_fence_release();
}
+static void thread_parallelize_3d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_t task = (pthreadpool_task_3d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_3d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(range_start, range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
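+ /*
+ * The linear index equals (i * range_j + j) * range_k + k; two fxdiv
+ * divisions by precomputed divisors recover (i, j, k) without hardware
+ * integer division.
+ */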
+
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k);
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(linear_index, range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_3d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_1d_t task = (pthreadpool_task_3d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_1d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_1d.tile_k;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
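+ /*
+ * Here the linear index counts tiles rather than items: it equals
+ * (i * range_j + j) * tile_range_k + k / tile_k. The last tile along k may
+ * be partial, hence the min(range_k - start_k, tile_k) clamp below.
+ */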
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_1d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, start_k, min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
static void thread_parallelize_3d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
assert(threadpool != NULL);
assert(thread != NULL);
@@ -393,7 +494,7 @@ static void thread_parallelize_3d_tile_2d_with_uarch(struct pthreadpool* threadp
const uint32_t default_uarch_index = threadpool->params.parallelize_3d_tile_2d_with_uarch.default_uarch_index;
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > threadpool->params.parallelize_3d_tile_2d_with_uarch.max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -448,6 +549,122 @@ static void thread_parallelize_3d_tile_2d_with_uarch(struct pthreadpool* threadp
pthreadpool_fence_release();
}
+static void thread_parallelize_4d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_t task = (pthreadpool_task_4d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_4d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(range_start, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_4d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
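+ /*
+ * The linear index equals ((i * range_j + j) * range_k + k) * range_l + l.
+ * Dividing by range_kl = range_k * range_l first splits it into the (i, j)
+ * and (k, l) halves, which the two remaining divisions resolve.
+ */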
+
+ const size_t range_k = threadpool->params.parallelize_4d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l);
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(linear_index, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_4d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_1d_t task = (pthreadpool_task_4d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_1d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_1d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_1d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = tile_index_k_l.quotient;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_k = threadpool->params.parallelize_4d_tile_1d.range_k;
+ const size_t range_l = threadpool->params.parallelize_4d_tile_1d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, start_l, min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, tile_index_k_l.quotient, start_l, min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
static void thread_parallelize_4d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
assert(threadpool != NULL);
assert(thread != NULL);
@@ -521,7 +738,7 @@ static void thread_parallelize_4d_tile_2d_with_uarch(struct pthreadpool* threadp
const uint32_t default_uarch_index = threadpool->params.parallelize_4d_tile_2d_with_uarch.default_uarch_index;
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > threadpool->params.parallelize_4d_tile_2d_with_uarch.max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -583,6 +800,137 @@ static void thread_parallelize_4d_tile_2d_with_uarch(struct pthreadpool* threadp
pthreadpool_fence_release();
}
+static void thread_parallelize_5d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_t task = (pthreadpool_task_5d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_lm = threadpool->params.parallelize_5d.range_lm;
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(range_start, range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_5d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+
+ const size_t range_l = threadpool->params.parallelize_5d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, m);
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(linear_index, range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_5d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_1d_t task = (pthreadpool_task_5d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_1d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(range_start, tile_range_m);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_5d_tile_1d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_5d_tile_1d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_1d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_1d.range_m;
+ const size_t range_k = threadpool->params.parallelize_5d_tile_1d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, start_m, min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(linear_index, tile_range_m);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder, start_m,
+ min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
static void thread_parallelize_5d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
assert(threadpool != NULL);
assert(thread != NULL);
@@ -740,7 +1088,8 @@ void pthreadpool_parallelize_1d(
size_t range,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || range <= 1) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || range <= 1) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -754,8 +1103,15 @@ void pthreadpool_parallelize_1d(
set_fpu_state(saved_fpu_state);
}
} else {
+ thread_function_t parallelize_1d = &thread_parallelize_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
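+ /* Take the fastpath only when the range is small enough that counters
+ decremented past zero (at most once per thread) cannot be mistaken for
+ remaining work; see src/fastpath.c for the wraparound details. */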
+ if (range < range_threshold) {
+ parallelize_1d = &pthreadpool_thread_parallelize_1d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_1d, NULL, 0,
+ threadpool, parallelize_1d, NULL, 0,
(void*) task, argument, range, flags);
}
}
@@ -769,12 +1125,13 @@ void pthreadpool_parallelize_1d_with_uarch(
size_t range,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || range <= 1) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || range <= 1) {
/* No thread pool used: execute task sequentially on the calling thread */
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -796,8 +1153,15 @@ void pthreadpool_parallelize_1d_with_uarch(
.default_uarch_index = default_uarch_index,
.max_uarch_index = max_uarch_index,
};
+ thread_function_t parallelize_1d_with_uarch = &thread_parallelize_1d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_1d_with_uarch = &pthreadpool_thread_parallelize_1d_with_uarch_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params),
+ threadpool, parallelize_1d_with_uarch, &params, sizeof(params),
task, argument, range, flags);
}
}
@@ -810,7 +1174,8 @@ void pthreadpool_parallelize_1d_tile_1d(
size_t tile,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || range <= tile) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || range <= tile) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -824,13 +1189,21 @@ void pthreadpool_parallelize_1d_tile_1d(
set_fpu_state(saved_fpu_state);
}
} else {
+ const size_t tile_range = divide_round_up(range, tile);
const struct pthreadpool_1d_tile_1d_params params = {
.range = range,
.tile = tile,
};
+ thread_function_t parallelize_1d_tile_1d = &thread_parallelize_1d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_1d_tile_1d = &pthreadpool_thread_parallelize_1d_tile_1d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_1d_tile_1d, &params, sizeof(params),
- task, argument, divide_round_up(range, tile), flags);
+ threadpool, parallelize_1d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -842,7 +1215,8 @@ void pthreadpool_parallelize_2d(
size_t range_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || (range_i | range_j) <= 1) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j) <= 1) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -858,12 +1232,20 @@ void pthreadpool_parallelize_2d(
set_fpu_state(saved_fpu_state);
}
} else {
+ const size_t range = range_i * range_j;
const struct pthreadpool_2d_params params = {
.range_j = fxdiv_init_size_t(range_j),
};
+ thread_function_t parallelize_2d = &thread_parallelize_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_2d = &pthreadpool_thread_parallelize_2d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_2d, &params, sizeof(params),
- task, argument, range_i * range_j, flags);
+ threadpool, parallelize_2d, &params, sizeof(params),
+ task, argument, range, flags);
}
}
@@ -876,7 +1258,8 @@ void pthreadpool_parallelize_2d_tile_1d(
size_t tile_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || (range_i <= 1 && range_j <= tile_j)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= 1 && range_j <= tile_j)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -893,14 +1276,22 @@ void pthreadpool_parallelize_2d_tile_1d(
}
} else {
const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range = range_i * tile_range_j;
const struct pthreadpool_2d_tile_1d_params params = {
.range_j = range_j,
.tile_j = tile_j,
.tile_range_j = fxdiv_init_size_t(tile_range_j),
};
+ thread_function_t parallelize_2d_tile_1d = &thread_parallelize_2d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_2d_tile_1d = &pthreadpool_thread_parallelize_2d_tile_1d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_2d_tile_1d, &params, sizeof(params),
- task, argument, range_i * tile_range_j, flags);
+ threadpool, parallelize_2d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -914,7 +1305,8 @@ void pthreadpool_parallelize_2d_tile_2d(
size_t tile_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -932,6 +1324,7 @@ void pthreadpool_parallelize_2d_tile_2d(
} else {
const size_t tile_range_i = divide_round_up(range_i, tile_i);
const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range = tile_range_i * tile_range_j;
const struct pthreadpool_2d_tile_2d_params params = {
.range_i = range_i,
.tile_i = tile_i,
@@ -939,9 +1332,16 @@ void pthreadpool_parallelize_2d_tile_2d(
.tile_j = tile_j,
.tile_range_j = fxdiv_init_size_t(tile_range_j),
};
+ thread_function_t parallelize_2d_tile_2d = &thread_parallelize_2d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_2d_tile_2d = &pthreadpool_thread_parallelize_2d_tile_2d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_2d_tile_2d, &params, sizeof(params),
- task, argument, tile_range_i * tile_range_j, flags);
+ threadpool, parallelize_2d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -957,12 +1357,13 @@ void pthreadpool_parallelize_2d_tile_2d_with_uarch(
size_t tile_j,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
/* No thread pool used: execute task sequentially on the calling thread */
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -984,6 +1385,7 @@ void pthreadpool_parallelize_2d_tile_2d_with_uarch(
} else {
const size_t tile_range_i = divide_round_up(range_i, tile_i);
const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range = tile_range_i * tile_range_j;
const struct pthreadpool_2d_tile_2d_with_uarch_params params = {
.default_uarch_index = default_uarch_index,
.max_uarch_index = max_uarch_index,
@@ -993,9 +1395,112 @@ void pthreadpool_parallelize_2d_tile_2d_with_uarch(
.tile_j = tile_j,
.tile_range_j = fxdiv_init_size_t(tile_range_j),
};
+ thread_function_t parallelize_2d_tile_2d_with_uarch = &thread_parallelize_2d_tile_2d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_2d_tile_2d_with_uarch = &pthreadpool_thread_parallelize_2d_tile_2d_with_uarch_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_2d_tile_2d_with_uarch, &params, sizeof(params),
- task, argument, tile_range_i * tile_range_j, flags);
+ threadpool, parallelize_2d_tile_2d_with_uarch, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_3d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k) <= 1) {
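+ /*
+ * The bitwise OR is <= 1 only when every range is 0 or 1, i.e. the whole
+ * grid contains at most one item, so sequential execution costs nothing.
+ */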
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ task(argument, i, j, k);
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range = range_i * range_j * range_k;
+ const struct pthreadpool_3d_params params = {
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ };
+ thread_function_t parallelize_3d = &thread_parallelize_3d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_3d = &pthreadpool_thread_parallelize_3d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
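+
+/*
+ * Illustrative usage (not part of this change; process_voxel and ctx are
+ * caller-supplied names): parallelize a triple loop over a 3-d grid:
+ *
+ *   pthreadpool_parallelize_3d(
+ *       threadpool, process_voxel, &ctx,
+ *       depth, height, width,
+ *       0);
+ *
+ * process_voxel(&ctx, i, j, k) then runs exactly once for every index in
+ * [0, depth) x [0, height) x [0, width), distributed across the pool.
+ */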
+
+void pthreadpool_parallelize_3d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_k,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, i, j, k, min(range_k - k, tile_k));
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range = range_i * range_j * tile_range_k;
+ const struct pthreadpool_3d_tile_1d_params params = {
+ .range_k = range_k,
+ .tile_k = tile_k,
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_k = fxdiv_init_size_t(tile_range_k),
+ };
+ thread_function_t parallelize_3d_tile_1d = &thread_parallelize_3d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_3d_tile_1d = &pthreadpool_thread_parallelize_3d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
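+
+/*
+ * Illustrative usage (not part of this change; process_k_block and ctx are
+ * caller-supplied names): tile only the innermost dimension so each call
+ * receives a contiguous block of up to 16 k-indices:
+ *
+ *   pthreadpool_parallelize_3d_tile_1d(
+ *       threadpool, process_k_block, &ctx,
+ *       range_i, range_j, range_k, 16,
+ *       0);
+ *
+ * The task observes (i, j, start_k, count) with count == min(range_k - start_k, 16).
+ */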
@@ -1010,7 +1515,8 @@ void pthreadpool_parallelize_3d_tile_2d(
size_t tile_k,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -1030,6 +1536,7 @@ void pthreadpool_parallelize_3d_tile_2d(
} else {
const size_t tile_range_j = divide_round_up(range_j, tile_j);
const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range = range_i * tile_range_j * tile_range_k;
const struct pthreadpool_3d_tile_2d_params params = {
.range_j = range_j,
.tile_j = tile_j,
@@ -1038,9 +1545,16 @@ void pthreadpool_parallelize_3d_tile_2d(
.tile_range_j = fxdiv_init_size_t(tile_range_j),
.tile_range_k = fxdiv_init_size_t(tile_range_k),
};
+ thread_function_t parallelize_3d_tile_2d = &thread_parallelize_3d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_3d_tile_2d = &pthreadpool_thread_parallelize_3d_tile_2d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_3d_tile_2d, &params, sizeof(params),
- task, argument, range_i * tile_range_j * tile_range_k, flags);
+ threadpool, parallelize_3d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -1057,12 +1571,13 @@ void pthreadpool_parallelize_3d_tile_2d_with_uarch(
size_t tile_k,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
/* No thread pool used: execute task sequentially on the calling thread */
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -1086,6 +1601,7 @@ void pthreadpool_parallelize_3d_tile_2d_with_uarch(
} else {
const size_t tile_range_j = divide_round_up(range_j, tile_j);
const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range = range_i * tile_range_j * tile_range_k;
const struct pthreadpool_3d_tile_2d_with_uarch_params params = {
.default_uarch_index = default_uarch_index,
.max_uarch_index = max_uarch_index,
@@ -1096,9 +1612,124 @@ void pthreadpool_parallelize_3d_tile_2d_with_uarch(
.tile_range_j = fxdiv_init_size_t(tile_range_j),
.tile_range_k = fxdiv_init_size_t(tile_range_k),
};
+ thread_function_t parallelize_3d_tile_2d_with_uarch = &thread_parallelize_3d_tile_2d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_3d_tile_2d_with_uarch = &pthreadpool_thread_parallelize_3d_tile_2d_with_uarch_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d_tile_2d_with_uarch, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_4d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ task(argument, i, j, k, l);
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range_kl = range_k * range_l;
+ const size_t range = range_i * range_j * range_kl;
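+ /* A single division by range_kl = range_k * range_l splits the linear
+ index into its (i, j) and (k, l) halves; see thread_parallelize_4d above. */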
+ const struct pthreadpool_4d_params params = {
+ .range_k = range_k,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_kl = fxdiv_init_size_t(range_kl),
+ .range_l = fxdiv_init_size_t(range_l),
+ };
+ thread_function_t parallelize_4d = &thread_parallelize_4d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_4d = &pthreadpool_thread_parallelize_4d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_4d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_l,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k) <= 1 && range_l <= tile_l)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, i, j, k, l, min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_l = divide_round_up(range_l, tile_l);
+ const size_t tile_range_kl = range_k * tile_range_l;
+ const size_t tile_range = range_i * range_j * tile_range_kl;
+ const struct pthreadpool_4d_tile_1d_params params = {
+ .range_k = range_k,
+ .range_l = range_l,
+ .tile_l = tile_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_kl = fxdiv_init_size_t(tile_range_kl),
+ .tile_range_l = fxdiv_init_size_t(tile_range_l),
+ };
+ thread_function_t parallelize_4d_tile_1d = &thread_parallelize_4d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_4d_tile_1d = &pthreadpool_thread_parallelize_4d_tile_1d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_3d_tile_2d_with_uarch, &params, sizeof(params),
- task, argument, range_i * tile_range_j * tile_range_k, flags);
+ threadpool, parallelize_4d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
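
Worked numbers for the tiling arithmetic used here: with `range_l = 23` and `tile_l = 5`, `divide_round_up(23, 5)` yields 5 tiles starting at l = 0, 5, 10, 15, 20, and the serial loop's `min(range_l - l, tile_l)` makes the last tile 3 items wide. One common definition of the helper (the library's own form may differ in detail):

	#include <assert.h>
	#include <stddef.h>

	static size_t divide_round_up(size_t dividend, size_t divisor) {
		return (dividend + divisor - 1) / divisor;
	}

	int main(void) {
		assert(divide_round_up(23, 5) == 5); /* tiles start at l = 0, 5, 10, 15, 20 */
		assert(23 - 20 == 3);                /* min(range_l - l, tile_l): ragged last tile */
		return 0;
	}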
@@ -1114,7 +1745,8 @@ void pthreadpool_parallelize_4d_tile_2d(
size_t tile_l,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -1137,6 +1769,7 @@ void pthreadpool_parallelize_4d_tile_2d(
} else {
const size_t tile_range_l = divide_round_up(range_l, tile_l);
const size_t tile_range_kl = divide_round_up(range_k, tile_k) * tile_range_l;
+ const size_t tile_range = range_i * range_j * tile_range_kl;
const struct pthreadpool_4d_tile_2d_params params = {
.range_k = range_k,
.tile_k = tile_k,
@@ -1146,9 +1779,16 @@ void pthreadpool_parallelize_4d_tile_2d(
.tile_range_kl = fxdiv_init_size_t(tile_range_kl),
.tile_range_l = fxdiv_init_size_t(tile_range_l),
};
+ thread_function_t parallelize_4d_tile_2d = &thread_parallelize_4d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_4d_tile_2d = &pthreadpool_thread_parallelize_4d_tile_2d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_4d_tile_2d, &params, sizeof(params),
- task, argument, range_i * range_j * tile_range_kl, flags);
+ threadpool, parallelize_4d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -1166,12 +1806,13 @@ void pthreadpool_parallelize_4d_tile_2d_with_uarch(
size_t tile_l,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
/* No thread pool used: execute task sequentially on the calling thread */
uint32_t uarch_index = default_uarch_index;
#if PTHREADPOOL_USE_CPUINFO
- uarch_index = cpuinfo_get_current_uarch_index();
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
if (uarch_index > max_uarch_index) {
uarch_index = default_uarch_index;
}
@@ -1198,6 +1839,7 @@ void pthreadpool_parallelize_4d_tile_2d_with_uarch(
} else {
const size_t tile_range_l = divide_round_up(range_l, tile_l);
const size_t tile_range_kl = divide_round_up(range_k, tile_k) * tile_range_l;
+ const size_t tile_range = range_i * range_j * tile_range_kl;
const struct pthreadpool_4d_tile_2d_with_uarch_params params = {
.default_uarch_index = default_uarch_index,
.max_uarch_index = max_uarch_index,
@@ -1209,9 +1851,132 @@ void pthreadpool_parallelize_4d_tile_2d_with_uarch(
.tile_range_kl = fxdiv_init_size_t(tile_range_kl),
.tile_range_l = fxdiv_init_size_t(tile_range_l),
};
+ thread_function_t parallelize_4d_tile_2d_with_uarch = &thread_parallelize_4d_tile_2d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_4d_tile_2d_with_uarch = &pthreadpool_thread_parallelize_4d_tile_2d_with_uarch_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_4d_tile_2d_with_uarch, &params, sizeof(params),
- task, argument, range_i * range_j * tile_range_kl, flags);
+ threadpool, parallelize_4d_tile_2d_with_uarch, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_5d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l | range_m) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ task(argument, i, j, k, l, m);
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range_lm = range_l * range_m;
+ const size_t range = range_i * range_j * range_k * range_lm;
+ const struct pthreadpool_5d_params params = {
+ .range_l = range_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ .range_lm = fxdiv_init_size_t(range_lm),
+ .range_m = fxdiv_init_size_t(range_m),
+ };
+ thread_function_t parallelize_5d = &thread_parallelize_5d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_5d = &pthreadpool_thread_parallelize_5d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_5d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
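
A minimal caller for the new 5D entry point, sketched against the signature added in this patch (error handling omitted; the task `print_item` is ours):

	#include <pthreadpool.h>
	#include <stdio.h>

	static void print_item(void* ctx, size_t i, size_t j, size_t k, size_t l, size_t m) {
		(void) ctx;
		printf("(%zu, %zu, %zu, %zu, %zu)\n", i, j, k, l, m);
	}

	int main(void) {
		pthreadpool_t pool = pthreadpool_create(0 /* one thread per core */);
		pthreadpool_parallelize_5d(pool, print_item, NULL,
			2, 2, 2, 2, 2, /* range_i .. range_m */
			0 /* flags */);
		pthreadpool_destroy(pool);
		return 0;
	}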
+
+void pthreadpool_parallelize_5d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_m,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k | range_l) <= 1 && range_m <= tile_m)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ task(argument, i, j, k, l, m, min(range_m - m, tile_m));
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_m = divide_round_up(range_m, tile_m);
+ const size_t range_kl = range_k * range_l;
+ const size_t tile_range = range_i * range_j * range_kl * tile_range_m;
+ const struct pthreadpool_5d_tile_1d_params params = {
+ .range_k = range_k,
+ .range_m = range_m,
+ .tile_m = tile_m,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_kl = fxdiv_init_size_t(range_kl),
+ .range_l = fxdiv_init_size_t(range_l),
+ .tile_range_m = fxdiv_init_size_t(tile_range_m),
+ };
+ thread_function_t parallelize_5d_tile_1d = &thread_parallelize_5d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_5d_tile_1d = &pthreadpool_thread_parallelize_5d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_5d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -1228,7 +1993,8 @@ void pthreadpool_parallelize_5d_tile_2d(
size_t tile_m,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || ((range_i | range_j | range_k) <= 1 && range_l <= tile_l && range_m <= tile_m)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k) <= 1 && range_l <= tile_l && range_m <= tile_m)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -1253,6 +2019,7 @@ void pthreadpool_parallelize_5d_tile_2d(
} else {
const size_t tile_range_m = divide_round_up(range_m, tile_m);
const size_t tile_range_lm = divide_round_up(range_l, tile_l) * tile_range_m;
+ const size_t tile_range = range_i * range_j * range_k * tile_range_lm;
const struct pthreadpool_5d_tile_2d_params params = {
.range_l = range_l,
.tile_l = tile_l,
@@ -1263,9 +2030,16 @@ void pthreadpool_parallelize_5d_tile_2d(
.tile_range_lm = fxdiv_init_size_t(tile_range_lm),
.tile_range_m = fxdiv_init_size_t(tile_range_m),
};
+ thread_function_t parallelize_5d_tile_2d = &thread_parallelize_5d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_5d_tile_2d = &pthreadpool_thread_parallelize_5d_tile_2d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_5d_tile_2d, &params, sizeof(params),
- task, argument, range_i * range_j * range_k * tile_range_lm, flags);
+ threadpool, parallelize_5d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
@@ -1283,7 +2057,8 @@ void pthreadpool_parallelize_6d_tile_2d(
size_t tile_n,
uint32_t flags)
{
- if (threadpool == NULL || threadpool->threads_count.value <= 1 || ((range_i | range_j | range_k | range_l) <= 1 && range_m <= tile_m && range_n <= tile_n)) {
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k | range_l) <= 1 && range_m <= tile_m && range_n <= tile_n)) {
/* No thread pool used: execute task sequentially on the calling thread */
struct fpu_state saved_fpu_state = { 0 };
if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
@@ -1311,6 +2086,7 @@ void pthreadpool_parallelize_6d_tile_2d(
const size_t range_kl = range_k * range_l;
const size_t tile_range_n = divide_round_up(range_n, tile_n);
const size_t tile_range_mn = divide_round_up(range_m, tile_m) * tile_range_n;
+ const size_t tile_range = range_i * range_j * range_kl * tile_range_mn;
const struct pthreadpool_6d_tile_2d_params params = {
.range_k = range_k,
.range_m = range_m,
@@ -1323,8 +2099,15 @@ void pthreadpool_parallelize_6d_tile_2d(
.tile_range_mn = fxdiv_init_size_t(tile_range_mn),
.tile_range_n = fxdiv_init_size_t(tile_range_n),
};
+ thread_function_t parallelize_6d_tile_2d = &thread_parallelize_6d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_6d_tile_2d = &pthreadpool_thread_parallelize_6d_tile_2d_fastpath;
+ }
+ #endif
pthreadpool_parallelize(
- threadpool, &thread_parallelize_6d_tile_2d, &params, sizeof(params),
- task, argument, range_i * range_j * range_kl * tile_range_mn, flags);
+ threadpool, parallelize_6d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
}
}
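
For symmetry with the 5D example earlier, a minimal caller for the 6D tiled entry point whose body was just updated (the counting task `count_tile` is ours; the sum of tile areas must equal the full iteration space):

	#include <pthreadpool.h>
	#include <stdatomic.h>
	#include <stddef.h>

	static _Atomic size_t items;

	static void count_tile(void* ctx, size_t i, size_t j, size_t k, size_t l,
		size_t start_m, size_t start_n, size_t tile_m, size_t tile_n)
	{
		(void) ctx;
		atomic_fetch_add(&items, tile_m * tile_n);
	}

	int main(void) {
		pthreadpool_t pool = pthreadpool_create(0);
		pthreadpool_parallelize_6d_tile_2d(pool, count_tile, NULL,
			3, 3, 3, 3, 7, 7, /* range_i .. range_n */
			2, 2,             /* tile_m, tile_n */
			0 /* flags */);
		pthreadpool_destroy(pool);
		return (int) (atomic_load(&items) != 3 * 3 * 3 * 3 * 7 * 7);
	}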
diff --git a/src/pthreads.c b/src/pthreads.c
index 2d945a0..430ca79 100644
--- a/src/pthreads.c
+++ b/src/pthreads.c
@@ -108,8 +108,7 @@ static void wait_worker_threads(struct pthreadpool* threadpool) {
/* Spin-wait */
for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- pthreadpool_fence_acquire();
+ pthreadpool_yield();
#if PTHREADPOOL_USE_FUTEX
has_active_threads = pthreadpool_load_acquire_uint32_t(&threadpool->has_active_threads);
@@ -151,8 +150,7 @@ static uint32_t wait_for_new_command(
if ((last_flags & PTHREADPOOL_FLAG_YIELD_WORKERS) == 0) {
/* Spin-wait loop */
for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- pthreadpool_fence_acquire();
+ pthreadpool_yield();
command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
if (command != last_command) {
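
The shape of the spin-wait loops after this change, condensed into a self-contained sketch (the constant value and the `flag` state are our stand-ins; the real loops poll `threadpool->command` or the active-thread counters before falling back to a futex or condition-variable wait):

	#include <stdatomic.h>
	#include <stdint.h>

	#define SPIN_WAIT_ITERATIONS 1000000 /* stand-in for PTHREADPOOL_SPIN_WAIT_ITERATIONS */

	int spin_wait_for_change(const _Atomic uint32_t* flag, uint32_t last) {
		for (uint32_t i = SPIN_WAIT_ITERATIONS; i != 0; i--) {
			/* pthreadpool_yield(): PAUSE on x86, YIELD on ARM, acquire fence otherwise */
			const uint32_t value = atomic_load_explicit(flag, memory_order_acquire);
			if (value != last) {
				return 1; /* new value observed without blocking */
			}
		}
		return 0; /* caller falls back to the blocking wait */
	}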
diff --git a/src/shim.c b/src/shim.c
index 7bf378c..e90ac45 100644
--- a/src/shim.c
+++ b/src/shim.c
@@ -133,6 +133,43 @@ void pthreadpool_parallelize_2d_tile_2d_with_uarch(
}
}
+void pthreadpool_parallelize_3d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ task(argument, i, j, k);
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, i, j, k, min(range_k - k, tile_k));
+ }
+ }
+ }
+}
+
void pthreadpool_parallelize_3d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_2d_t task,
@@ -177,6 +214,49 @@ void pthreadpool_parallelize_3d_tile_2d_with_uarch(
}
}
+void pthreadpool_parallelize_4d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ task(argument, i, j, k, l);
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, i, j, k, l, min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+}
+
void pthreadpool_parallelize_4d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_2d_t task,
@@ -227,6 +307,55 @@ void pthreadpool_parallelize_4d_tile_2d_with_uarch(
}
}
+void pthreadpool_parallelize_5d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ task(argument, i, j, k, l, m);
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_5d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_m,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ task(argument, i, j, k, l, m, min(range_m - m, tile_m));
+ }
+ }
+ }
+ }
+ }
+}
+
void pthreadpool_parallelize_5d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_5d_tile_2d_t task,
diff --git a/src/threadpool-atomics.h b/src/threadpool-atomics.h
index 474d12b..f0ddd89 100644
--- a/src/threadpool-atomics.h
+++ b/src/threadpool-atomics.h
@@ -4,16 +4,23 @@
#include <stddef.h>
#include <stdint.h>
+/* SSE-specific headers */
+#if defined(__i386__) || defined(__i686__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+ #include <xmmintrin.h>
+#endif
+
+/* ARM-specific headers */
+#if defined(__ARM_ACLE)
+ #include <arm_acle.h>
+#endif
+
/* MSVC-specific headers */
#ifdef _MSC_VER
#include <intrin.h>
- #if defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64)
- #include <immintrin.h>
- #endif
#endif
-#if defined(__wasm__) && defined(__EMSCRIPTEN_PTHREADS__) && defined(__clang__)
+#if defined(__wasm__) && defined(__clang__)
/*
* Clang for WebAssembly target lacks stdatomic.h header,
* even though it supports the necessary low-level intrinsics.
@@ -123,239 +130,239 @@
static inline void pthreadpool_fence_release() {
__c11_atomic_thread_fence(__ATOMIC_RELEASE);
}
-#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_AMD64))
- typedef volatile uint32_t pthreadpool_atomic_uint32_t;
- typedef volatile size_t pthreadpool_atomic_size_t;
- typedef void *volatile pthreadpool_atomic_void_p;
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+ #include <stdatomic.h>
+
+ typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
+ typedef _Atomic(size_t) pthreadpool_atomic_size_t;
+ typedef _Atomic(void*) pthreadpool_atomic_void_p;
static inline uint32_t pthreadpool_load_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- return *address;
+ return atomic_load_explicit(address, memory_order_relaxed);
}
static inline size_t pthreadpool_load_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return *address;
+ return atomic_load_explicit(address, memory_order_relaxed);
}
static inline void* pthreadpool_load_relaxed_void_p(
pthreadpool_atomic_void_p* address)
{
- return *address;
+ return atomic_load_explicit(address, memory_order_relaxed);
}
static inline uint32_t pthreadpool_load_acquire_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- /* x86-64 loads always have acquire semantics; use only a compiler barrier */
- const uint32_t value = *address;
- _ReadBarrier();
- return value;
+ return atomic_load_explicit(address, memory_order_acquire);
}
static inline size_t pthreadpool_load_acquire_size_t(
pthreadpool_atomic_size_t* address)
{
- /* x86-64 loads always have acquire semantics; use only a compiler barrier */
- const size_t value = *address;
- _ReadBarrier();
- return value;
+ return atomic_load_explicit(address, memory_order_acquire);
}
static inline void pthreadpool_store_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- *address = value;
+ atomic_store_explicit(address, value, memory_order_relaxed);
}
static inline void pthreadpool_store_relaxed_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- *address = value;
+ atomic_store_explicit(address, value, memory_order_relaxed);
}
static inline void pthreadpool_store_relaxed_void_p(
pthreadpool_atomic_void_p* address,
void* value)
{
- *address = value;
+ atomic_store_explicit(address, value, memory_order_relaxed);
}
static inline void pthreadpool_store_release_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- /* x86-64 stores always have release semantics; use only a compiler barrier */
- _WriteBarrier();
- *address = value;
+ atomic_store_explicit(address, value, memory_order_release);
}
static inline void pthreadpool_store_release_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- /* x86-64 stores always have release semantics; use only a compiler barrier */
- _WriteBarrier();
- *address = value;
+ atomic_store_explicit(address, value, memory_order_release);
}
static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return (size_t) _InterlockedDecrement64((volatile __int64*) address);
+ return atomic_fetch_sub_explicit(address, 1, memory_order_relaxed) - 1;
}
static inline size_t pthreadpool_decrement_fetch_release_size_t(
pthreadpool_atomic_size_t* address)
{
- return (size_t) _InterlockedDecrement64((volatile __int64*) address);
+ return atomic_fetch_sub_explicit(address, 1, memory_order_release) - 1;
}
static inline bool pthreadpool_try_decrement_relaxed_size_t(
pthreadpool_atomic_size_t* value)
{
- size_t actual_value = *value;
- while (actual_value != 0) {
- const size_t new_value = actual_value - 1;
- const size_t expected_value = actual_value;
- actual_value = _InterlockedCompareExchange64(
- (volatile __int64*) value, (__int64) new_value, (__int64) expected_value);
- if (actual_value == expected_value) {
- return true;
+ #if defined(__clang__) && (defined(__arm__) || defined(__aarch64__))
+ size_t actual_value;
+ do {
+ actual_value = __builtin_arm_ldrex((const volatile size_t*) value);
+ if (actual_value == 0) {
+ __builtin_arm_clrex();
+ return false;
+ }
+ } while (__builtin_arm_strex(actual_value - 1, (volatile size_t*) value) != 0);
+ return true;
+ #else
+ size_t actual_value = pthreadpool_load_relaxed_size_t(value);
+ while (actual_value != 0) {
+ if (atomic_compare_exchange_weak_explicit(
+ value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
+ {
+ return true;
+ }
}
- }
- return false;
+ return false;
+ #endif
}
static inline void pthreadpool_fence_acquire() {
- _mm_lfence();
- _ReadBarrier();
+ atomic_thread_fence(memory_order_acquire);
}
static inline void pthreadpool_fence_release() {
- _WriteBarrier();
- _mm_sfence();
+ atomic_thread_fence(memory_order_release);
}
-#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
- #include <stdatomic.h>
-
- typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
- typedef _Atomic(size_t) pthreadpool_atomic_size_t;
- typedef _Atomic(void*) pthreadpool_atomic_void_p;
+#elif defined(_MSC_VER) && defined(_M_X64)
+ typedef volatile uint32_t pthreadpool_atomic_uint32_t;
+ typedef volatile size_t pthreadpool_atomic_size_t;
+ typedef void *volatile pthreadpool_atomic_void_p;
static inline uint32_t pthreadpool_load_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- return atomic_load_explicit(address, memory_order_relaxed);
+ return *address;
}
static inline size_t pthreadpool_load_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_load_explicit(address, memory_order_relaxed);
+ return *address;
}
static inline void* pthreadpool_load_relaxed_void_p(
pthreadpool_atomic_void_p* address)
{
- return atomic_load_explicit(address, memory_order_relaxed);
+ return *address;
}
static inline uint32_t pthreadpool_load_acquire_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- return atomic_load_explicit(address, memory_order_acquire);
+ /* x86-64 loads always have acquire semantics; use only a compiler barrier */
+ const uint32_t value = *address;
+ _ReadBarrier();
+ return value;
}
static inline size_t pthreadpool_load_acquire_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_load_explicit(address, memory_order_acquire);
+ /* x86-64 loads always have acquire semantics; use only a compiler barrier */
+ const size_t value = *address;
+ _ReadBarrier();
+ return value;
}
static inline void pthreadpool_store_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- atomic_store_explicit(address, value, memory_order_relaxed);
+ *address = value;
}
static inline void pthreadpool_store_relaxed_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- atomic_store_explicit(address, value, memory_order_relaxed);
+ *address = value;
}
static inline void pthreadpool_store_relaxed_void_p(
pthreadpool_atomic_void_p* address,
void* value)
{
- atomic_store_explicit(address, value, memory_order_relaxed);
+ *address = value;
}
static inline void pthreadpool_store_release_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- atomic_store_explicit(address, value, memory_order_release);
+ /* x86-64 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
}
static inline void pthreadpool_store_release_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- atomic_store_explicit(address, value, memory_order_release);
+ /* x86-64 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
}
static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_fetch_sub_explicit(address, 1, memory_order_relaxed) - 1;
+ return (size_t) _InterlockedDecrement64((volatile __int64*) address);
}
static inline size_t pthreadpool_decrement_fetch_release_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_fetch_sub_explicit(address, 1, memory_order_release) - 1;
+ return (size_t) _InterlockedDecrement64((volatile __int64*) address);
}
static inline bool pthreadpool_try_decrement_relaxed_size_t(
pthreadpool_atomic_size_t* value)
{
- #if defined(__clang__) && (defined(__arm__) || defined(__aarch64__))
- size_t actual_value;
- do {
- actual_value = __builtin_arm_ldrex((const volatile size_t*) value);
- if (actual_value == 0) {
- __builtin_arm_clrex();
- return false;
- }
- } while (__builtin_arm_strex(actual_value - 1, (volatile size_t*) value) != 0);
- return true;
- #else
- size_t actual_value = pthreadpool_load_relaxed_size_t(value);
- while (actual_value != 0) {
- if (atomic_compare_exchange_weak_explicit(
- value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
- {
- return true;
- }
+ size_t actual_value = *value;
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = _InterlockedCompareExchange64(
+ (volatile __int64*) value, (__int64) new_value, (__int64) expected_value);
+ if (actual_value == expected_value) {
+ return true;
}
- return false;
- #endif
+ }
+ return false;
}
static inline void pthreadpool_fence_acquire() {
- atomic_thread_fence(memory_order_acquire);
+ _mm_lfence();
+ _ReadBarrier();
}
static inline void pthreadpool_fence_release() {
- atomic_thread_fence(memory_order_release);
+ _WriteBarrier();
+ _mm_sfence();
}
#elif defined(_MSC_VER) && defined(_M_IX86)
typedef volatile uint32_t pthreadpool_atomic_uint32_t;
@@ -701,3 +708,21 @@
#else
#error "Platform-specific implementation of threadpool-atomics.h required"
#endif
+
+#if defined(__i386__) || defined(__i686__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+ static inline void pthreadpool_yield() {
+ _mm_pause();
+ }
+#elif defined(__ARM_ACLE) || defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64))
+ static inline void pthreadpool_yield() {
+ __yield();
+ }
+#elif defined(__GNUC__) && (defined(__ARM_ARCH) && (__ARM_ARCH >= 7) || (defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6KZ__)) && !defined(__thumb__))
+ static inline void pthreadpool_yield() {
+ __asm__ __volatile__("yield");
+ }
+#else
+ static inline void pthreadpool_yield() {
+ pthreadpool_fence_acquire();
+ }
+#endif
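
How the decrement primitives above are consumed, sketched from the work-stealing loop in src/portable-api.c (a condensed reading, assuming the internal definitions from src/threadpool-object.h; the wrapper function is ours):

	#include <pthreadpool.h>
	#include "threadpool-object.h"

	/* Steal items from another thread's range: race on its length counter,
	 * then claim a concrete index from the end of its range. */
	static void steal_from(struct thread_info* other_thread,
		pthreadpool_task_1d_t task, void* argument)
	{
		while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
			const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
			task(argument, index);
		}
	}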
diff --git a/src/threadpool-object.h b/src/threadpool-object.h
index 239d116..9870e8a 100644
--- a/src/threadpool-object.h
+++ b/src/threadpool-object.h
@@ -179,6 +179,36 @@ struct pthreadpool_2d_tile_2d_with_uarch_params {
struct fxdiv_divisor_size_t tile_range_j;
};
+struct pthreadpool_3d_params {
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_3d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_3d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+};
+
+struct pthreadpool_3d_tile_1d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_3d_tile_1d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the tile_k argument passed to the pthreadpool_parallelize_3d_tile_1d function.
+ */
+ size_t tile_k;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_3d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_k, tile_k) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_k;
+};
+
struct pthreadpool_3d_tile_2d_params {
/**
* Copy of the range_j argument passed to the pthreadpool_parallelize_3d_tile_2d function.
@@ -241,6 +271,52 @@ struct pthreadpool_3d_tile_2d_with_uarch_params {
struct fxdiv_divisor_size_t tile_range_k;
};
+struct pthreadpool_4d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_4d function.
+ */
+ size_t range_k;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_4d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * range_l value.
+ */
+ struct fxdiv_divisor_size_t range_kl;
+ /**
+ * FXdiv divisor for the range_l argument passed to the pthreadpool_parallelize_4d function.
+ */
+ struct fxdiv_divisor_size_t range_l;
+};
+
+struct pthreadpool_4d_tile_1d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ size_t range_l;
+ /**
+ * Copy of the tile_l argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ size_t tile_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_kl;
+ /**
+ * FXdiv divisor for the divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_l;
+};
+
struct pthreadpool_4d_tile_2d_params {
/**
* Copy of the range_k argument passed to the pthreadpool_parallelize_4d_tile_2d function.
@@ -311,6 +387,60 @@ struct pthreadpool_4d_tile_2d_with_uarch_params {
struct fxdiv_divisor_size_t tile_range_l;
};
+struct pthreadpool_5d_params {
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_5d function.
+ */
+ size_t range_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_5d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_5d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+ /**
+ * FXdiv divisor for the range_l * range_m value.
+ */
+ struct fxdiv_divisor_size_t range_lm;
+ /**
+ * FXdiv divisor for the range_m argument passed to the pthreadpool_parallelize_5d function.
+ */
+ struct fxdiv_divisor_size_t range_m;
+};
+
+struct pthreadpool_5d_tile_1d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the range_m argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ size_t range_m;
+ /**
+ * Copy of the tile_m argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ size_t tile_m;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * range_l value.
+ */
+ struct fxdiv_divisor_size_t range_kl;
+ /**
+ * FXdiv divisor for the range_l argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_l;
+ /**
+ * FXdiv divisor for the divide_round_up(range_m, tile_m) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_m;
+};
+
struct pthreadpool_5d_tile_2d_params {
/**
* Copy of the range_l argument passed to the pthreadpool_parallelize_5d_tile_2d function.
@@ -434,10 +564,16 @@ struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
struct pthreadpool_2d_tile_1d_params parallelize_2d_tile_1d;
struct pthreadpool_2d_tile_2d_params parallelize_2d_tile_2d;
struct pthreadpool_2d_tile_2d_with_uarch_params parallelize_2d_tile_2d_with_uarch;
+ struct pthreadpool_3d_params parallelize_3d;
+ struct pthreadpool_3d_tile_1d_params parallelize_3d_tile_1d;
struct pthreadpool_3d_tile_2d_params parallelize_3d_tile_2d;
struct pthreadpool_3d_tile_2d_with_uarch_params parallelize_3d_tile_2d_with_uarch;
+ struct pthreadpool_4d_params parallelize_4d;
+ struct pthreadpool_4d_tile_1d_params parallelize_4d_tile_1d;
struct pthreadpool_4d_tile_2d_params parallelize_4d_tile_2d;
struct pthreadpool_4d_tile_2d_with_uarch_params parallelize_4d_tile_2d_with_uarch;
+ struct pthreadpool_5d_params parallelize_5d;
+ struct pthreadpool_5d_tile_1d_params parallelize_5d_tile_1d;
struct pthreadpool_5d_tile_2d_params parallelize_5d_tile_2d;
struct pthreadpool_6d_tile_2d_params parallelize_6d_tile_2d;
} params;
@@ -526,3 +662,79 @@ PTHREADPOOL_INTERNAL void pthreadpool_parallelize(
void* context,
size_t linear_range,
uint32_t flags);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
diff --git a/src/threadpool-utils.h b/src/threadpool-utils.h
index 24fee43..91e2445 100644
--- a/src/threadpool-utils.h
+++ b/src/threadpool-utils.h
@@ -4,25 +4,22 @@
#include <stddef.h>
/* SSE-specific headers */
-#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
#include <xmmintrin.h>
#endif
/* MSVC-specific headers */
-#if defined(_MSC_VER) && _MSC_VER >= 1920
+#if defined(_MSC_VER)
#include <intrin.h>
- #if defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64)
- #include <immintrin.h>
- #endif
#endif
struct fpu_state {
-#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
uint32_t mxcsr;
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0) || defined(_MSC_VER) && defined(_M_ARM)
uint32_t fpscr;
-#elif defined(__aarch64__)
+#elif defined(__GNUC__) && defined(__aarch64__) || defined(_MSC_VER) && defined(_M_ARM64)
uint64_t fpcr;
#else
char unused;
@@ -31,37 +28,63 @@ struct fpu_state {
static inline struct fpu_state get_fpu_state() {
struct fpu_state state = { 0 };
-#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
state.mxcsr = (uint32_t) _mm_getcsr();
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ state.fpscr = (uint32_t) _MoveFromCoprocessor(10, 7, 1, 0, 0);
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ state.fpcr = (uint64_t) _ReadStatusReg(0x5A20);
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
__asm__ __volatile__("VMRS %[fpscr], fpscr" : [fpscr] "=r" (state.fpscr));
-#elif defined(__aarch64__)
+#elif defined(__GNUC__) && defined(__aarch64__)
__asm__ __volatile__("MRS %[fpcr], fpcr" : [fpcr] "=r" (state.fpcr));
#endif
return state;
}
static inline void set_fpu_state(const struct fpu_state state) {
-#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
_mm_setcsr((unsigned int) state.mxcsr);
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ _MoveToCoprocessor((int) state.fpscr, 10, 7, 1, 0, 0);
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ _WriteStatusReg(0x5A20, (__int64) state.fpcr);
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
__asm__ __volatile__("VMSR fpscr, %[fpscr]" : : [fpscr] "r" (state.fpscr));
-#elif defined(__aarch64__)
+#elif defined(__GNUC__) && defined(__aarch64__)
__asm__ __volatile__("MSR fpcr, %[fpcr]" : : [fpcr] "r" (state.fpcr));
#endif
}
static inline void disable_fpu_denormals() {
-#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
_mm_setcsr(_mm_getcsr() | 0x8040);
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ int fpscr = _MoveFromCoprocessor(10, 7, 1, 0, 0);
+ fpscr |= 0x1000000;
+ _MoveToCoprocessor(fpscr, 10, 7, 1, 0, 0);
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ __int64 fpcr = _ReadStatusReg(0x5A20);
+ fpcr |= 0x1080000;
+ _WriteStatusReg(0x5A20, fpcr);
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
uint32_t fpscr;
- __asm__ __volatile__(
- "VMRS %[fpscr], fpscr\n"
- "ORR %[fpscr], #0x1000000\n"
- "VMSR fpscr, %[fpscr]\n"
- : [fpscr] "=r" (fpscr));
-#elif defined(__aarch64__)
+ #if defined(__thumb__) && !defined(__thumb2__)
+ __asm__ __volatile__(
+ "VMRS %[fpscr], fpscr\n"
+ "ORRS %[fpscr], %[bitmask]\n"
+ "VMSR fpscr, %[fpscr]\n"
+ : [fpscr] "=l" (fpscr)
+ : [bitmask] "l" (0x1000000)
+ : "cc");
+ #else
+ __asm__ __volatile__(
+ "VMRS %[fpscr], fpscr\n"
+ "ORR %[fpscr], #0x1000000\n"
+ "VMSR fpscr, %[fpscr]\n"
+ : [fpscr] "=r" (fpscr));
+ #endif
+#elif defined(__GNUC__) && defined(__aarch64__)
uint64_t fpcr;
__asm__ __volatile__(
"MRS %[fpcr], fpcr\n"
diff --git a/src/windows.c b/src/windows.c
index 19e534f..c9b88f7 100644
--- a/src/windows.c
+++ b/src/windows.c
@@ -35,8 +35,7 @@ static void wait_worker_threads(struct pthreadpool* threadpool, uint32_t event_i
/* Spin-wait */
for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- pthreadpool_fence_acquire();
+ pthreadpool_yield();
active_threads = pthreadpool_load_acquire_size_t(&threadpool->active_threads);
if (active_threads == 0) {
@@ -63,8 +62,7 @@ static uint32_t wait_for_new_command(
if ((last_flags & PTHREADPOOL_FLAG_YIELD_WORKERS) == 0) {
/* Spin-wait loop */
for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- pthreadpool_fence_acquire();
+ pthreadpool_yield();
command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
if (command != last_command) {
diff --git a/test/pthreadpool.cc b/test/pthreadpool.cc
index b8a6803..f822506 100644
--- a/test/pthreadpool.cc
+++ b/test/pthreadpool.cc
@@ -23,17 +23,44 @@ const size_t kParallelize2DTile2DRangeI = 53;
const size_t kParallelize2DTile2DRangeJ = 59;
const size_t kParallelize2DTile2DTileI = 5;
const size_t kParallelize2DTile2DTileJ = 7;
+const size_t kParallelize3DRangeI = 13;
+const size_t kParallelize3DRangeJ = 17;
+const size_t kParallelize3DRangeK = 19;
+const size_t kParallelize3DTile1DRangeI = 17;
+const size_t kParallelize3DTile1DRangeJ = 19;
+const size_t kParallelize3DTile1DRangeK = 23;
+const size_t kParallelize3DTile1DTileK = 5;
const size_t kParallelize3DTile2DRangeI = 19;
const size_t kParallelize3DTile2DRangeJ = 23;
const size_t kParallelize3DTile2DRangeK = 29;
const size_t kParallelize3DTile2DTileJ = 2;
const size_t kParallelize3DTile2DTileK = 3;
+const size_t kParallelize4DRangeI = 11;
+const size_t kParallelize4DRangeJ = 13;
+const size_t kParallelize4DRangeK = 17;
+const size_t kParallelize4DRangeL = 19;
+const size_t kParallelize4DTile1DRangeI = 13;
+const size_t kParallelize4DTile1DRangeJ = 17;
+const size_t kParallelize4DTile1DRangeK = 19;
+const size_t kParallelize4DTile1DRangeL = 23;
+const size_t kParallelize4DTile1DTileL = 5;
const size_t kParallelize4DTile2DRangeI = 17;
const size_t kParallelize4DTile2DRangeJ = 19;
const size_t kParallelize4DTile2DRangeK = 23;
const size_t kParallelize4DTile2DRangeL = 29;
const size_t kParallelize4DTile2DTileK = 2;
const size_t kParallelize4DTile2DTileL = 3;
+const size_t kParallelize5DRangeI = 7;
+const size_t kParallelize5DRangeJ = 11;
+const size_t kParallelize5DRangeK = 13;
+const size_t kParallelize5DRangeL = 17;
+const size_t kParallelize5DRangeM = 19;
+const size_t kParallelize5DTile1DRangeI = 11;
+const size_t kParallelize5DTile1DRangeJ = 13;
+const size_t kParallelize5DTile1DRangeK = 17;
+const size_t kParallelize5DTile1DRangeL = 19;
+const size_t kParallelize5DTile1DRangeM = 23;
+const size_t kParallelize5DTile1DTileM = 5;
const size_t kParallelize5DTile2DRangeI = 13;
const size_t kParallelize5DTile2DRangeJ = 17;
const size_t kParallelize5DTile2DRangeK = 19;
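
The new ranges appear chosen as small primes coprime to their tile sizes, so tiled dimensions never divide evenly and the ragged-last-tile path is always exercised. A quick check of that property for the 3D tile-1D constants:

	#include <assert.h>
	#include <stddef.h>

	int main(void) {
		const size_t range_k = 23, tile_k = 5; /* kParallelize3DTile1DRangeK / TileK */
		const size_t tiles = (range_k + tile_k - 1) / tile_k;
		assert(tiles == 5);
		assert(range_k % tile_k == 3); /* last tile is 3 items: min(range_k - k, tile_k) */
		return 0;
	}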
@@ -2286,6 +2313,646 @@ TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
}
+static void ComputeNothing3D(void*, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize3D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(threadpool.get(),
+ ComputeNothing3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ ComputeNothing3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+static void CheckBounds3D(void*, size_t i, size_t j, size_t k) {
+ EXPECT_LT(i, kParallelize3DRangeI);
+ EXPECT_LT(j, kParallelize3DRangeJ);
+ EXPECT_LT(k, kParallelize3DRangeK);
+}
+
+TEST(Parallelize3D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ CheckBounds3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ CheckBounds3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+static void SetTrue3D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize3D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(SetTrue3D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(SetTrue3D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+static void Increment3D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize3D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+static void IncrementSame3D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize3D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(IncrementSame3D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+}
+
+static void WorkImbalance3D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(WorkImbalance3D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+}
+
+static void ComputeNothing3DTile1D(void*, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(threadpool.get(),
+ ComputeNothing3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ ComputeNothing3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+static void CheckBounds3DTile1D(void*, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ EXPECT_LT(i, kParallelize3DTile1DRangeI);
+ EXPECT_LT(j, kParallelize3DTile1DRangeJ);
+ EXPECT_LT(start_k, kParallelize3DTile1DRangeK);
+ EXPECT_LE(start_k + tile_k, kParallelize3DTile1DRangeK);
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckBounds3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckBounds3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+static void CheckTiling3DTile1D(void*, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ EXPECT_GT(tile_k, 0);
+ EXPECT_LE(tile_k, kParallelize3DTile1DTileK);
+ EXPECT_EQ(start_k % kParallelize3DTile1DTileK, 0);
+ EXPECT_EQ(tile_k, std::min<size_t>(kParallelize3DTile1DTileK, kParallelize3DTile1DRangeK - start_k));
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckTiling3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckTiling3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
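
The uniform-tiling invariant above pins the decomposition down completely: tile starts are multiples of the tile size, and only the final tile may be short. A standalone sketch with illustrative numbers (the range 10 and tile size 4 are hypothetical; the real kParallelize3DTile1D constants are defined earlier in the file):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
	const size_t range_k = 10, tile_k = 4;
	// Expected invocations: start_k = 0, 4, 8 with tile sizes 4, 4, 2.
	for (size_t start_k = 0; start_k < range_k; start_k += tile_k) {
		const size_t tile = std::min(tile_k, range_k - start_k);
		std::printf("start_k=%zu tile=%zu\n", start_k, tile);
	}
	return 0;
}
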
+
+static void SetTrue3DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(SetTrue3DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(SetTrue3DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+static void Increment3DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+static void IncrementSame3DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(IncrementSame3DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+}
+
+static void WorkImbalance3DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ num_processed_items->fetch_add(tile_k, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && start_k == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(WorkImbalance3DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+}
+
static void ComputeNothing3DTile2D(void*, size_t, size_t, size_t, size_t, size_t) {
}
@@ -3058,6 +3725,672 @@ TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
}
+static void ComputeNothing4D(void*, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize4D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(threadpool.get(),
+ ComputeNothing4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ ComputeNothing4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+static void CheckBounds4D(void*, size_t i, size_t j, size_t k, size_t l) {
+ EXPECT_LT(i, kParallelize4DRangeI);
+ EXPECT_LT(j, kParallelize4DRangeJ);
+ EXPECT_LT(k, kParallelize4DRangeK);
+ EXPECT_LT(l, kParallelize4DRangeL);
+}
+
+TEST(Parallelize4D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ CheckBounds4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ CheckBounds4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+static void SetTrue4D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
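
SetTrue4D's row-major linearization generalizes the 3D form: each additional dimension multiplies in one more stride. A sketch of the forward mapping and its inverse, handy for sanity-checking this index math (names and extents are illustrative):

#include <array>
#include <cstddef>

// Row-major linear index for (i, j, k, l) over extents (I, J, K, L).
constexpr size_t Linearize4D(size_t i, size_t j, size_t k, size_t l,
                             size_t J, size_t K, size_t L) {
	return ((i * J + j) * K + k) * L + l;
}

// Inverse: recover (i, j, k, l) from the linear index.
constexpr std::array<size_t, 4> Delinearize4D(size_t idx, size_t J, size_t K, size_t L) {
	return { idx / (J * K * L), (idx / (K * L)) % J, (idx / L) % K, idx % L };
}

static_assert(Linearize4D(1, 2, 3, 4, 5, 6, 7) ==
              ((1 * 5 + 2) * 6 + 3) * 7 + 4, "stride check");
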
+
+TEST(Parallelize4D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(SetTrue4D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(SetTrue4D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+static void Increment4D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize4D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame4D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize4D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(IncrementSame4D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+}
+
+static void WorkImbalance4D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(WorkImbalance4D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+}
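
The reinterpret_cast from IncrementSame4D's typed first parameter to pthreadpool_task_4d_t is a test-file convenience; calling through a function pointer of mismatched type is technically undefined behavior in both C and C++. A cast-free equivalent, shown here only as a sketch, keeps the exact pthreadpool_task_4d_t signature and casts the context inside the callback:

#include <atomic>
#include <cstddef>
#include <pthreadpool.h>

// Exactly matches pthreadpool_task_4d_t, so no function-pointer cast is needed.
static void CountItem4D(void* context, size_t /*i*/, size_t /*j*/, size_t /*k*/, size_t /*l*/) {
	static_cast<std::atomic_int*>(context)->fetch_add(1, std::memory_order_relaxed);
}

// Usage: pthreadpool_parallelize_4d(pool, CountItem4D, &num_processed_items,
//                                   range_i, range_j, range_k, range_l, 0 /* flags */);
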
+
+static void ComputeNothing4DTile1D(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(threadpool.get(),
+ ComputeNothing4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ ComputeNothing4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+static void CheckBounds4DTile1D(void*, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ EXPECT_LT(i, kParallelize4DTile1DRangeI);
+ EXPECT_LT(j, kParallelize4DTile1DRangeJ);
+ EXPECT_LT(k, kParallelize4DTile1DRangeK);
+ EXPECT_LT(start_l, kParallelize4DTile1DRangeL);
+ EXPECT_LE(start_l + tile_l, kParallelize4DTile1DRangeL);
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckBounds4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckBounds4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+static void CheckTiling4DTile1D(void*, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ EXPECT_GT(tile_l, 0);
+ EXPECT_LE(tile_l, kParallelize4DTile1DTileL);
+ EXPECT_EQ(start_l % kParallelize4DTile1DTileL, 0);
+ EXPECT_EQ(tile_l, std::min<size_t>(kParallelize4DTile1DTileL, kParallelize4DTile1DRangeL - start_l));
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckTiling4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckTiling4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+static void SetTrue4DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(SetTrue4DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(SetTrue4DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+static void Increment4DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame4DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(IncrementSame4DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+}
+
+static void WorkImbalance4DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ num_processed_items->fetch_add(tile_l, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && start_l == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(WorkImbalance4DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+}
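
For the tiled variants, the uniform-tiling checks imply that the number of task invocations is the product of the untiled ranges times the rounded-up tile count along the tiled dimension, with each invocation covering at most one tile. A sketch of that count for the 4D-tile-1D case (parameter names are placeholders for the kParallelize4DTile1D values defined earlier in the file):

#include <cstddef>

// Integer ceiling division, a common helper when sizing tiled parallelism.
constexpr size_t DivideRoundUp(size_t n, size_t d) {
	return n / d + (n % d == 0 ? 0 : 1);
}

// Invocations of a pthreadpool_task_4d_tile_1d_t callback:
// one per (i, j, k) triple per l-tile.
constexpr size_t Invocations4DTile1D(size_t range_i, size_t range_j, size_t range_k,
                                     size_t range_l, size_t tile_l) {
	return range_i * range_j * range_k * DivideRoundUp(range_l, tile_l);
}
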
+
static void ComputeNothing4DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
}
@@ -3856,6 +5189,698 @@ TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
}
+static void ComputeNothing5D(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize5D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(threadpool.get(),
+ ComputeNothing5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ ComputeNothing5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+static void CheckBounds5D(void*, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ EXPECT_LT(i, kParallelize5DRangeI);
+ EXPECT_LT(j, kParallelize5DRangeJ);
+ EXPECT_LT(k, kParallelize5DRangeK);
+ EXPECT_LT(l, kParallelize5DRangeL);
+ EXPECT_LT(m, kParallelize5DRangeM);
+}
+
+TEST(Parallelize5D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ CheckBounds5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ CheckBounds5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+static void SetTrue5D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize5D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(SetTrue5D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(SetTrue5D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+static void Increment5D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize5D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame5D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize5D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(IncrementSame5D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+}
+
+static void WorkImbalance5D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0 && m == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(WorkImbalance5D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+}
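
Semantically, pthreadpool_parallelize_5d is equivalent to the serial loop nest below, up to execution order and thread assignment; the suites above verify exactly this contract (same bounds, every index tuple visited once). A sketch of that serial reference:

#include <cstddef>
#include <pthreadpool.h>

// Serial reference for pthreadpool_parallelize_5d: every (i, j, k, l, m) in the
// 5D range is passed to the task exactly once. The parallel version makes the
// same set of calls, in an unspecified order and on unspecified threads.
void SerialParallelize5D(pthreadpool_task_5d_t task, void* argument,
                         size_t range_i, size_t range_j, size_t range_k,
                         size_t range_l, size_t range_m) {
	for (size_t i = 0; i < range_i; i++)
		for (size_t j = 0; j < range_j; j++)
			for (size_t k = 0; k < range_k; k++)
				for (size_t l = 0; l < range_l; l++)
					for (size_t m = 0; m < range_m; m++)
						task(argument, i, j, k, l, m);
}
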
+
+static void ComputeNothing5DTile1D(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(threadpool.get(),
+ ComputeNothing5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ ComputeNothing5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+static void CheckBounds5DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ EXPECT_LT(i, kParallelize5DTile1DRangeI);
+ EXPECT_LT(j, kParallelize5DTile1DRangeJ);
+ EXPECT_LT(k, kParallelize5DTile1DRangeK);
+ EXPECT_LT(l, kParallelize5DTile1DRangeL);
+ EXPECT_LT(start_m, kParallelize5DTile1DRangeM);
+ EXPECT_LE(start_m + tile_m, kParallelize5DTile1DRangeM);
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckBounds5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckBounds5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+static void CheckTiling5DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ EXPECT_GT(tile_m, 0);
+ EXPECT_LE(tile_m, kParallelize5DTile1DTileM);
+ EXPECT_EQ(start_m % kParallelize5DTile1DTileM, 0);
+ EXPECT_EQ(tile_m, std::min<size_t>(kParallelize5DTile1DTileM, kParallelize5DTile1DRangeM - start_m));
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckTiling5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckTiling5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+static void SetTrue5DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(SetTrue5DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(SetTrue5DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
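+/*
+ * Same row-major flattening as SetTrue5DTile1D, but with per-item counters
+ * instead of booleans, so the tests can detect items processed zero times or
+ * more than once, not merely whether they were touched at all.
+ */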
+static void Increment5DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
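+/*
+ * The *ProcessedMultipleTimes tests issue kIncrementIterations5D back-to-back
+ * parallelize calls on one pool; each counter must end up at exactly the
+ * iteration count, verifying that the pool can be reused across runs without
+ * dropping or double-dispatching work.
+ */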
+TEST(Parallelize5DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
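+/*
+ * Every tile bumps a single shared counter, with one fetch_add per item
+ * rather than one per tile, so that all worker threads hammer the same
+ * atomic (typically a single cache line) and contention is maximized.
+ */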
+static void IncrementSame5DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(IncrementSame5DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+}
+
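+/*
+ * The worker that picks up the very first tile spins until every other item
+ * has been processed, so the tiles initially assigned to it can only
+ * complete if other threads steal them; otherwise the spin-wait would never
+ * terminate. A final count equal to the full 5-D volume therefore proves
+ * that work stealing took place.
+ */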
+static void WorkImbalance5DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ num_processed_items->fetch_add(tile_m, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0 && start_m == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(WorkImbalance5DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+}
+
static void ComputeNothing5DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
}