author	Qi Wang <interwq@gwu.edu>	2017-12-21 16:17:45 -0800
committer	Qi Wang <interwq@gmail.com>	2018-01-05 14:27:58 -0800
commit	ba5992fe9ac1708c812ec65bff3270bba17f1e1b (patch)
tree	4977c59116dad66c697efae1ee3ea37ed240e544 /src
parent	41790f4fa475434ea84b8509b9a68e63d9a86f95 (diff)
Improve the fit for aligned allocation.
We compute the max size required to satisfy an alignment. However, this can be quite pessimistic, especially with frequent reuse (and combined with state-based fragmentation). This commit adds one more fit step specific to aligned allocations, searching all potential fit size classes.
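
To see why the conservative bound works: with page-aligned extent bases, the distance from a base to the next multiple of the alignment is at most PAGE_CEILING(alignment) - PAGE, so an extent of esize + PAGE_CEILING(alignment) - PAGE bytes always contains an aligned region of esize bytes. Below is a minimal standalone C sketch that brute-force checks this bound (the 4 KiB page size and the align_up helper are assumptions for illustration, not jemalloc internals):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE ((size_t)4096) /* assumed page size, for illustration */

/* Round v up to the next multiple of align (a power of two). */
static size_t
align_up(size_t v, size_t align) {
	return (v + align - 1) & ~(align - 1);
}

int
main(void) {
	size_t esize = 8 * PAGE;      /* requested size */
	size_t alignment = 16 * PAGE; /* requested alignment */
	size_t max_size = esize + alignment - PAGE; /* conservative bound */

	/* A page-aligned base is at most alignment - PAGE bytes below the
	 * next alignment boundary, so max_size always leaves esize bytes. */
	for (size_t base = PAGE; base <= 4 * alignment; base += PAGE) {
		size_t leadsize = align_up(base, alignment) - base;
		assert(leadsize <= alignment - PAGE);
		assert(max_size - leadsize >= esize);
	}
	printf("%zu pages always cover %zu aligned pages\n",
	    max_size / PAGE, esize / PAGE);
	return 0;
}

The new fit step targets the gap between this worst case and typical reuse: a cached extent smaller than the bound can still satisfy the request if its base happens to fall near an alignment boundary.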
Diffstat (limited to 'src')
-rw-r--r--	src/extent.c	71
1 file changed, 61 insertions(+), 10 deletions(-)
diff --git a/src/extent.c b/src/extent.c
index bca703fc..517780ee 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -363,6 +363,43 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
 	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
 }
+/*
+ * Find an extent with size [min_size, max_size) to satisfy the alignment
+ * requirement.  For each size, try only the first extent in the heap.
+ */
+static extent_t *
+extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
+    size_t alignment) {
+	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
+	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
+
+	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
+	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
+	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+	    (size_t)i+1)) {
+		assert(i < NPSIZES);
+		assert(!extent_heap_empty(&extents->heaps[i]));
+		extent_t *extent = extent_heap_first(&extents->heaps[i]);
+		uintptr_t base = (uintptr_t)extent_base_get(extent);
+		size_t candidate_size = extent_size_get(extent);
+		assert(candidate_size >= min_size);
+
+		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
+		    PAGE_CEILING(alignment));
+		if (base > next_align || base + candidate_size <= next_align) {
+			/* Overflow or not crossing the next alignment. */
+			continue;
+		}
+
+		size_t leadsize = next_align - base;
+		if (candidate_size - leadsize >= min_size) {
+			return extent;
+		}
+	}
+
+	return NULL;
+}
+
 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
 static extent_t *
 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
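
An aside before the next hunk: here is the candidate test from extents_fit_alignment above, replayed with concrete numbers. This is a hypothetical standalone sketch; PAGE_CEILING and ALIGNMENT_CEILING are simplified stand-ins for jemalloc's macros, and the base address is made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096) /* assumed page size */

/* Simplified stand-ins for jemalloc's macros (power-of-two args only). */
#define PAGE_CEILING(s)         (((s) + PAGE - 1) & ~(PAGE - 1))
#define ALIGNMENT_CEILING(s, a) (((s) + ((a) - 1)) & ~((a) - 1))

/* Mirror the candidate test: does the extent at base, of candidate_size
 * bytes, contain min_size bytes starting at an alignment boundary? */
static int
candidate_fits(uintptr_t base, size_t candidate_size, size_t min_size,
    size_t alignment) {
	uintptr_t next_align = ALIGNMENT_CEILING(base,
	    PAGE_CEILING(alignment));
	if (base > next_align || base + candidate_size <= next_align) {
		return 0; /* overflow, or boundary lies at/past the end */
	}
	return candidate_size - (next_align - base) >= min_size;
}

int
main(void) {
	size_t min_size = 2 * PAGE, alignment = 8 * PAGE;
	/* Base sits 3 pages past a 32 KiB boundary, so 5 pages of lead
	 * waste are needed to reach the next one. */
	uintptr_t base = 0x100000 + 3 * PAGE;

	printf("6-page extent: %s\n", candidate_fits(base, 6 * PAGE,
	    min_size, alignment) ? "fits" : "no fit"); /* 6 - 5 < 2 */
	printf("8-page extent: %s\n", candidate_fits(base, 8 * PAGE,
	    min_size, alignment) ? "fits" : "no fit"); /* 8 - 5 >= 2 */
	return 0;
}

Scanning size classes upward from min_size, the function returns the first extent whose leading waste still leaves min_size usable bytes; probing only the first extent per heap keeps the extra step cheap.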
@@ -424,12 +461,30 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
  */
 static extent_t *
 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
-    size_t size) {
+    size_t esize, size_t alignment) {
 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
-	return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
-	    extents, size) : extents_first_fit_locked(tsdn, arena, extents,
-	    size);
+	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
+	/* Beware size_t wrap-around. */
+	if (max_size < esize) {
+		return NULL;
+	}
+
+	extent_t *extent = extents->delay_coalesce ?
+	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
+	    extents_first_fit_locked(tsdn, arena, extents, max_size);
+
+	if (alignment > PAGE && extent == NULL) {
+		/*
+		 * max_size guarantees the alignment requirement but is rather
+		 * pessimistic.  Next we try to satisfy the aligned allocation
+		 * with sizes in [esize, max_size).
+		 */
+		extent = extents_fit_alignment(extents, esize, max_size,
+		    alignment);
+	}
+
+	return extent;
 }
 static bool
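
The wrap-around guard, which the next hunk removes from extent_recycle_extract, now protects max_size here. A tiny hypothetical demonstration of the overflow it catches (4 KiB page size assumed):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((size_t)4096) /* assumed page size */
#define PAGE_CEILING(s) (((s) + PAGE - 1) & ~(PAGE - 1))

int
main(void) {
	/* An absurd request near SIZE_MAX makes the padded size wrap. */
	size_t esize = SIZE_MAX - PAGE;
	size_t alignment = 64 * PAGE;
	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;

	if (max_size < esize) {
		/* Without the guard, a tiny bogus max_size would be used. */
		printf("wrap-around: max_size=%zu < esize=%zu\n",
		    max_size, esize);
	}
	return 0;
}

Note that the retry path is gated on alignment > PAGE: for page-aligned requests, max_size equals esize, so the range [esize, max_size) is empty and the second step could find nothing new.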
@@ -821,11 +876,6 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
 	}
 	size_t esize = size + pad;
-	size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
-	/* Beware size_t wrap-around. */
-	if (alloc_size < esize) {
-		return NULL;
-	}
 	malloc_mutex_lock(tsdn, &extents->mtx);
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
 	extent_t *extent;
@@ -847,7 +897,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
 			extent_unlock(tsdn, unlock_extent);
 		}
 	} else {
-		extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
+		extent = extents_fit_locked(tsdn, arena, extents, esize,
+		    alignment);
 	}
 	if (extent == NULL) {
 		malloc_mutex_unlock(tsdn, &extents->mtx);
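
Finally, a usage sketch of the kind of request that reaches this code path, via jemalloc's non-standard mallocx API (the sizes are illustrative; a 4 KiB page size is assumed):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void) {
	/* 2 pages of data with 16-page (64 KiB) alignment: the conservative
	 * fit needs a 17-page extent, while the new step can reuse a smaller
	 * cached extent whose base already sits near a 64 KiB boundary. */
	void *p = mallocx(2 * 4096, MALLOCX_ALIGN(16 * 4096));
	if (p == NULL) {
		return 1;
	}
	printf("aligned allocation at %p\n", p);
	dallocx(p, 0);
	return 0;
}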