author     David Goldblatt <davidgoldblatt@fb.com>        2017-05-30 10:45:37 -0700
committer  David Goldblatt <davidtgoldblatt@gmail.com>    2017-05-31 13:08:45 -0700
commit     8261e581be517f4fe193ead2c9b662717d9ca5e0 (patch)
tree       4dbd5e4a2f434e23d29494c6f7821eedd44b3b78 /include
parent     041e041e1f23a03d1019330c8401a01285feb44f (diff)
Header refactoring: Pull size helpers out of jemalloc module.
Diffstat (limited to 'include')
-rw-r--r--   include/jemalloc/internal/arena_externs.h                |   8
-rw-r--r--   include/jemalloc/internal/arena_inlines_b.h              |  13
-rw-r--r--   include/jemalloc/internal/arena_types.h                  |   2
-rw-r--r--   include/jemalloc/internal/extent_inlines.h               |   3
-rw-r--r--   include/jemalloc/internal/extent_structs.h               |   2
-rw-r--r--   include/jemalloc/internal/jemalloc_internal_externs.h    |  17
-rw-r--r--   include/jemalloc/internal/jemalloc_internal_inlines_a.h  | 267
-rw-r--r--   include/jemalloc/internal/jemalloc_internal_inlines_c.h  |   7
-rw-r--r--   include/jemalloc/internal/prof_inlines_b.h               |   4
-rwxr-xr-x   include/jemalloc/internal/size_classes.sh                |   1
-rw-r--r--   include/jemalloc/internal/sz.h                           | 317
-rw-r--r--   include/jemalloc/internal/tcache_inlines.h               |   9
12 files changed, 340 insertions(+), 310 deletions(-)
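
The change is a mechanical move-and-rename: each size helper keeps its semantics but gains an sz_ prefix and moves from the jemalloc module into the new sz module (sz.h, added below). The only non-rename motion is LARGE_MINCLASS, which moves from arena_types.h into the generated size_classes.h, and large_pad, which becomes sz_large_pad in sz.h. A hypothetical caller (not part of this commit, shown only to summarize the rename) would change like this:

#include "jemalloc/internal/sz.h"	/* new home of the size helpers */

/*
 * Hypothetical helper; before this commit the body would have read
 * index2size(size2index(size)), with no sz_ prefixes and no sz.h include.
 */
static size_t
round_to_size_class(size_t size) {
	/* sz_size2index requires 0 < size <= LARGE_MAXCLASS. */
	assert(size > 0 && size <= LARGE_MAXCLASS);
	return sz_index2size(sz_size2index(size));
}
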
diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index 08a6d174..cfb7c6fb 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -6,14 +6,6 @@
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
-static const size_t large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- PAGE
-#else
- 0
-#endif
- ;
-
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index 16635c1a..003abe11 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -5,6 +5,7 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
static inline szind_t
@@ -127,7 +128,7 @@ arena_salloc(tsdn_t *tsdn, const void *ptr) {
(uintptr_t)ptr, true);
assert(szind != NSIZES);
- return index2size(szind);
+ return sz_index2size(szind);
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -160,7 +161,7 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
assert(szind != NSIZES);
- return index2size(szind);
+ return sz_index2size(szind);
}
static inline void
@@ -257,7 +258,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
- szind = size2index(size);
+ szind = sz_size2index(size);
slab = (szind < NBINS);
}
@@ -269,7 +270,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
- assert(szind == size2index(size));
+ assert(szind == sz_size2index(size));
assert((config_prof && opt_prof) || slab == (szind < NBINS));
if (config_debug) {
@@ -313,7 +314,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &local_ctx.szind,
&local_ctx.slab);
- assert(local_ctx.szind == size2index(size));
+ assert(local_ctx.szind == sz_size2index(size));
alloc_ctx = &local_ctx;
}
slab = alloc_ctx->slab;
@@ -323,7 +324,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
- szind = size2index(size);
+ szind = sz_size2index(size);
slab = (szind < NBINS);
}
diff --git a/include/jemalloc/internal/arena_types.h b/include/jemalloc/internal/arena_types.h
index 1374eeca..01b9096a 100644
--- a/include/jemalloc/internal/arena_types.h
+++ b/include/jemalloc/internal/arena_types.h
@@ -1,8 +1,6 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H
-#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
-
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h
index 94c41923..bb2bd699 100644
--- a/include/jemalloc/internal/extent_inlines.h
+++ b/include/jemalloc/internal/extent_inlines.h
@@ -6,6 +6,7 @@
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sz.h"
static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
@@ -65,7 +66,7 @@ extent_szind_get(const extent_t *extent) {
static inline size_t
extent_usize_get(const extent_t *extent) {
- return index2size(extent_szind_get(extent));
+ return sz_index2size(extent_szind_get(extent));
}
static inline size_t
diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
index 457891df..d2979503 100644
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h
@@ -53,7 +53,7 @@ struct extent_s {
* szind: The szind flag indicates usable size class index for
* allocations residing in this extent, regardless of whether the
* extent is a slab. Extent size and usable size often differ
- * even for non-slabs, either due to large_pad or promotion of
+ * even for non-slabs, either due to sz_large_pad or promotion of
* sampled small regions.
*
* nfree: Number of free regions in slab.
diff --git a/include/jemalloc/internal/jemalloc_internal_externs.h b/include/jemalloc/internal/jemalloc_internal_externs.h
index 11e16ecc..e10fb275 100644
--- a/include/jemalloc/internal/jemalloc_internal_externs.h
+++ b/include/jemalloc/internal/jemalloc_internal_externs.h
@@ -31,23 +31,6 @@ extern unsigned narenas_auto;
*/
extern atomic_p_t arenas[];
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const pind2sz_tab[NPSIZES+1];
-/*
- * index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by index2size_compute().
- */
-extern size_t const index2size_tab[NSIZES];
-/*
- * size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via size2index().
- */
-extern uint8_t const size2index_tab[];
-
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/include/jemalloc/internal/jemalloc_internal_inlines_a.h
index c8e26298..d0bf2eee 100644
--- a/include/jemalloc/internal/jemalloc_internal_inlines_a.h
+++ b/include/jemalloc/internal/jemalloc_internal_inlines_a.h
@@ -7,273 +7,6 @@
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"
-JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind(size_t psz) {
- if (unlikely(psz > LARGE_MAXCLASS)) {
- return NPSIZES;
- }
- {
- pszind_t x = lg_floor((psz<<1)-1);
- pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
- (LG_SIZE_CLASS_GROUP + LG_PAGE);
- pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
- pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
- size_t delta_inverse_mask = ZD(-1) << lg_delta;
- pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- pszind_t ind = grp + mod;
- return ind;
- }
-}
-
-static inline size_t
-pind2sz_compute(pszind_t pind) {
- if (unlikely(pind == NPSIZES)) {
- return LARGE_MAXCLASS + PAGE;
- }
- {
- size_t grp = pind >> LG_SIZE_CLASS_GROUP;
- size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_PAGE +
- (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_PAGE-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t sz = grp_size + mod_size;
- return sz;
- }
-}
-
-static inline size_t
-pind2sz_lookup(pszind_t pind) {
- size_t ret = (size_t)pind2sz_tab[pind];
- assert(ret == pind2sz_compute(pind));
- return ret;
-}
-
-static inline size_t
-pind2sz(pszind_t pind) {
- assert(pind < NPSIZES+1);
- return pind2sz_lookup(pind);
-}
-
-static inline size_t
-psz2u(size_t psz) {
- if (unlikely(psz > LARGE_MAXCLASS)) {
- return LARGE_MAXCLASS + PAGE;
- }
- {
- size_t x = lg_floor((psz<<1)-1);
- size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (psz + delta_mask) & ~delta_mask;
- return usize;
- }
-}
-
-static inline szind_t
-size2index_compute(size_t size) {
- if (unlikely(size > LARGE_MAXCLASS)) {
- return NSIZES;
- }
-#if (NTBINS != 0)
- if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
- szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
- }
-#endif
- {
- szind_t x = lg_floor((size<<1)-1);
- szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
- x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
- szind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
- szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-
- size_t delta_inverse_mask = ZD(-1) << lg_delta;
- szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- szind_t index = NTBINS + grp + mod;
- return index;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size) {
- assert(size <= LOOKUP_MAXCLASS);
- {
- szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
- assert(ret == size2index_compute(size));
- return ret;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size) {
- assert(size > 0);
- if (likely(size <= LOOKUP_MAXCLASS)) {
- return size2index_lookup(size);
- }
- return size2index_compute(size);
-}
-
-static inline size_t
-index2size_compute(szind_t index) {
-#if (NTBINS > 0)
- if (index < NTBINS) {
- return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
- }
-#endif
- {
- size_t reduced_index = index - NTBINS;
- size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
- size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
- 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_QUANTUM +
- (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_QUANTUM-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t usize = grp_size + mod_size;
- return usize;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index) {
- size_t ret = (size_t)index2size_tab[index];
- assert(ret == index2size_compute(index));
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index) {
- assert(index < NSIZES);
- return index2size_lookup(index);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size) {
- if (unlikely(size > LARGE_MAXCLASS)) {
- return 0;
- }
-#if (NTBINS > 0)
- if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
- size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
- (ZU(1) << lg_ceil));
- }
-#endif
- {
- size_t x = lg_floor((size<<1)-1);
- size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (size + delta_mask) & ~delta_mask;
- return usize;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size) {
- size_t ret = index2size_lookup(size2index_lookup(size));
-
- assert(ret == s2u_compute(size));
- return ret;
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size) {
- assert(size > 0);
- if (likely(size <= LOOKUP_MAXCLASS)) {
- return s2u_lookup(size);
- }
- return s2u_compute(size);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment) {
- size_t usize;
-
- assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
- /* Try for a small size class. */
- if (size <= SMALL_MAXCLASS && alignment < PAGE) {
- /*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each
- * small size class, every object is aligned at the smallest
- * power of two that is non-zero in the base two representation
- * of the size. For example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
- usize = s2u(ALIGNMENT_CEILING(size, alignment));
- if (usize < LARGE_MINCLASS) {
- return usize;
- }
- }
-
- /* Large size class. Beware of overflow. */
-
- if (unlikely(alignment > LARGE_MAXCLASS)) {
- return 0;
- }
-
- /* Make sure result is a large size class. */
- if (size <= LARGE_MINCLASS) {
- usize = LARGE_MINCLASS;
- } else {
- usize = s2u(size);
- if (usize < size) {
- /* size_t overflow. */
- return 0;
- }
- }
-
- /*
- * Calculate the multi-page mapping that large_palloc() would need in
- * order to guarantee the alignment.
- */
- if (usize + large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
- /* size_t overflow. */
- return 0;
- }
- return usize;
-}
-
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
assert(have_percpu_arena);
diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index 80dfbeff..7ffce6fb 100644
--- a/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_INLINES_C_H
#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -48,7 +49,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
void *ret;
assert(usize != 0);
- assert(usize == sa2u(usize, alignment));
+ assert(usize == sz_sa2u(usize, alignment));
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -118,7 +119,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
void *p;
size_t usize, copysize;
- usize = sa2u(size + extra, alignment);
+ usize = sz_sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
@@ -128,7 +129,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
return NULL;
}
/* Try again, without extra this time. */
- usize = sa2u(size, alignment);
+ usize = sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
diff --git a/include/jemalloc/internal/prof_inlines_b.h b/include/jemalloc/internal/prof_inlines_b.h
index fba7b998..d670cb7b 100644
--- a/include/jemalloc/internal/prof_inlines_b.h
+++ b/include/jemalloc/internal/prof_inlines_b.h
@@ -1,6 +1,8 @@
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
+#include "jemalloc/internal/sz.h"
+
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
/*
@@ -113,7 +115,7 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
prof_tdata_t *tdata;
prof_bt_t bt;
- assert(usize == s2u(usize));
+ assert(usize == sz_s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata))) {
diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh
index dd562db1..998994d0 100755
--- a/include/jemalloc/internal/size_classes.sh
+++ b/include/jemalloc/internal/size_classes.sh
@@ -334,6 +334,7 @@ for lg_z in ${lg_zarr} ; do
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+ echo "#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)"
echo "#define LARGE_MAXCLASS ${large_maxclass}"
echo "#endif"
echo
diff --git a/include/jemalloc/internal/sz.h b/include/jemalloc/internal/sz.h
new file mode 100644
index 00000000..7f640d55
--- /dev/null
+++ b/include/jemalloc/internal/sz.h
@@ -0,0 +1,317 @@
+#ifndef JEMALLOC_INTERNAL_SIZE_H
+#define JEMALLOC_INTERNAL_SIZE_H
+
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
+
+/*
+ * sz module: Size computations.
+ *
+ * Some abbreviations used here:
+ * p: Page
+ * ind: Index
+ * s, sz: Size
+ * u: Usable size
+ * a: Aligned
+ *
+ * These are not always used completely consistently, but should be enough to
+ * interpret function names. E.g. sz_psz2ind converts page size to page size
+ * index; sz_sa2u converts a (size, alignment) allocation request to the usable
+ * size that would result from such an allocation.
+ */
+
+/*
+ * sz_pind2sz_tab encodes the same information as could be computed by
+ * sz_pind2sz_compute().
+ */
+extern size_t const sz_pind2sz_tab[NPSIZES+1];
+/*
+ * sz_index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by sz_index2size_compute().
+ */
+extern size_t const sz_index2size_tab[NSIZES];
+/*
+ * sz_size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via sz_size2index().
+ */
+extern uint8_t const sz_size2index_tab[];
+
+static const size_t sz_large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ PAGE
+#else
+ 0
+#endif
+ ;
+
+JEMALLOC_ALWAYS_INLINE pszind_t
+sz_psz2ind(size_t psz) {
+ if (unlikely(psz > LARGE_MAXCLASS)) {
+ return NPSIZES;
+ }
+ {
+ pszind_t x = lg_floor((psz<<1)-1);
+ pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
+ (LG_SIZE_CLASS_GROUP + LG_PAGE);
+ pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZD(-1) << lg_delta;
+ pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ pszind_t ind = grp + mod;
+ return ind;
+ }
+}
+
+static inline size_t
+sz_pind2sz_compute(pszind_t pind) {
+ if (unlikely(pind == NPSIZES)) {
+ return LARGE_MAXCLASS + PAGE;
+ }
+ {
+ size_t grp = pind >> LG_SIZE_CLASS_GROUP;
+ size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_PAGE +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_PAGE-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t sz = grp_size + mod_size;
+ return sz;
+ }
+}
+
+static inline size_t
+sz_pind2sz_lookup(pszind_t pind) {
+ size_t ret = (size_t)sz_pind2sz_tab[pind];
+ assert(ret == sz_pind2sz_compute(pind));
+ return ret;
+}
+
+static inline size_t
+sz_pind2sz(pszind_t pind) {
+ assert(pind < NPSIZES+1);
+ return sz_pind2sz_lookup(pind);
+}
+
+static inline size_t
+sz_psz2u(size_t psz) {
+ if (unlikely(psz > LARGE_MAXCLASS)) {
+ return LARGE_MAXCLASS + PAGE;
+ }
+ {
+ size_t x = lg_floor((psz<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (psz + delta_mask) & ~delta_mask;
+ return usize;
+ }
+}
+
+static inline szind_t
+sz_size2index_compute(size_t size) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return NSIZES;
+ }
+#if (NTBINS != 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+ }
+#endif
+ {
+ szind_t x = lg_floor((size<<1)-1);
+ szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+ x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
+ szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZD(-1) << lg_delta;
+ szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ szind_t index = NTBINS + grp + mod;
+ return index;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+ assert(size <= LOOKUP_MAXCLASS);
+ {
+ szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
+ assert(ret == sz_size2index_compute(size));
+ return ret;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index(size_t size) {
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS)) {
+ return sz_size2index_lookup(size);
+ }
+ return sz_size2index_compute(size);
+}
+
+static inline size_t
+sz_index2size_compute(szind_t index) {
+#if (NTBINS > 0)
+ if (index < NTBINS) {
+ return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+ }
+#endif
+ {
+ size_t reduced_index = index - NTBINS;
+ size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
+ size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
+ 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_QUANTUM-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t usize = grp_size + mod_size;
+ return usize;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup(szind_t index) {
+ size_t ret = (size_t)sz_index2size_tab[index];
+ assert(ret == sz_index2size_compute(index));
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size(szind_t index) {
+ assert(index < NSIZES);
+ return sz_index2size_lookup(index);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_compute(size_t size) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return 0;
+ }
+#if (NTBINS > 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+ (ZU(1) << lg_ceil));
+ }
+#endif
+ {
+ size_t x = lg_floor((size<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (size + delta_mask) & ~delta_mask;
+ return usize;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_lookup(size_t size) {
+ size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
+
+ assert(ret == sz_s2u_compute(size));
+ return ret;
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u(size_t size) {
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS)) {
+ return sz_s2u_lookup(size);
+ }
+ return sz_s2u_compute(size);
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size and alignment.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_sa2u(size_t size, size_t alignment) {
+ size_t usize;
+
+ assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
+ /* Try for a small size class. */
+ if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+ usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
+ if (usize < LARGE_MINCLASS) {
+ return usize;
+ }
+ }
+
+ /* Large size class. Beware of overflow. */
+
+ if (unlikely(alignment > LARGE_MAXCLASS)) {
+ return 0;
+ }
+
+ /* Make sure result is a large size class. */
+ if (size <= LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
+ } else {
+ usize = sz_s2u(size);
+ if (usize < size) {
+ /* size_t overflow. */
+ return 0;
+ }
+ }
+
+ /*
+ * Calculate the multi-page mapping that large_palloc() would need in
+ * order to guarantee the alignment.
+ */
+ if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
+ /* size_t overflow. */
+ return 0;
+ }
+ return usize;
+}
+
+#endif /* JEMALLOC_INTERNAL_SIZE_H */
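
For orientation, here is a minimal usage sketch of the new module's API. The function below is hypothetical (it is not part of this commit), 64 is just an example power-of-two alignment, and the concrete class sizes it would observe depend on how size_classes.sh was configured, so only configuration-independent invariants are asserted.

#include <assert.h>
#include "jemalloc/internal/sz.h"

/* Hypothetical caller illustrating the renamed API; not part of this commit. */
static void
sz_usage_sketch(size_t request) {
	assert(request > 0 && request <= LARGE_MAXCLASS);

	/* sz_s2u: round a request up to its usable (size class) size. */
	size_t usize = sz_s2u(request);
	assert(usize >= request);

	/* sz_size2index and sz_index2size invert each other on class sizes. */
	szind_t ind = sz_size2index(usize);
	assert(sz_index2size(ind) == usize);

	/*
	 * sz_sa2u: usable size for a (size, alignment) request; alignment
	 * must be a power of two, and a return of 0 signals size_t overflow.
	 */
	size_t au = sz_sa2u(request, 64);
	assert(au == 0 || au >= usize);
}
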
diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h
index 8a65ba2b..c55bcd27 100644
--- a/include/jemalloc/internal/tcache_inlines.h
+++ b/include/jemalloc/internal/tcache_inlines.h
@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
@@ -95,7 +96,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
* statement are all static.
*/
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
- usize = index2size(binind);
+ usize = sz_index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
@@ -147,7 +148,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
return NULL;
}
- ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
+ ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
if (ret == NULL) {
return NULL;
}
@@ -157,7 +158,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
/* Only compute usize on demand */
if (config_prof || (slow_path && config_fill) ||
unlikely(zero)) {
- usize = index2size(binind);
+ usize = sz_index2size(binind);
assert(usize <= tcache_maxclass);
}
@@ -221,7 +222,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
if (slow_path && config_fill && unlikely(opt_junk_free)) {
- large_dalloc_junk(ptr, index2size(binind));
+ large_dalloc_junk(ptr, sz_index2size(binind));
}
tbin = tcache_large_bin_get(tcache, binind);