author     Qi Wang <interwq@gwu.edu>    2017-07-31 14:35:33 -0700
committer  Qi Wang <interwq@gmail.com>  2017-07-31 15:47:48 -0700
commit     1ab2ab294c8f29a6f314f3ff30fbf4cdb2f01af6 (patch)
tree       80179b00ac6fa2338feb2d917daa9b78b8475a99
parent     9a39b23c9c823e8157e2e6850014fa67c09f9351 (diff)
Only read szind if ptr is not page aligned in sdallocx.
If cache-oblivious mode is disabled and ptr is not page aligned, we know the allocation was not sampled. In this case, use the size passed into sdallocx directly, without accessing the rtree. This improves sdallocx efficiency in the common case (not sampled && small allocation).
-rw-r--r--  src/jemalloc.c  24
1 file changed, 22 insertions(+), 2 deletions(-)
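
The fast path rests on one invariant: profiling-sampled allocations are promoted to page-aligned large extents, so a pointer with a nonzero offset within its page cannot have been sampled, and the usize passed to sdallocx can be trusted to derive the size class. The shortcut is gated on cache-oblivious mode being disabled, since that feature can leave large allocations starting at non-page-aligned addresses. A minimal standalone sketch of the check follows; the hard-coded 4096-byte page and the helper name are assumptions for illustration (jemalloc derives PAGE and PAGE_MASK at build time):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative page size; jemalloc computes PAGE/PAGE_MASK at build time. */
#define PAGE		((uintptr_t)4096)
#define PAGE_MASK	(PAGE - 1)

/*
 * Sampled allocations are served as page-aligned large extents, so any
 * low bits set within the page rule sampling out.  Cache-oblivious mode
 * can offset large allocations within a page, which invalidates the
 * inference, so the shortcut is disabled in that case.
 */
static bool
can_skip_rtree_lookup(const void *ptr, bool cache_oblivious) {
	if (cache_oblivious) {
		return false;
	}
	return ((uintptr_t)ptr & PAGE_MASK) != 0;
}

int
main(void) {
	printf("%d\n", can_skip_rtree_lookup((void *)0x1010, false));	/* 1: fast path */
	printf("%d\n", can_skip_rtree_lookup((void *)0x2000, false));	/* 0: must consult metadata */
	return 0;
}

In the patch itself, alloc_ctx.slab is additionally set to true on this path, because a non-page-aligned pointer necessarily points into a small-allocation slab.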
diff --git a/src/jemalloc.c b/src/jemalloc.c
index ed470520..4c73ba4a 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2194,17 +2194,37 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 	assert(malloc_initialized() || IS_INITIALIZER);
 
 	alloc_ctx_t alloc_ctx, *ctx;
-	if (config_prof && opt_prof) {
+	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
+		/*
+		 * When cache_oblivious is disabled and ptr is not page aligned,
+		 * the allocation was not sampled -- usize can be used to
+		 * determine szind directly.
+		 */
+		alloc_ctx.szind = sz_size2index(usize);
+		alloc_ctx.slab = true;
+		ctx = &alloc_ctx;
+		if (config_debug) {
+			alloc_ctx_t dbg_ctx;
+			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
+			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
+			    &dbg_ctx.slab);
+			assert(dbg_ctx.szind == alloc_ctx.szind);
+			assert(dbg_ctx.slab == alloc_ctx.slab);
+		}
+	} else if (config_prof && opt_prof) {
 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
 		assert(alloc_ctx.szind == sz_size2index(usize));
 		ctx = &alloc_ctx;
-		prof_free(tsd, ptr, usize, ctx);
 	} else {
 		ctx = NULL;
 	}
 
+	if (config_prof && opt_prof) {
+		prof_free(tsd, ptr, usize, ctx);
+	}
 	if (config_stats) {
 		*tsd_thread_deallocatedp_get(tsd) += usize;
 	}
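
Two details of the patch are worth noting. First, the config_debug block cross-checks the cheap usize-derived answer against the authoritative rtree metadata, so debug builds catch callers that pass a wrong size to sdallocx. Second, prof_free is hoisted out of the branch into its own conditional so the new fast path can hand it a valid ctx without duplicating the call. A hedged sketch of the cross-check shape, with size2index, metadata_szind, and szind_for_dealloc as toy stand-ins rather than jemalloc's API:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy size classes: index i covers sizes up to 8 << i. */
static size_t
size2index(size_t size) {
	size_t i = 0;
	while (((size_t)8 << i) < size) {
		i++;
	}
	return i;
}

/* Stand-in for the authoritative metadata read (the rtree in jemalloc). */
static size_t
metadata_szind(size_t recorded_size) {
	return size2index(recorded_size);
}

static size_t
szind_for_dealloc(size_t recorded_size, size_t caller_size) {
	size_t szind = size2index(caller_size);	/* fast path: no metadata read */
#ifndef NDEBUG
	/*
	 * Debug builds re-derive the index from metadata and assert that
	 * the fast path agrees, mirroring the config_debug block above.
	 */
	assert(metadata_szind(recorded_size) == szind);
#endif
	return szind;
}

int
main(void) {
	printf("szind=%zu\n", szind_for_dealloc(24, 24));
	return 0;
}

Built with -DNDEBUG the assert compiles away and only the fast path remains, which matches the release-build behavior of the config_debug guard.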