author     David Goldblatt <davidgoldblatt@fb.com>      2017-09-18 17:25:57 -0700
committer  David Goldblatt <davidtgoldblatt@gmail.com>  2017-10-16 15:35:49 -0700
commit     d14bbf8d8190df411f0daf182f73f7b7786288c4 (patch)
tree       157b73fd0bc92e9cc42d6cd15ffde0c6193e0acf /include
parent     bbaa72422bb086933890a125fd58bf199fe26f2d (diff)
Add a "dumpable" bit to the extent state.
Currently, this is unused (i.e. all extents are always marked dumpable). In the future, we'll begin using this functionality.
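
This commit only adds the bookkeeping bit; the comment added to extent_structs.h (see the second file in the diff below) spells out the rules a future consumer of the bit would have to follow. As a purely illustrative sketch, not part of this change, one way those rules could be honored on Linux is to gate a single madvise(MADV_DODUMP) call on the bit; whether jemalloc ends up using madvise for this is an assumption here, and the toy_* names are hypothetical.

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

typedef struct {
	void *addr;     /* base of the extent's mapping */
	size_t size;
	bool dumpable;  /* stand-in for the packed e_bits flag */
} toy_extent_t;

/*
 * Re-enable core dumping for an extent's pages, at most once.  In the scheme
 * described by the new comment, only memory allocated with the default hooks
 * ever starts out non-dumpable, so user-hook memory always takes the early
 * return and its dumpability is never touched.
 */
static void
toy_extent_make_dumpable(toy_extent_t *extent) {
	if (extent->dumpable) {
		/* Never repeat the OS call once the bit is set. */
		return;
	}
	madvise(extent->addr, extent->size, MADV_DODUMP);
	extent->dumpable = true;
}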
Diffstat (limited to 'include')
-rw-r--r--  include/jemalloc/internal/extent_inlines.h  16
-rw-r--r--  include/jemalloc/internal/extent_structs.h  36
2 files changed, 44 insertions, 8 deletions
diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h
index bb2bd699..610072eb 100644
--- a/include/jemalloc/internal/extent_inlines.h
+++ b/include/jemalloc/internal/extent_inlines.h
@@ -94,6 +94,12 @@ extent_committed_get(const extent_t *extent) {
}
static inline bool
+extent_dumpable_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
+ EXTENT_BITS_DUMPABLE_SHIFT);
+}
+
+static inline bool
extent_slab_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
EXTENT_BITS_SLAB_SHIFT);
@@ -270,6 +276,12 @@ extent_committed_set(extent_t *extent, bool committed) {
}
static inline void
+extent_dumpable_set(extent_t *extent, bool dumpable) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
+ ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
+}
+
+static inline void
extent_slab_set(extent_t *extent, bool slab) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
@@ -283,7 +295,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
- bool committed) {
+ bool committed, bool dumpable) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
@@ -295,6 +307,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
+ extent_dumpable_set(extent, dumpable);
ql_elm_new(extent, ql_link);
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
@@ -312,6 +325,7 @@ extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);
extent_committed_set(extent, true);
+ extent_dumpable_set(extent, true);
}
static inline void
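
The new extent_dumpable_get()/extent_dumpable_set() accessors follow the same mask-and-shift packing used by the other e_bits fields (slab, committed, zeroed, ...). Below is a minimal, self-contained sketch of that pattern; the toy_* names and the shift value are illustrative, not jemalloc's.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_DUMPABLE_SHIFT 14  /* assumed position, for illustration only */
#define TOY_DUMPABLE_MASK  ((uint64_t)0x1U << TOY_DUMPABLE_SHIFT)

static inline bool
toy_dumpable_get(uint64_t bits) {
	/* Isolate the flag bit and shift it down to bit 0. */
	return (bool)((bits & TOY_DUMPABLE_MASK) >> TOY_DUMPABLE_SHIFT);
}

static inline uint64_t
toy_dumpable_set(uint64_t bits, bool dumpable) {
	/* Clear the old flag, then OR in the new value at its position. */
	return (bits & ~TOY_DUMPABLE_MASK) |
	    ((uint64_t)dumpable << TOY_DUMPABLE_SHIFT);
}

int
main(void) {
	uint64_t bits = 0;
	bits = toy_dumpable_set(bits, true);
	assert(toy_dumpable_get(bits));
	bits = toy_dumpable_set(bits, false);
	assert(!toy_dumpable_get(bits));
	return 0;
}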
diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
index 641a6325..722963b5 100644
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h
@@ -23,13 +23,14 @@ struct extent_s {
* a: arena_ind
* b: slab
* c: committed
+ * d: dumpable
* z: zeroed
* t: state
* i: szind
* f: nfree
* n: sn
*
- * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
+ * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
*
* arena_ind: Arena from which this extent came, or all 1 bits if
* unassociated.
@@ -44,6 +45,23 @@ struct extent_s {
* as on a system that overcommits and satisfies physical
* memory needs on demand via soft page faults.
*
+ * dumpable: The dumpable flag indicates whether or not we've set the
+ * memory in question to be dumpable. Note that this
+ * interacts somewhat subtly with user-specified extent hooks,
+ * since we don't know if *they* are fiddling with
+ * dumpability (in which case, we don't want to undo whatever
+ * they're doing). To deal with this scenario, we:
+ * - Make dumpable false only for memory allocated with the
+ * default hooks.
+ * - Only allow memory to go from non-dumpable to dumpable,
+ * and only once.
+ * - Never make the OS call to allow dumping when the
+ * dumpable bit is already set.
+ * These three constraints mean that we will never
+ * accidentally dump user memory that the user meant to set
+ * nondumpable with their extent hooks.
+ *
+ *
* zeroed: The zeroed flag is used by extent recycling code to track
* whether memory is zero-filled.
*
@@ -80,25 +98,29 @@ struct extent_s {
#define EXTENT_BITS_COMMITTED_MASK \
((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_DUMPABLE_SHIFT (MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_DUMPABLE_MASK \
+ ((uint64_t)0x1U << EXTENT_BITS_DUMPABLE_SHIFT)
+
+#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 3)
#define EXTENT_BITS_ZEROED_MASK \
((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3)
+#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 4)
#define EXTENT_BITS_STATE_MASK \
((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5)
+#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 6)
#define EXTENT_BITS_SZIND_MASK \
(((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_SHIFT \
- (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
+ (MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES)
#define EXTENT_BITS_NFREE_MASK \
((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_SHIFT \
- (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
+ (MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
@@ -128,7 +150,7 @@ struct extent_s {
*/
ql_elm(extent_t) ql_link;
- /*
+ /*
* Linkage for per size class sn/address-ordered heaps, and
* for extent_avail
*/
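
The macro changes above insert the dumpable bit directly above the committed bit, at MALLOCX_ARENA_BITS + 2, and push zeroed, state, szind, nfree, and sn up by one position each (hence the +2 → +3, +3 → +4, and +5 → +6 edits). The sketch below just spells out the resulting offsets; MALLOCX_ARENA_BITS, LG_CEIL_NSIZES, and LG_SLAB_MAXREGS are configuration-dependent in jemalloc, so the concrete values here are assumptions for illustration only.

#include <stdio.h>

#define MALLOCX_ARENA_BITS 12  /* assumed value for this sketch */
#define LG_CEIL_NSIZES     8   /* assumed value */
#define LG_SLAB_MAXREGS    9   /* assumed value */

#define DUMPABLE_SHIFT (MALLOCX_ARENA_BITS + 2)  /* new bit               */
#define ZEROED_SHIFT   (MALLOCX_ARENA_BITS + 3)  /* was +2 before patch   */
#define STATE_SHIFT    (MALLOCX_ARENA_BITS + 4)  /* was +3; 2 bits wide   */
#define SZIND_SHIFT    (MALLOCX_ARENA_BITS + 6)  /* was +5                */
#define NFREE_SHIFT    (MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES)
#define SN_SHIFT       (MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES + \
    (LG_SLAB_MAXREGS + 1))

int
main(void) {
	/* Every field above the committed bit moved up by exactly one. */
	printf("dumpable %d, zeroed %d, state %d, szind %d, nfree %d, sn %d\n",
	    DUMPABLE_SHIFT, ZEROED_SHIFT, STATE_SHIFT, SZIND_SHIFT,
	    NFREE_SHIFT, SN_SHIFT);
	return 0;
}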