author     Linus Torvalds <torvalds@linux-foundation.org>  2015-10-01 22:20:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-10-01 22:20:11 -0400
commit     bde17b90dd9712cb61a7ab0c1ccd0f7f6aa57957 (patch)
tree       d4597bfde4f5d4a3a5729e5534c44993c6216352 /mm/slab.c
parent     1bca1000fa71a1092947b4a51928abe80a3316d2 (diff)
parent     676bd99178cd962ed24ffdad222b7069d330a969 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "12 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  dmapool: fix overflow condition in pool_find_page()
  thermal: avoid division by zero in power allocator
  memcg: remove pcp_counter_lock
  kprobes: use _do_fork() in samples to make them work again
  drivers/input/joystick/Kconfig: zhenhua.c needs BITREVERSE
  memcg: make mem_cgroup_read_stat() unsigned
  memcg: fix dirty page migration
  dax: fix NULL pointer in __dax_pmd_fault()
  mm: hugetlbfs: skip shared VMAs when unmapping private pages to satisfy a fault
  mm/slab: fix unexpected index mapping result of kmalloc_size(INDEX_NODE+1)
  userfaultfd: remove kernel header include from uapi header
  arch/x86/include/asm/efi.h: fix build failure
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index c77ebe6cc87c..4fcc5dd8d5a6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= kmalloc_size(INDEX_NODE + 1)
-	    && cachep->object_size > cache_line_size()
-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+	/*
+	 * To activate debug pagealloc, off-slab management is a necessary
+	 * requirement, but in the early phase of initialization the small
+	 * sized slabs are not yet set up, so it is not possible there.
+	 * Checking size >= 256 guarantees that all the necessary small
+	 * sized slabs are initialized by this point in the sequence.
+	 */
+	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+	    size >= 256 && cachep->object_size > cache_line_size() &&
+	    ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
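
Why the old test misfired is easiest to see from the kmalloc index-to-size
table, which since v3.10 is no longer ordered: indices 1 and 2 hold the
special 96- and 192-byte caches, so kmalloc_size(INDEX_NODE + 1) need not be
the next larger size after INDEX_NODE. Below is a minimal userspace sketch of
that mapping, mirroring the kmalloc_size() helper of this era's
include/linux/slab.h; the KMALLOC_MIN_SIZE value, the main() driver and the
assumed INDEX_NODE == 1 are illustrative assumptions, not taken from this
patch.

	#include <stdio.h>

	#define KMALLOC_MIN_SIZE 8	/* assumed config value */

	/*
	 * Mirrors the kmalloc_size() helper from include/linux/slab.h of
	 * this era: index n > 2 maps to 2^n bytes, while indices 1 and 2
	 * are the special non-power-of-two 96- and 192-byte caches.
	 */
	static unsigned int kmalloc_size(int n)
	{
		if (n > 2)
			return 1U << n;
		if (n == 1 && KMALLOC_MIN_SIZE <= 32)
			return 96;
		if (n == 2 && KMALLOC_MIN_SIZE <= 64)
			return 192;
		return 0;
	}

	int main(void)
	{
		/*
		 * The table is not ordered by index:
		 * index: 0  1   2    3  4   5   6   7    8   ...
		 * size:  0  96  192  8  16  32  64  128  256 ...
		 */
		for (int n = 0; n <= 8; n++)
			printf("index %d -> %u bytes\n", n, kmalloc_size(n));

		/*
		 * Suppose struct kmem_cache_node fits in the 96-byte cache,
		 * so INDEX_NODE == 1 (illustrative, config dependent). Then
		 * kmalloc_size(INDEX_NODE + 1) is 192, not the next larger
		 * cache size the old condition expected.
		 */
		int INDEX_NODE = 1;
		printf("kmalloc_size(INDEX_NODE + 1) = %u\n",
		       kmalloc_size(INDEX_NODE + 1));
		return 0;
	}

Under these assumptions the sketch prints kmalloc_size(INDEX_NODE + 1) = 192,
an unexpected bound rather than the next larger cache size, which is why the
fix drops the "+ 1" and spells out the size >= 256 check explicitly.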