author    Christoph Lameter <clameter@sgi.com>    2006-03-25 03:06:47 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-25 08:22:50 -0800
commit    0718dc2a82c865ca75975acabaf984057f9fd488 (patch)
tree      0f7e8ed22ea26c79e17712c417593bdce04e7888 /mm
parent    cafeb02e098ecd58fb0bd797b2c9fbba3edf54f8 (diff)
[PATCH] slab: fix memory leak in alloc_kmemlist
We have had this memory leak for a while now. The situation is complicated by the use of alloc_kmemlist() as a function to resize various caches by do_tune_cpucache().

What we do here is first of all make sure that we deallocate properly in the loop over all the nodes.

If we are just resizing caches then we can simply return with -ENOMEM if an allocation fails. If the cache is new then we need to rollback and remove all earlier allocations.

We detect that a cache is new by checking if the link to the global cache chain has been setup. This is a bit hackish ....

(also fix up too overlong lines that I added in the last patch...)

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
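For readers skimming the diff below, here is a minimal userspace sketch (not the kernel code itself) of the error-handling pattern the patch introduces: each loop iteration frees its own partial allocations before jumping to the fail path, and the fail path rolls back all earlier iterations only when the object is brand new. In the kernel the "brand new" test is !cachep->next.next (the cache is not yet linked into the global cache_chain); the sketch replaces that with an explicit is_new flag, and the names node_state, setup_nodes and NUM_NODES are made up for illustration.

/*
 * Illustrative sketch only: mirrors the alloc_kmemlist() fix, but with
 * plain malloc()/free() and invented names.
 */
#include <stdlib.h>
#include <errno.h>

#define NUM_NODES 4

struct node_state {
	void *shared;
	void *alien;
};

static struct node_state *nodes[NUM_NODES];

static int setup_nodes(int is_new)
{
	int node;

	for (node = 0; node < NUM_NODES; node++) {
		void *alien = malloc(64);
		void *shared;
		struct node_state *st;

		if (!alien)
			goto fail;

		shared = malloc(64);
		if (!shared) {
			/* free what this iteration already allocated */
			free(alien);
			goto fail;
		}

		st = malloc(sizeof(*st));
		if (!st) {
			free(alien);
			free(shared);
			goto fail;
		}

		st->shared = shared;
		st->alien = alien;
		nodes[node] = st;
	}
	return 0;

fail:
	if (is_new) {
		/* Brand new object: roll back everything done so far. */
		while (--node >= 0) {
			if (nodes[node]) {
				free(nodes[node]->shared);
				free(nodes[node]->alien);
				free(nodes[node]);
				nodes[node] = NULL;
			}
		}
	}
	return -ENOMEM;
}

Called with is_new set (a newly created cache), a failure leaves nodes[] entirely NULL again; called with is_new clear (the do_tune_cpucache()-style resize case), it simply returns -ENOMEM and leaves whatever was already set up in place.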
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index ef9f60fe37d..681837499d7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3418,7 +3418,7 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep)
{
@@ -3433,10 +3433,13 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
if (!new_alien)
goto fail;
- new_shared = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+ new_shared = alloc_arraycache(node,
+ cachep->shared*cachep->batchcount,
0xbaadf00d);
- if (!new_shared)
+ if (!new_shared) {
+ free_alien_cache(new_alien);
goto fail;
+ }
l3 = cachep->nodelists[node];
if (l3) {
@@ -3445,7 +3448,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
spin_lock_irq(&l3->list_lock);
if (shared)
- free_block(cachep, shared->entry, shared->avail, node);
+ free_block(cachep, shared->entry,
+ shared->avail, node);
l3->shared = new_shared;
if (!l3->alien) {
@@ -3460,8 +3464,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
continue;
}
l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
- if (!l3)
+ if (!l3) {
+ free_alien_cache(new_alien);
+ kfree(new_shared);
goto fail;
+ }
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
@@ -3473,7 +3480,23 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
cachep->nodelists[node] = l3;
}
return 0;
+
fail:
+ if (!cachep->next.next) {
+ /* Cache is not active yet. Roll back what we did */
+ node--;
+ while (node >= 0) {
+ if (cachep->nodelists[node]) {
+ l3 = cachep->nodelists[node];
+
+ kfree(l3->shared);
+ free_alien_cache(l3->alien);
+ kfree(l3);
+ cachep->nodelists[node] = NULL;
+ }
+ node--;
+ }
+ }
return -ENOMEM;
}