path: root/arch/powerpc/mm/mem.c
author    Bernhard Walle <bwalle@suse.de>    2008-02-07 00:15:17 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-07 08:42:25 -0800
commit    72a7fe3967dbf86cb34e24fbf1d957fe24d2f246 (patch)
tree      c19f7d0b530577359840e959cce204939caf0649 /arch/powerpc/mm/mem.c
parent    25fad945a7f7ff2cf06e437381c6a1121784dbd9 (diff)
Introduce flags for reserve_bootmem()
This patchset adds a flags variable to reserve_bootmem() and uses the
BOOTMEM_EXCLUSIVE flag in crashkernel reservation code to detect collisions
between the crashkernel area and already used memory.

This patch:

Change the reserve_bootmem() function to accept a new flag BOOTMEM_EXCLUSIVE.
If that flag is set, the function returns with -EBUSY if the memory has
already been reserved in the past. This is to avoid conflicts.

Because that code runs before SMP initialisation, there's no race condition
inside reserve_bootmem_core().

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix powerpc build]
Signed-off-by: Bernhard Walle <bwalle@suse.de>
Cc: <linux-arch@vger.kernel.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
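For illustration, a minimal sketch of how a caller might use the new flag is
shown below. It assumes the post-patch signature reserve_bootmem(addr, size,
flags) returning 0 on success; the reserve_crash_area() helper, its parameters,
and the warning text are hypothetical and not taken from the actual crashkernel
patch in this series.

    /*
     * Hedged sketch (not from this patch): reserve a candidate region
     * exclusively so an overlap with already-reserved memory is reported
     * instead of being silently ignored.
     */
    #include <linux/bootmem.h>
    #include <linux/kernel.h>
    #include <linux/init.h>

    static int __init reserve_crash_area(unsigned long crash_base,
                                         unsigned long crash_size)
    {
            int ret;

            /* With BOOTMEM_EXCLUSIVE, reserve_bootmem() returns -EBUSY if
             * any part of the range was reserved earlier. */
            ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
            if (ret < 0) {
                    printk(KERN_WARNING
                           "requested area overlaps reserved memory\n");
                    return ret;
            }

            return 0;
    }

Existing callers that do not care about collisions simply pass BOOTMEM_DEFAULT,
which preserves the old behaviour, as the powerpc hunks below show.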
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--  arch/powerpc/mm/mem.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e8122447f01..ff5debf5eed 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -220,12 +220,13 @@ void __init do_init_bootmem(void)
lmb_size_bytes(&lmb.reserved, i) - 1;
if (addr < total_lowmem)
reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i));
+ lmb_size_bytes(&lmb.reserved, i),
+ BOOTMEM_DEFAULT);
else if (lmb.reserved.region[i].base < total_lowmem) {
unsigned long adjusted_size = total_lowmem -
lmb.reserved.region[i].base;
reserve_bootmem(lmb.reserved.region[i].base,
- adjusted_size);
+ adjusted_size, BOOTMEM_DEFAULT);
}
}
#else
@@ -234,7 +235,8 @@ void __init do_init_bootmem(void)
/* reserve the sections we're already using */
for (i = 0; i < lmb.reserved.cnt; i++)
reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i));
+ lmb_size_bytes(&lmb.reserved, i),
+ BOOTMEM_DEFAULT);
#endif
/* XXX need to clip this if using highmem? */