author	Christoph Lameter <clameter@engr.sgi.com>	2006-01-08 01:00:49 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-08 20:12:41 -0800
commit	7cbe34cf86c673503b177ff47cfa2c7030dabb50 (patch)
tree	9b39d7e8f11fed68242d1cb1f0c85dfcf96e3250 /mm
parent	49d2e9cc4544369635cd6f4ef6d5bb0f757079a7 (diff)
[PATCH] Swap Migration V5: Add CONFIG_MIGRATION for page migration support
Include page migration if the system is NUMA or has a memory model that allows distinct areas of memory (SPARSEMEM, DISCONTIGMEM).

And:
- Only include lru_add_drain_per_cpu if building for an SMP system.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
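The commit message's second point, compiling lru_add_drain_per_cpu only on SMP, is not visible in the hunks reproduced below (the vmscan.c excerpt is cut off). As a rough, hypothetical sketch only, the kind of conditional compilation it describes looks like the following; the exact guard, callback signature, and file location in the real patch are assumptions, not copied from this diff:

#ifdef CONFIG_SMP
/*
 * Hypothetical sketch: per-CPU work callback that drains this CPU's LRU
 * pagevecs so pending pages reach the zone LRU lists before migration.
 * The void * argument is assumed from the 2.6-era workqueue API.
 */
static void lru_add_drain_per_cpu(void *dummy)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

On a uniprocessor build there are no other CPUs to drain, so leaving the helper out avoids carrying dead code.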
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	7
-rw-r--r--	mm/vmscan.c	20
2 files changed, 18 insertions, 9 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index b3db11f137e..a9cb80ae640 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -132,3 +132,10 @@ config SPLIT_PTLOCK_CPUS
default "4096" if ARM && !CPU_CACHE_VIPT
default "4096" if PARISC && !PA20
default "4"
+
+#
+# support for page migration
+#
+config MIGRATION
+ def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
+ depends on SWAP
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a537a7f1635..58270aea669 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -568,6 +568,7 @@ keep:
return reclaimed;
}
+#ifdef CONFIG_MIGRATION
/*
* swapout a single page
* page is locked upon entry, unlocked on exit
@@ -656,8 +657,9 @@ redo:
/*
* Skip locked pages during the first two passes to give the
- * functions holding the lock time to release the page. Later we use
- * lock_page to have a higher chance of acquiring the lock.
+ * functions holding the lock time to release the page. Later we
+ * use lock_page() to have a higher chance of acquiring the
+ * lock.
*/
if (pass > 2)
lock_page(page);
@@ -669,15 +671,15 @@ redo:
* Only wait on writeback if we have already done a pass where
* we we may have triggered writeouts for lots of pages.
*/
- if (pass > 0)
+ if (pass > 0) {
wait_on_page_writeback(page);
- else
+ } else {
if (PageWriteback(page)) {
unlock_page(page);
goto retry_later;
}
+ }
-#ifdef CONFIG_SWAP
if (PageAnon(page) && !PageSwapCache(page)) {
if (!add_to_swap(page)) {
unlock_page(page);
@@ -686,16 +688,15 @@ redo:
continue;
}
}
-#endif /* CONFIG_SWAP */
/*
* Page is properly locked and writeback is complete.
* Try to migrate the page.
*/
- if (swap_page(page)) {
+ if (!swap_page(page))
+ continue;
retry_later:
- retry++;
- }
+ retry++;
}
if (retry && pass++ < 10)
goto redo;
@@ -708,6 +709,7 @@ retry_later:
return nr_failed + retry;
}
+#endif
/*
* zone->lru_lock is heavily contended. Some of the functions that