author		Pradeep Sawlani <sawlani@amazon.com>	2014-03-14 00:08:19 -0700
committer	Ziyan <jaraidaniel@gmail.com>		2016-01-08 10:44:44 +0100
commit		b2475e5c58dab09b1dc073ae7564d8a8126db574 (patch)
tree		87a7770e43a7693d33fdc47af7276974b07c5e2d /mm
parent		def8e61f7da447b662f5b87970631023162ca111 (diff)
ksm: check and skip page if it is already scanned
From: Pradeep Sawlani <sawlani@amazon.com>

On systems like Android, where most processes are forked from a parent without execve, KSM can scan the same page multiple times in one scan cycle. There is no advantage in scanning the same page more than once for merging. During testing on Android, it was observed that around 60% of pages are skipped in each scan cycle.

Change-Id: I0cf01802f0b4d61fcab92558beb9e1c660dc9a77
Link: http://lkml.kernel.org/r/CAMrOTPgBtANS_ryRjan0-dTL97U7eRvtf3dCsss=Kn+Uk89fuA@mail.gmail.com
Signed-off-by: Pradeep Sawlani <sawlani@amazon.com>
Signed-off-by: Paul Reioux <reioux@gmail.com>

Conflicts:
	include/linux/page-flags.h
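To make the scenario concrete, here is a minimal user-space sketch (not part of the patch) of the workload the commit message describes: a parent marks an anonymous region MADV_MERGEABLE and forks without execve, so parent and child both register the same copy-on-write pages with KSM and, before this change, ksmd hands the same page to cmp_and_merge_page() once per process in a single scan cycle. The region size and sleep durations are arbitrary illustration values.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 4096;		/* arbitrary: 64 pages */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0xaa, len);		/* identical contents: good KSM candidates */

	/* Register the area with KSM; needs CONFIG_KSM and
	 * /sys/kernel/mm/ksm/run set to 1 for ksmd to act on it. */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* Fork without execve: the child inherits the VMA and its
	 * mergeable flag, so ksmd walks the same pages once per process. */
	if (fork() == 0) {
		sleep(60);		/* keep the child's mapping alive for ksmd */
		_exit(0);
	}
	sleep(60);
	wait(NULL);
	return 0;
}

With CONFIG_KSM_CHECK_PAGE enabled, the second encounter of each page within one cycle is detected by is_page_scanned() in the patch below and the redundant cmp_and_merge_page() call is skipped.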
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	10
-rw-r--r--	mm/ksm.c	33
2 files changed, 41 insertions, 2 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 264a442c831..3bf1bfe4778 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -237,6 +237,16 @@ config KSM
until a program has madvised that an area is MADV_MERGEABLE, and
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+config KSM_CHECK_PAGE
+ bool "Check page before scanning"
+ depends on KSM
+ default n
+ help
+ If enabled, check whether a page has already been scanned in the
+ current KSM scan cycle and skip it if so.
+ This is useful when a parent and its child processes mark the same
+ area for KSM scanning.
+
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
depends on MMU
diff --git a/mm/ksm.c b/mm/ksm.c
index 9a68b0cf0a1..b126a7fb365 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -554,7 +554,9 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
* than left over from before.
*/
age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
+#ifndef CONFIG_KSM_CHECK_PAGE
BUG_ON(age > 1);
+#endif
if (!age)
rb_erase(&rmap_item->node, &root_unstable_tree);
@@ -1403,6 +1405,31 @@ next_mm:
return NULL;
}
+static inline int is_page_scanned(struct page *page)
+{
+#ifdef CONFIG_KSM_CHECK_PAGE
+ /* page is already marked as KSM, so this will be a simple merge */
+ if (PageKsm(page))
+ return 0;
+
+ if (ksm_scan.seqnr & 0x1) {
+ /* odd cycle */
+ /* clear even cycle bit */
+ ClearPageKsmScan0(page);
+ /* get old value and mark it scanned */
+ return TestSetPageKsmScan1(page);
+ } else {
+ /* even cycle */
+ /* clear odd cycle bit */
+ ClearPageKsmScan1(page);
+ /* get old value and mark it scanned */
+ return TestSetPageKsmScan0(page);
+ }
+#else
+ return 0;
+#endif
+}
+
/**
* ksm_do_scan - the ksm scanner main worker function.
* @scan_npages - number of pages we want to scan before we return.
@@ -1417,8 +1444,10 @@ static void ksm_do_scan(unsigned int scan_npages)
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
- if (!PageKsm(page) || !in_stable_tree(rmap_item))
- cmp_and_merge_page(page, rmap_item);
+ if (!PageKsm(page) || !in_stable_tree(rmap_item)) {
+ if (!is_page_scanned(page))
+ cmp_and_merge_page(page, rmap_item);
+ }
put_page(page);
}
}
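Note that the ClearPageKsmScan0/1() and TestSetPageKsmScan0/1() helpers called from is_page_scanned() are not defined by the hunks above; because the diffstat is limited to 'mm', the companion change presumably lives in include/linux/page-flags.h, which is also where the "Conflicts" note points. Below is a hypothetical sketch of that hunk, following the kernel's usual page-flag macro conventions; the PG_ksm_scan0/PG_ksm_scan1 names and their placement are assumptions, not taken from this commit.

/* Hypothetical include/linux/page-flags.h companion hunk (not shown in
 * this 'mm'-limited diff): one scratch page flag per scan-cycle parity. */
enum pageflags {
	/* ... existing PG_* entries ... */
#ifdef CONFIG_KSM_CHECK_PAGE
	PG_ksm_scan0,	/* page was visited in an even KSM scan cycle */
	PG_ksm_scan1,	/* page was visited in an odd KSM scan cycle */
#endif
	__NR_PAGEFLAGS,
};

#ifdef CONFIG_KSM_CHECK_PAGE
/* Expands to ClearPageKsmScan0/1() and TestSetPageKsmScan0/1() as used in mm/ksm.c */
CLEARPAGEFLAG(KsmScan0, ksm_scan0)
TESTSETFLAG(KsmScan0, ksm_scan0)
CLEARPAGEFLAG(KsmScan1, ksm_scan1)
TESTSETFLAG(KsmScan1, ksm_scan1)
#endif

Consuming two page flag bits is the real cost of this optimization, which is presumably why KSM_CHECK_PAGE depends on KSM and defaults to n rather than being enabled unconditionally.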