author    Luden <luden@ghostmail.com>      2016-04-07 13:20:23 +0200
committer Ziyan <jaraidaniel@gmail.com>    2016-05-01 23:35:47 +0200
commit    980a74e2c46a5f8b8ae8ef4e73dbcc4bf26443b9 (patch)
tree      c6b2cec1fc791963579ac1becb119d2139e478c7 /mm
parent    a62e05e883fbbb201f6f89453849452c32d79429 (diff)
CMA fixes for kernel 3.0

Make CMA allocations cope with KSM pages: bracket contiguous-range
migration with the KSM thread mutex and prune stale stable-tree nodes
afterwards (new ksm_start_migration() / ksm_finalize_migration() /
ksm_abort_migration() hooks), skip the anon_vma lookup for KSM pages
in __unmap_and_move(), pass offlining == true to migrate_pages() so
KSM pages are actually migrated, and adapt the out_of_memory() call
to the 3.0 signature.
Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c         | 23
-rw-r--r--  mm/migrate.c     |  2
-rw-r--r--  mm/page_alloc.c  | 18
3 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index b126a7fb365..d623e5b75c5 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1786,9 +1786,7 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
stable_node->kpfn = page_to_pfn(newpage);
}
}
-#endif /* CONFIG_MIGRATION */
-#ifdef CONFIG_MEMORY_HOTREMOVE
static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
unsigned long end_pfn)
{
@@ -1805,6 +1803,27 @@ static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
return NULL;
}
+void ksm_start_migration(void)
+{
+ mutex_lock(&ksm_thread_mutex);
+}
+
+void ksm_finalize_migration(unsigned long start_pfn, unsigned long nr_pages)
+{
+ struct stable_node *stable_node;
+ while ((stable_node = ksm_check_stable_tree(start_pfn,
+ start_pfn + nr_pages)) != NULL)
+ remove_node_from_stable_tree(stable_node);
+ mutex_unlock(&ksm_thread_mutex);
+}
+
+void ksm_abort_migration(void)
+{
+ mutex_unlock(&ksm_thread_mutex);
+}
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
static int ksm_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
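
Taken together, the three new entry points bracket a migration attempt:
ksm_start_migration() takes ksm_thread_mutex, ksm_finalize_migration()
prunes stable-tree nodes still pointing into the migrated PFN range and
drops the mutex, and ksm_abort_migration() only drops the mutex. A
minimal caller sketch of the intended pairing (the wrapper and
do_migrate() are hypothetical; only the ksm_* names come from the patch):

	/* Hypothetical caller; only the ksm_* hooks are from the patch. */
	static int migrate_range_with_ksm(unsigned long start_pfn,
					  unsigned long nr_pages)
	{
		int ret;

		ksm_start_migration();	/* mutex_lock(&ksm_thread_mutex) */
		ret = do_migrate(start_pfn, nr_pages);	/* hypothetical step */
		if (ret) {
			ksm_abort_migration();	/* error: just drop the mutex */
			return ret;
		}
		/* Success: remove stable-tree nodes whose kpfn now falls in
		 * [start_pfn, start_pfn + nr_pages), then drop the mutex. */
		ksm_finalize_migration(start_pfn, nr_pages);
		return 0;
	}
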
diff --git a/mm/migrate.c b/mm/migrate.c
index 3e315a7e4c8..76abb9ad067 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -770,7 +770,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* File Caches may use write_page() or lock_page() in migration, then,
* just care Anon page here.
*/
- if (PageAnon(page)) {
+ if (PageAnon(page) && !PageKsm(page)) {
/*
* Only page_lock_anon_vma() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
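
The extra PageKsm() test matters because, for a KSM page, page->mapping
does not point to an anon_vma at all, so the page_lock_anon_vma() path
must be skipped for such pages. A sketch of the mapping encoding this
relies on (constants as in 3.0-era include/linux/mm.h; the helper name
is illustrative, not from the patch):

	/* page->mapping low bits: PAGE_MAPPING_ANON alone means an
	 * anon_vma pointer; PAGE_MAPPING_ANON | PAGE_MAPPING_KSM means
	 * a pointer to a struct stable_node instead. */
	static inline bool anon_vma_is_real(struct page *page)
	{
		unsigned long ksm_bits = PAGE_MAPPING_ANON | PAGE_MAPPING_KSM;

		return ((unsigned long)page->mapping & ksm_bits) ==
			PAGE_MAPPING_ANON;
	}
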
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fad33968e65..5a900771cdc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
+#include <linux/ksm.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -5298,7 +5299,6 @@ static void __setup_per_zone_wmarks(void)
zone->watermark[WMARK_MIN] = min;
}
-
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
low + (min >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
@@ -5898,7 +5898,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
ret = migrate_pages(&cc.migratepages,
__alloc_contig_migrate_alloc,
- 0, false, MIGRATE_SYNC);
+ 0, true, MIGRATE_SYNC);
}
putback_lru_pages(&cc.migratepages);
@@ -5943,7 +5943,7 @@ static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
NULL);
if (!did_some_progress) {
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order, NULL, false);
+ out_of_memory(zonelist, gfp_mask, order, NULL);
}
}
@@ -5978,6 +5978,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
{
struct zone *zone = page_zone(pfn_to_page(start));
unsigned long outer_start, outer_end;
+ bool ksm_migration_started = false;
int ret = 0, order;
/*
@@ -6009,6 +6010,11 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret)
goto done;
+ // Take the KSM lock so that we can pass offlining == true to
+ // migrate_pages() below and have KSM pages migrated as well.
+ ksm_start_migration();
+ ksm_migration_started = true;
+
ret = __alloc_contig_migrate_range(start, end);
if (ret)
goto done;
@@ -6070,9 +6076,16 @@
if (end != outer_end)
free_contig_range(end, outer_end - end);
+ // Finalize KSM migration (this also drops the KSM lock) and clear
+ // the flag so the common exit path below does not unlock twice.
+ ksm_finalize_migration(start, end - start);
+ ksm_migration_started = false;
+
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ if (ksm_migration_started) {
+ ksm_abort_migration();
+ }
return ret;
}
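
page_alloc.c now reaches the three new KSM hooks through <linux/ksm.h>,
but this view is limited to 'mm', so the header side of the commit is
not shown. A minimal sketch of the declarations the build would need,
with no-op stubs for configurations that compile KSM out (assumed
shape, not part of the displayed diff):

	#ifdef CONFIG_KSM
	void ksm_start_migration(void);
	void ksm_finalize_migration(unsigned long start_pfn,
				    unsigned long nr_pages);
	void ksm_abort_migration(void);
	#else
	static inline void ksm_start_migration(void) {}
	static inline void ksm_finalize_migration(unsigned long start_pfn,
						  unsigned long nr_pages) {}
	static inline void ksm_abort_migration(void) {}
	#endif
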