author     Luden <luden@ghostmail.com>    2016-02-13 23:44:14 +0100
committer  Ziyan <jaraidaniel@gmail.com>  2016-05-01 23:35:55 +0200
commit     ab26843c057773f42f5b46e4e4a519b39707253e (patch)
tree       d90a6f0111048a797cf217becfc2d3b70bb6e6b8 /mm
parent     f819ad93dea3adee5f4a7ea87e1f6631aea83d44 (diff)
Retry CMA allocations.
It looks like the Linux page migration code was never designed to be deterministic or synchronous: there are multiple race conditions between different parts of the code that make a single-pass CMA allocation very likely to fail, especially for the large memory ranges we need for Ducati. Therefore, change the allocation code to perform multiple allocation attempts.

To further increase the chances of the allocation succeeding, and to make things faster, the results of previous attempts are kept: all pages that are already isolated stay isolated, so retries only cover the pages that failed isolation or migration in earlier passes. Additionally, a small delay between attempts improves the chances that other code frees the pages we need.
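For orientation, here is a condensed sketch of the retry loop this patch adds to alloc_contig_range() (the names, the 25-attempt limit and the 50 ms delay match the diff below; error handling and the later steps are abbreviated):

    /* Condensed sketch of the retry loop added below. Isolation state is
     * kept across iterations, so each retry only has to deal with the
     * pages that failed isolation or migration in the previous pass. */
    int ret = -EBUSY, tries, nMaxTries = 25;

    for (tries = 0; ret && tries < nMaxTries; ++tries) {
            if (tries > 5)
                    msleep(50);     /* give other code a chance to free pages */

            ret = start_isolate_page_range(pfn_max_align_down(start),
                                           pfn_max_align_up(end), migratetype);
            if (ret)
                    continue;       /* already-isolated blocks are skipped on retry */

            ret = __alloc_contig_migrate_range(start, end);
            /* ... the remaining steps below likewise set ret and continue on failure ... */
    }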
Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c        |   3
-rw-r--r--  mm/page_alloc.c | 161
2 files changed, 103 insertions, 61 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index d623e5b75c5..07a8358df93 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1814,10 +1814,9 @@ void ksm_finalize_migration(unsigned long start_pfn, unsigned long nr_pages)
while ((stable_node = ksm_check_stable_tree(start_pfn,
start_pfn + nr_pages)) != NULL)
remove_node_from_stable_tree(stable_node);
- mutex_unlock(&ksm_thread_mutex);
}
-void ksm_abort_migration(void)
+void ksm_stop_migration(void)
{
mutex_unlock(&ksm_thread_mutex);
}
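The ksm.c change above turns the old abort-only path into a uniform locking contract, which is what allows page_alloc.c below to restart migration several times. A sketch of the intended pairing (illustrative only, not part of the patch):

    /* Locking contract after this change: ksm_start_migration() takes
     * ksm_thread_mutex, ksm_finalize_migration() no longer releases it,
     * and every start must now be balanced by ksm_stop_migration(),
     * whether the attempt succeeded or is about to be retried. */
    ksm_start_migration();                   /* mutex_lock(&ksm_thread_mutex)    */
    /* ... isolate and migrate pages, possibly giving up and retrying ... */
    ksm_finalize_migration(start_pfn, nr);   /* prune stable tree, keep the lock */
    ksm_stop_migration();                    /* mutex_unlock(&ksm_thread_mutex)  */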
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a900771cdc..ba78d904cdf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -59,6 +59,7 @@
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
+#include <linux/delay.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -5802,7 +5803,11 @@ int set_migratetype_isolate(struct page *page)
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
- if (__count_immobile_pages(zone, page, arg.pages_found))
+ /* Note: if pageblock has MIGRATE_ISOLATE type already set, it means
+ * the previous __count_immobile_pages() check succeeded and we're now
+ * in retry mode, so skip this check now as it will fail. */
+ if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE ||
+ __count_immobile_pages(zone, page, arg.pages_found))
ret = 0;
/*
@@ -5977,9 +5982,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype)
{
struct zone *zone = page_zone(pfn_to_page(start));
- unsigned long outer_start, outer_end;
- bool ksm_migration_started = false;
- int ret = 0, order;
+ unsigned long outer_start = start, outer_end = end;
+ int ret = -EBUSY, order, tries, nMaxTries = 25;
/*
* What we do here is we mark all pageblocks in range as
@@ -6005,68 +6009,105 @@ int alloc_contig_range(unsigned long start, unsigned long end,
* put back to page allocator so that buddy can use them.
*/
- ret = start_isolate_page_range(pfn_max_align_down(start),
- pfn_max_align_up(end), migratetype);
- if (ret)
- goto done;
+ for (tries = 0; ret && tries < nMaxTries; ++tries) {
+#ifdef CONFIG_KSM
+ if (tries > 0) {
+ ksm_stop_migration();
+ }
+#endif
- // Need to take KSM lock, so that we can specify offlining = true
- // and move KSM pages.
- ksm_start_migration();
- ksm_migration_started = true;
+ /* If we had several unsuccessful iterations already -
+ * try to wait a bit between them so that the chances
+ * of pages being freed are higher. */
+ if (tries > 5) {
+ msleep(50);
+ }
- ret = __alloc_contig_migrate_range(start, end);
- if (ret)
- goto done;
+#ifdef CONFIG_KSM
+ /* Need to take KSM lock, so that we can specify offlining = true
+ * and move KSM pages. */
+ ksm_start_migration();
+#endif
- /*
- * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
- * aligned blocks that are marked as MIGRATE_ISOLATE. What's
- * more, all pages in [start, end) are free in page allocator.
- * What we are going to do is to allocate all pages from
- * [start, end) (that is remove them from page allocator).
- *
- * The only problem is that pages at the beginning and at the
- * end of interesting range may be not aligned with pages that
- * page allocator holds, ie. they can be part of higher order
- * pages. Because of this, we reserve the bigger range and
- * once this is done free the pages we are not interested in.
- *
- * We don't have to hold zone->lock here because the pages are
- * isolated thus they won't get removed from buddy.
- */
+ ret = start_isolate_page_range(pfn_max_align_down(start),
+ pfn_max_align_up(end), migratetype);
+ if (ret) {
+#ifdef CONFIG_CMA_DEBUG
+ pr_warn("alloc_contig_range: start_isolate_page_range failed, result = %d, retrying at iteration %d\n", ret, tries);
+#endif
+ continue;
+ }
+
+ ret = __alloc_contig_migrate_range(start, end);
+ if (ret) {
+#ifdef CONFIG_CMA_DEBUG
+ pr_warn("alloc_contig_range: __alloc_contig_migrate_range failed, result = %d, retrying at iteration %d\n", ret, tries);
+#endif
+ continue;
+ }
- lru_add_drain_all();
- drain_all_pages();
+ /*
+ * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+ * aligned blocks that are marked as MIGRATE_ISOLATE. What's
+ * more, all pages in [start, end) are free in page allocator.
+ * What we are going to do is to allocate all pages from
+ * [start, end) (that is remove them from page allocator).
+ *
+ * The only problem is that pages at the beginning and at the
+ * end of interesting range may be not aligned with pages that
+ * page allocator holds, ie. they can be part of higher order
+ * pages. Because of this, we reserve the bigger range and
+ * once this is done free the pages we are not interested in.
+ *
+ * We don't have to hold zone->lock here because the pages are
+ * isolated thus they won't get removed from buddy.
+ */
+
+ lru_add_drain_all();
+ drain_all_pages();
- order = 0;
- outer_start = start;
- while (!PageBuddy(pfn_to_page(outer_start))) {
- if (++order >= MAX_ORDER) {
+ order = 0;
+ outer_start = start;
+ while (!PageBuddy(pfn_to_page(outer_start))) {
+ if (++order >= MAX_ORDER) {
+#ifdef CONFIG_CMA_DEBUG
+ pr_warn("alloc_contig_range: MAX_ORDER exceeded, retrying at iteration %d\n", tries);
+#endif
+ ret = -EBUSY;
+ break;
+ }
+ outer_start &= ~0UL << order;
+ }
+
+ if (ret)
+ continue;
+
+ /* Make sure the range is really isolated. */
+ if (test_pages_isolated(outer_start, end)) {
+#ifdef CONFIG_CMA_DEBUG
+ pr_warn("alloc_contig_range: test_pages_isolated(%lx, %lx) failed, retrying at iteration %d\n",
+ outer_start, end, tries);
+#endif
ret = -EBUSY;
- goto done;
+ continue;
}
- outer_start &= ~0UL << order;
- }
- /* Make sure the range is really isolated. */
- if (test_pages_isolated(outer_start, end)) {
- pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
- outer_start, end);
- ret = -EBUSY;
- goto done;
- }
+ /*
+ * Reclaim enough pages to make sure that contiguous allocation
+ * will not starve the system.
+ */
+ __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
- /*
- * Reclaim enough pages to make sure that contiguous allocation
- * will not starve the system.
- */
- __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+ /* Grab isolated pages from freelists. */
+ outer_end = isolate_freepages_range(outer_start, end);
+ if (!outer_end) {
+ ret = -EBUSY;
+ continue;
+ }
+ }
- /* Grab isolated pages from freelists. */
- outer_end = isolate_freepages_range(outer_start, end);
- if (!outer_end) {
- ret = -EBUSY;
+ if (ret) {
+ pr_err("alloc_contig_range: max retries reached, failing\n");
goto done;
}
@@ -6076,15 +6117,17 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (end != outer_end)
free_contig_range(end, outer_end - end);
+#ifdef CONFIG_KSM
// Finalize KSM migration.
ksm_finalize_migration(start, end - start);
+#endif
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
- if (ksm_migration_started) {
- ksm_abort_migration();
- }
+#ifdef CONFIG_KSM
+ ksm_stop_migration();
+#endif
return ret;
}
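From a caller's point of view the retries are transparent: alloc_contig_range() still returns a single status, it simply makes up to nMaxTries attempts internally before giving up. A hypothetical caller (illustrative only; the pfn range and MIGRATE_CMA are placeholders, not part of this patch) still looks like:

    /* Hypothetical CMA-style caller: the retry behaviour now lives
     * entirely inside alloc_contig_range(). */
    int err = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
    if (err)
            pr_err("contiguous range %lx-%lx still busy after retries: %d\n",
                   pfn, pfn + nr_pages, err);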