author:    Yi-wei Zhao <gbjc64@motorola.com>    2012-12-20 18:06:21 -0600
committer: Ziyan <jaraidaniel@gmail.com>        2016-01-08 10:44:50 +0100
commit:    95c015c61de96acca5bb4f3fd77f0cdab3322060
tree:      f0e14c2f4ec30e0c7fbbca29473779871026e09a /mm
parent:    e9e13169869ecfd1b2566a923cf08a3d35b21614
mm: page_alloc: retry direct compaction
When system load is heavy, memory compaction may encounter
transient failures, so add a retry mechanism. Also add the
statistic "compact_retry_success" to /proc/vmstat to show how
many compactions succeed only because of a retry.
In addition, __zone_watermark_ok() easily returns false when the
system is busy, which makes compaction fail as well: the per-order
nr_free counts can grow while the loop runs and push the running
free_pages total negative. Clamp it at zero to keep some margin
in that case.
This patch is ported from the patch:
* (CR) mem compaction enhancement
Reviewed-on: http://gerrit.pcs.mot.com/494962
Change-Id: I5ad08364678c7e68993f66e0c0d43d97712a99b6
Signed-off-by: Yi-wei Zhao <gbjc64@motorola.com>
Reviewed-on: http://gerrit.pcs.mot.com/507230
Tested-by: Jira Key <JIRAKEY@motorola.com>
Reviewed-by: Jason Hrycay <jason.hrycay@motorola.com>
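
The new counter can be checked from userspace once the patch is in
place. A minimal sketch (the "compact_retry_success" field name comes
from this patch; the reader program itself is illustrative C, not part
of the change):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[128];
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        while (fgets(line, sizeof(line), f)) {
            /* match the field added by this patch */
            if (!strncmp(line, "compact_retry_success", 21))
                fputs(line, stdout); /* e.g. "compact_retry_success 42" */
        }
        fclose(f);
        return 0;
    }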
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 30
-rw-r--r--  mm/vmstat.c     |  1
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4410edd4e33..17c9370cf02 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1513,10 +1513,20 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		/* At the next order, this order's pages become unavailable */
 		free_pages -= z->free_area[o].nr_free << o;
 
+		/*
+		 * z->free_area[o].nr_free is changing. It may increase if
+		 * there are newly freed pages. So free_pages may become
+		 * negative (then always < min). But it should not block
+		 * memory allocation due to new free pages added in lower
+		 * order. Just amend some to save some margin case...
+		 */
+		if (free_pages < 0)
+			free_pages = 0;
+
 		/* Require fewer higher order pages to be free */
 		min >>= min_free_order_shift;
 
-		if (free_pages <= min)
+		if (free_pages < min)
 			return false;
 	}
 	return true;
@@ -1919,6 +1929,7 @@ out:
 }
 
 #ifdef CONFIG_COMPACTION
+#define COMPACTION_RETRY_TIMES 2
 /* Try memory compaction for high-order allocations before reclaim */
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -1929,6 +1940,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	unsigned long *did_some_progress)
 {
 	struct page *page;
+	int retry_times = 0, order_adj = order;
 
 	if (!order)
 		return NULL;
@@ -1938,8 +1950,9 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 	}
 
+retry_compact:
 	current->flags |= PF_MEMALLOC;
-	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
+	*did_some_progress = try_to_compact_pages(zonelist, order_adj, gfp_mask,
 						nodemask, sync_migration);
 	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
@@ -1956,9 +1969,22 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		preferred_zone->compact_considered = 0;
 		preferred_zone->compact_defer_shift = 0;
 		count_vm_event(COMPACTSUCCESS);
+
+		if (retry_times)
+			count_vm_event(COMPACTSUCCESS_RETRY);
+
 		return page;
 	}
 
+	if (retry_times++ < COMPACTION_RETRY_TIMES) {
+
+		order_adj++;
+		if (order_adj >= MAX_ORDER)
+			order_adj = -1;
+
+		goto retry_compact;
+	}
+
 	/*
 	 * It's bad if compaction run occurs and fails.
 	 * The most likely reason is that pages exist,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6559013c5a1..0cdab04c9a0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -763,6 +763,7 @@ const char * const vmstat_text[] = {
 	"compact_stall",
 	"compact_fail",
 	"compact_success",
+	"compact_retry_success",
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
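
As a standalone illustration of the __zone_watermark_ok() hunk above:
the per-order nr_free counters are read without locking, so the running
subtraction can dip below zero when pages are freed concurrently. The
numbers below are made up purely to show the effect of the new clamp:

    #include <stdio.h>

    int main(void)
    {
        /* Made-up snapshot: pretend nr_free[2] grew after free_pages
         * was sampled, as can happen under load. */
        long free_pages = 100;
        long nr_free[3] = { 40, 20, 15 };

        for (int o = 0; o < 3; o++) {
            free_pages -= nr_free[o] << o;  /* 100 -> 60 -> 20 -> -40 */
            if (free_pages < 0)             /* the clamp this patch adds */
                free_pages = 0;
            printf("order %d: free_pages = %ld\n", o, free_pages);
        }
        return 0;
    }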