author     codeworkx <daniel.hillenbrand@codeworkx.de>  2012-06-02 13:09:29 +0200
committer  codeworkx <daniel.hillenbrand@codeworkx.de>  2012-06-02 13:09:29 +0200
commit     c6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
tree       f3b4021d252c52d6463a9b3c1bb7245e399b009c /mm/page_alloc.c
parent     c6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
download   kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f (.tar.gz / .tar.bz2 / .zip)
samsung update 1
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  47
1 file changed, 44 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8439d2a04f2..4d8f48c320c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -78,6 +78,8 @@ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif
+struct rw_semaphore page_alloc_slow_rwsem;
+
/*
* Array of node states.
*/
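
The new page_alloc_slow_rwsem is taken for read by every sleepable allocation that enters the slow path (see the hunks further down), so a single writer can stall all such allocations at once. The writer side is not part of this patch; presumably some other Samsung component takes it. A minimal sketch of what a hypothetical writer would look like, assuming kernel context and a caller that wants to pause slow-path allocations around a critical section:

    /* Hypothetical writer -- not in this patch. down_write() waits for
     * all in-flight slow-path allocations to drop their read lock,
     * then holds off new ones until up_write(). */
    down_write(&page_alloc_slow_rwsem);
    /* ... work that must not race with slow-path allocations ... */
    up_write(&page_alloc_slow_rwsem);
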
@@ -127,6 +129,20 @@ void pm_restrict_gfp_mask(void)
saved_gfp_mask = gfp_allowed_mask;
gfp_allowed_mask &= ~GFP_IOFS;
}
+
+static bool pm_suspending(void)
+{
+ if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+ return false;
+ return true;
+}
+
+#else
+
+static bool pm_suspending(void)
+{
+ return false;
+}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
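
pm_suspending() reports whether pm_restrict_gfp_mask() has already run, i.e. whether GFP_IOFS has been cleared from gfp_allowed_mask because a suspend is in progress; with CONFIG_PM_SLEEP disabled it is stubbed out to always return false. The body could equally be written as a single expression; an equivalent form (a sketch, not what the patch adds):

    static bool pm_suspending(void)
    {
            /* True once pm_restrict_gfp_mask() has cleared GFP_IOFS. */
            return (gfp_allowed_mask & GFP_IOFS) != GFP_IOFS;
    }
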
@@ -176,6 +192,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
};
int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
@@ -1487,7 +1504,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
free_pages -= z->free_area[o].nr_free << o;
/* Require fewer higher order pages to be free */
- min >>= 1;
+ min >>= min_free_order_shift;
if (free_pages <= min)
return false;
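
Upstream halves the free-page requirement at each higher order (min >>= 1); this change routes the shift through min_free_order_shift, defaulting to 1 so behaviour is unchanged until the knob is tuned. A larger shift relaxes the higher-order requirement faster; a shift of 0 keeps it flat. A standalone sketch of the loop, assuming nr_free[] holds the per-order free-block counts as in struct free_area:

    /* Sketch of the per-order scaling in __zone_watermark_ok().
     * Example: min=4096, shift=1, order-2 request -> min drops to
     * 2048, then 1024, as orders 0 and 1 are checked; with shift=2
     * it drops to 1024, then 256. */
    static bool watermark_ok(long free_pages, long min, int order,
                             const unsigned long *nr_free, int shift)
    {
            int o;

            for (o = 0; o < order; o++) {
                    free_pages -= nr_free[o] << o; /* blocks below order o+1 don't count */
                    min >>= shift;                 /* scale requirement per order */
                    if (free_pages <= min)
                            return false;
            }
            return true;
    }
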
@@ -2095,7 +2112,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned long pages_reclaimed = 0;
unsigned long did_some_progress;
bool sync_migration = false;
-
+#ifdef CONFIG_ANDROID_WIP
+ unsigned long start_tick = jiffies;
+#endif
/*
* In the slowpath, we sanity check order to avoid ever trying to
* reclaim >= MAX_ORDER areas which will never succeed. Callers may
@@ -2107,6 +2126,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
return NULL;
}
+ if (gfp_mask & __GFP_WAIT)
+ down_read(&page_alloc_slow_rwsem);
+
/*
* GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
* __GFP_NOWARN set) should not cause reclaim since the subsystem
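
Only __GFP_WAIT (sleepable) allocations take the read side: atomic and interrupt-context allocations must not sleep on a rwsem, so a write holder never blocks them. The same gfp_mask test has to guard every unlock on the way out, as the nopage/got_pg hunks below show. A pair of hypothetical helpers (not in the patch) making the pairing explicit:

    static inline void page_alloc_slow_lock(gfp_t gfp_mask)
    {
            if (gfp_mask & __GFP_WAIT)      /* sleepable allocations only */
                    down_read(&page_alloc_slow_rwsem);
    }

    static inline void page_alloc_slow_unlock(gfp_t gfp_mask)
    {
            if (gfp_mask & __GFP_WAIT)
                    up_read(&page_alloc_slow_rwsem);
    }
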
@@ -2193,8 +2215,14 @@ rebalance:
/*
* If we failed to make any progress reclaiming, then we are
* running out of options and have to consider going OOM
+ * ANDROID_WIP: If we are looping more than 1 second, consider OOM
*/
- if (!did_some_progress) {
+#ifdef CONFIG_ANDROID_WIP
+#define SHOULD_CONSIDER_OOM !did_some_progress || time_after(jiffies, start_tick + HZ)
+#else
+#define SHOULD_CONSIDER_OOM !did_some_progress
+#endif
+ if (SHOULD_CONSIDER_OOM) {
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
if (oom_killer_disabled)
goto nopage;
@@ -2225,6 +2253,14 @@ rebalance:
goto restart;
}
+
+ /*
+ * Suspend converts GFP_KERNEL to __GFP_WAIT which can
+ * prevent reclaim making forward progress without
+ * invoking OOM. Bail if we are suspending
+ */
+ if (pm_suspending())
+ goto nopage;
}
/* Check if we should retry the allocation */
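
With CONFIG_ANDROID_WIP, the OOM killer is considered not only when reclaim made no progress but also once the slow path has looped for more than a second (start_tick + HZ); and if suspend has restricted the GFP mask, the allocation bails out rather than waiting on reclaim that cannot make progress. The macro's unparenthesized expansion is safe only because it is used alone inside if (); a sketch of a function-style alternative, assuming the same start_tick bookkeeping:

    /* Sketch: inline alternative to SHOULD_CONSIDER_OOM (not in the patch). */
    #ifdef CONFIG_ANDROID_WIP
    static inline bool should_consider_oom(unsigned long did_some_progress,
                                           unsigned long start_tick)
    {
            return !did_some_progress || time_after(jiffies, start_tick + HZ);
    }
    #endif
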
@@ -2251,10 +2287,14 @@ rebalance:
nopage:
warn_alloc_failed(gfp_mask, order, NULL);
+ if (gfp_mask & __GFP_WAIT)
+ up_read(&page_alloc_slow_rwsem);
return page;
got_pg:
if (kmemcheck_enabled)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+ if (gfp_mask & __GFP_WAIT)
+ up_read(&page_alloc_slow_rwsem);
return page;
}
@@ -5010,6 +5050,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
+ init_rwsem(&page_alloc_slow_rwsem);
}
/*