author    Martin Bligh <mbligh@google.com>        2006-10-28 10:38:25 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>   2006-10-28 11:30:51 -0700
commit    bbdb396a60b2ebf7de3b717991e5d3e28c8b7bbd
tree      efeca197ae8a2421940006ace6c1b719b1811a0c
parent    3bb1a852ab6c9cdf211a2f4a2f502340c8c38eca
[PATCH] Use min of two prio settings in calculating distress for reclaim
If try_to_free_pages / balance_pgdat are called with a gfp_mask specifying
GFP_IO and/or GFP_FS, they will reclaim the requisite number of pages and then
reset prev_priority to DEF_PRIORITY (or to some other high, i.e. non-urgent,
value).
However, another reclaimer without those gfp_mask flags set (say, GFP_NOIO)
may still be struggling to reclaim pages. The concurrent overwrite of
zone->prev_priority will cause this GFP_NOIO thread to unexpectedly cease
deactivating mapped pages, thus causing reclaim difficulties.
The fix is to key the distress calculation not off zone->prev_priority alone,
but to also take the local caller's priority into account, by using
min(zone->prev_priority, priority).
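For reference, a minimal standalone sketch (not part of the patch; the example
priority values and the demo main() are assumptions, while DEF_PRIORITY = 12
and the 100 >> shift follow mm/vmscan.c) showing why the min() matters when a
GFP_IO/GFP_FS reclaimer has just reset zone->prev_priority while a GFP_NOIO
reclaimer is still scanning hard:

#include <stdio.h>

#define DEF_PRIORITY 12

static int min(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* a GFP_IO/GFP_FS reclaimer just finished and reset the zone value */
	int zone_prev_priority = DEF_PRIORITY;
	/* a concurrent GFP_NOIO reclaimer still scanning at high urgency */
	int caller_priority = 2;

	/* old code: 100 >> 12 == 0, so mapped pages stop being deactivated */
	int old_distress = 100 >> zone_prev_priority;
	/* patched code: 100 >> 2 == 25, reclaim pressure is still reflected */
	int new_distress = 100 >> min(zone_prev_priority, caller_priority);

	printf("old distress: %d, new distress: %d\n", old_distress, new_distress);
	return 0;
}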
Signed-off-by: Martin J. Bligh <mbligh@google.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  mm/vmscan.c  8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b32560ead5c..518540a4a2a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -760,7 +760,7 @@ static inline int zone_is_near_oom(struct zone *zone)
  * But we had to alter page->flags anyway.
  */
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-				struct scan_control *sc)
+				struct scan_control *sc, int priority)
 {
 	unsigned long pgmoved;
 	int pgdeactivate = 0;
@@ -784,7 +784,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * `distress' is a measure of how much trouble we're having
 	 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
 	 */
-	distress = 100 >> zone->prev_priority;
+	distress = 100 >> min(zone->prev_priority, priority);
 
 	/*
 	 * The point of this algorithm is to decide when to start
@@ -936,7 +936,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 			nr_to_scan = min(nr_active,
 				(unsigned long)sc->swap_cluster_max);
 			nr_active -= nr_to_scan;
-			shrink_active_list(nr_to_scan, zone, sc);
+			shrink_active_list(nr_to_scan, zone, sc, priority);
 		}
 
 		if (nr_inactive) {
@@ -1384,7 +1384,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
 			if (zone->nr_scan_active >= nr_pages || pass > 3) {
 				zone->nr_scan_active = 0;
 				nr_to_scan = min(nr_pages, zone->nr_active);
-				shrink_active_list(nr_to_scan, zone, sc);
+				shrink_active_list(nr_to_scan, zone, sc, prio);
 			}
 		}