author	Wu Fengguang <fengguang.wu@intel.com>	2009-06-16 15:31:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 19:47:30 -0700
commit	045a2529a3513faed2d45bd82f9013b124309d94 (patch)
tree	99b7743b6dab54286afe94d4d7b8113a271661b5 /mm/readahead.c
parent	dc566127dd161b6c997466a2349ac179527ea89b (diff)
readahead: move the random read case to bottom
Split all readahead cases, and move the random one to bottom.

No behavior changes.

This is to prepare for the introduction of context readahead, and make
it easy for inserting accounting/tracing points for each case.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Vladislav Bolkhovitin <vst@vlnb.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
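As an aside for readers skimming the diff below: the reordered checks amount to a small decision ladder. The following user-space sketch models that ladder under simplifying assumptions; the ra_state struct, ra_case enum, and classify() are illustrative stand-ins, not the kernel's file_ra_state or ondemand_readahead() themselves, and prev_pos is kept in page units here rather than being derived via PAGE_CACHE_SHIFT.

/*
 * Illustrative sketch of the decision ladder in ondemand_readahead()
 * after this patch.  NOT kernel code: ra_state, ra_case and classify()
 * are simplified stand-ins, and prev_pos is modeled directly in pages
 * instead of being shifted down by PAGE_CACHE_SHIFT.
 */
#include <stdio.h>

struct ra_state {
	unsigned long start;		/* first page of the current window */
	unsigned long size;		/* window size in pages */
	unsigned long async_size;	/* distance of the async marker */
	unsigned long prev_pos;		/* previous read position, in pages */
};

enum ra_case {
	RA_INITIAL,	/* start of file, oversize read, sequential miss */
	RA_RAMP_UP,	/* expected offset hit: extend the window */
	RA_RANDOM,	/* standalone small read: bypass readahead state */
};

/* Mirror the order of checks as rearranged by this patch. */
static enum ra_case classify(const struct ra_state *ra, unsigned long offset,
			     unsigned long req_size, unsigned long max)
{
	/* start of file */
	if (!offset)
		return RA_INITIAL;

	/* expected callback offset: assume sequential access */
	if (offset == ra->start + ra->size - ra->async_size ||
	    offset == ra->start + ra->size)
		return RA_RAMP_UP;

	/* oversize read */
	if (req_size > max)
		return RA_INITIAL;

	/* sequential cache miss (unsigned wrap keeps backward jumps out) */
	if (offset - ra->prev_pos <= 1UL)
		return RA_INITIAL;

	/* the random read case, now at the bottom */
	return RA_RANDOM;
}

int main(void)
{
	struct ra_state ra = { .start = 100, .size = 32,
			       .async_size = 16, .prev_pos = 200 };
	static const char *names[] = { "initial", "ramp-up", "random" };

	/* 116 == start + size - async_size: the async marker, so ramp up */
	printf("offset 116  -> %s\n", names[classify(&ra, 116, 4, 512)]);
	/* 201 is right after prev_pos: sequential cache miss */
	printf("offset 201  -> %s\n", names[classify(&ra, 201, 4, 512)]);
	/* 9000 out of the blue with a small request: random */
	printf("offset 9000 -> %s\n", names[classify(&ra, 9000, 4, 512)]);
	return 0;
}

Compiled with any C compiler, this prints ramp-up, initial, and random for the three probes, matching the order in which the patched function falls through its cases.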
Diffstat (limited to 'mm/readahead.c')
 mm/readahead.c | 46 +++++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index a7f01fcce9e..ceed7e4790b 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -339,33 +339,25 @@ ondemand_readahead(struct address_space *mapping,
 		       unsigned long req_size)
 {
 	unsigned long max = max_sane_readahead(ra->ra_pages);
-	pgoff_t prev_offset;
-	int sequential;
+
+	/*
+	 * start of file
+	 */
+	if (!offset)
+		goto initial_readahead;
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
 	 */
-	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-			offset == (ra->start + ra->size))) {
+	if ((offset == (ra->start + ra->size - ra->async_size) ||
+	     offset == (ra->start + ra->size))) {
 		ra->start += ra->size;
 		ra->size = get_next_ra_size(ra, max);
 		ra->async_size = ra->size;
 		goto readit;
 	}
 
-	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	sequential = offset - prev_offset <= 1UL || req_size > max;
-
-	/*
-	 * Standalone, small read.
-	 * Read as is, and do not pollute the readahead state.
-	 */
-	if (!hit_readahead_marker && !sequential) {
-		return __do_page_cache_readahead(mapping, filp,
-						offset, req_size, 0);
-	}
-
 	/*
 	 * Hit a marked page without valid readahead state.
 	 * E.g. interleaved reads.
@@ -391,12 +383,24 @@ ondemand_readahead(struct address_space *mapping,
 	}
 
 	/*
-	 * It may be one of
-	 *  - first read on start of file
-	 *  - sequential cache miss
-	 *  - oversize random read
-	 * Start readahead for it.
+	 * oversize read
 	 */
+	if (req_size > max)
+		goto initial_readahead;
+
+	/*
+	 * sequential cache miss
+	 */
+	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+		goto initial_readahead;
+
+	/*
+	 * standalone, small random read
+	 * Read as is, and do not pollute the readahead state.
+	 */
+	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
 	ra->start = offset;
 	ra->size = get_init_ra_size(req_size, max);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;