author		Jens Axboe <jens.axboe@oracle.com>	2008-09-25 11:42:41 +0200
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 08:56:19 +0200
commit		e3ba9ae58a5599226e3976b29c8093041ae7c332 (patch)
tree		5e8a7c0c1b675e19fa6209489479fe9b799a1b3e /block
parent		f7d7b7a7a3db6526a84ea755c1c54a051e9a52de (diff)
block: reserve some tags just for sync IO
By only allowing async IO to consume 3/4ths of the tag depth, we always
have slots free to serve sync IO. This is important to avoid having
writes fill the entire tag queue, thus starving reads.

Original patch and idea from Linus Torvalds <torvalds@linux-foundation.org>

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
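A minimal user-space sketch of the reservation idea (illustrative only;
MAX_DEPTH, tag_map and alloc_tag() are made-up names, not the kernel's
blk-tag API): by starting the async search a quarter of the way into the
tag map, async IO can never claim the low quarter of tags.

/* sketch.c — illustrative only, not the kernel's blk-tag code */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 32U

static unsigned long tag_map;		/* bit i set => tag i in use */

/* First free tag at or above the caller's start point, or -1 if none. */
static int alloc_tag(bool is_sync)
{
	/* sync IO searches from 0; async IO skips the low quarter,
	 * so it can consume at most 3/4ths of the depth */
	unsigned tag = is_sync ? 0 : MAX_DEPTH >> 2;

	for (; tag < MAX_DEPTH; tag++) {
		if (!(tag_map & (1UL << tag))) {
			tag_map |= 1UL << tag;
			return tag;
		}
	}
	return -1;
}

int main(void)
{
	int n = 0;

	/* flood the queue with async writes */
	while (alloc_tag(false) >= 0)
		n++;
	printf("async took %d of %u tags\n", n, MAX_DEPTH);	/* 24 of 32 */

	/* a sync read still finds the reserved low quarter free */
	printf("sync got tag %d\n", alloc_tag(true));		/* tag 0 */
	return 0;
}

With a depth of 32, async allocation stops after 24 tags while a sync
request still finds tag 0 free.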
Diffstat (limited to 'block')
-rw-r--r--  block/blk-tag.c  14  ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 8a99688eb1b..c0d419e84ce 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned max_depth, offset;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	/*
 	 * Protect against shared tag maps, as we may not have exclusive
 	 * access to the tag map.
+	 *
+	 * We reserve a few tags just for sync IO, since we don't want
+	 * to starve sync IO on behalf of flooding async IO.
 	 */
+	max_depth = bqt->max_depth;
+	if (rq_is_sync(rq))
+		offset = 0;
+	else
+		offset = max_depth >> 2;
+
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-		if (tag >= bqt->max_depth)
+		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		if (tag >= max_depth)
 			return 1;
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
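The do/while retry is what keeps this correct on shared tag maps:
find_next_zero_bit() only nominates a candidate, and
test_and_set_bit_lock() can fail if a racing queue grabs the bit first,
in which case the search restarts. A rough user-space analogue using C11
atomics (a sketch under that assumption; start_tag() and MAX_DEPTH are
illustrative names, not kernel primitives):

/* race.c — rough analogue of the allocation loop, not kernel code */
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_DEPTH 32U

static atomic_ulong tag_map;		/* may be shared between queues */

static int start_tag(bool is_sync)
{
	unsigned offset = is_sync ? 0 : MAX_DEPTH >> 2;
	unsigned long map, bit;
	unsigned tag;

	do {
		/* find_next_zero_bit() analogue: scan a snapshot of the map */
		map = atomic_load(&tag_map);
		for (tag = offset; tag < MAX_DEPTH; tag++)
			if (!(map & (1UL << tag)))
				break;
		if (tag >= MAX_DEPTH)
			return 1;	/* no free tag in the allowed range */
		bit = 1UL << tag;
		/* test_and_set_bit_lock() analogue: atomic_fetch_or() returns
		 * the old value, so a set bit means a racer won — search again */
	} while (atomic_fetch_or(&tag_map, bit) & bit);

	return 0;	/* tag claimed */
}

int main(void)
{
	return start_tag(false);	/* async: claims tag 8, returns 0 */
}

Because atomic_fetch_or() hands back the previous value, checking the
claimed bit in that value is exactly the lost-the-race test.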