author		Tejun Heo <htejun@gmail.com>			2005-11-10 08:52:05 +0100
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2005-11-12 10:56:06 +0100
commit		15853af9f07673680439b224519c692f1352b959 (patch)
tree		e6a8fc1cd34dec568883cd62102e1e626d9241d9 /block/elevator.c
parent		1b5ed5e1f1315e37380e55102f58bcae3344d2a7 (diff)
[BLOCK] Implement elv_drain_elevator for improved switch error detection
This patch adds request_queue->nr_sorted, which keeps track of the
number of requests in the iosched, and implements elv_drain_elevator,
which performs forced dispatching. elv_drain_elevator checks whether
the iosched actually dispatched all the requests it held and prints an
error message if it didn't. As buggy forced dispatching can result in
wrong barrier operations, I think this extra check is worthwhile.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
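In short, the patch pairs a counter with a drain-and-verify helper:
q->nr_sorted is bumped when a request is sorted into the io scheduler
(ELEVATOR_INSERT_SORT) and dropped when a request is dispatched or
merged away, so after a forced drain it must be zero. A condensed
sketch of the helper, using exactly the names the patch introduces
(see the full diff below for context):

	/* Force the elevator to dispatch everything it claims to hold,
	 * then verify the bookkeeping.  A non-zero nr_sorted here means
	 * the io scheduler's forced dispatch is buggy, so complain (at
	 * most ten times) instead of silently proceeding, e.g. with a
	 * barrier issued over requests still stuck in the elevator. */
	static void elv_drain_elevator(request_queue_t *q)
	{
		static int printed;

		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
			;
		if (q->nr_sorted == 0)
			return;
		if (printed++ < 10) {
			printk(KERN_ERR "%s: forced dispatching is broken "
			       "(nr_sorted=%u), please report this\n",
			       q->elevator->elevator_type->elevator_name,
			       q->nr_sorted);
		}
	}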
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c | 25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index a475b1a19f6..73aa46b6db4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -226,6 +226,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
+	q->nr_sorted--;
 
 	boundary = q->end_sector;
 
@@ -284,6 +285,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 	if (e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
 
+	q->nr_sorted--;
 	q->last_merge = rq;
 }
 
@@ -315,6 +317,20 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
+static void elv_drain_elevator(request_queue_t *q)
+{
+	static int printed;
+	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+		;
+	if (q->nr_sorted == 0)
+		return;
+	if (printed++ < 10) {
+		printk(KERN_ERR "%s: forced dispatching is broken "
+		       "(nr_sorted=%u), please report this\n",
+		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
+	}
+}
+
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
@@ -349,9 +365,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 	case ELEVATOR_INSERT_BACK:
 		rq->flags |= REQ_SOFTBARRIER;
-
-		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-			;
+		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
 		 * We kick the queue here for the following reasons.
@@ -370,6 +384,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
 		rq->flags |= REQ_SORTED;
+		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
 		/*
@@ -692,8 +707,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 
 	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 
-	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-		;
+	elv_drain_elevator(q);
 
 	while (q->rq.elvpriv) {
 		blk_remove_plug(q);
@@ -701,6 +715,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
+		elv_drain_elevator(q);
 	}
 
 	spin_unlock_irq(q->queue_lock);