author     Ming Lei <ming.lei@redhat.com>    2020-06-30 18:24:57 +0800
committer  Jens Axboe <axboe@kernel.dk>      2020-06-30 07:51:48 -0600
commit     445874e89f662c8c6a56e6bb820cbe9fbef61428 (patch)
tree       9bd7fd755f9b0dcc528ac5887689e56aadfb3ad2 /block/blk-mq-sched.c
parent     65c763694398a2de63803b264dcf906b47f9d4c1 (diff)
blk-mq: pass hctx to blk_mq_dispatch_rq_list
All requests in the 'list' of blk_mq_dispatch_rq_list belong to the same
hctx, so it is better to pass the hctx instead of the request queue, because
blk-mq's dispatch target is the hctx rather than the request queue.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Baolin Wang <baolin.wang7@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Baolin Wang <baolin.wang7@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
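For orientation, a hedged sketch of the interface change follows (illustrative
only, not the kernel source verbatim: the budget and driver-tag handling inside
the function is omitted, and the 'got_budget' flag reflects the prototype as of
this commit). Because every request carries a back-pointer to its hardware
queue, any member of the list can name the dispatch target:

    /* Illustrative sketch only, simplified from the kernel definitions. */
    struct blk_mq_hw_ctx;                  /* hardware dispatch queue */

    struct request {
            struct list_head queuelist;    /* links the request into rq_list */
            struct blk_mq_hw_ctx *mq_hctx; /* back-pointer to its hctx */
    };

    /*
     * Old prototype: the caller passed the whole request queue.
     *
     *     bool blk_mq_dispatch_rq_list(struct request_queue *q,
     *                                  struct list_head *list, bool got_budget);
     *
     * New prototype after this commit: the caller passes the hctx directly,
     * typically rq->mq_hctx of a request it just pulled, since all requests
     * on 'list' belong to the same hctx.
     */
    bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx,
                                 struct list_head *list, bool got_budget);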
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--   block/blk-mq-sched.c   14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index a31e281e9d31..632c6f8b63f7 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -96,10 +96,9 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
struct elevator_queue *e = q->elevator;
LIST_HEAD(rq_list);
int ret = 0;
+ struct request *rq;
do {
- struct request *rq;
-
if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
break;
@@ -131,7 +130,7 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
* in blk_mq_dispatch_rq_list().
*/
list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
+ } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
return ret;
}
@@ -161,10 +160,9 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
LIST_HEAD(rq_list);
struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
int ret = 0;
+ struct request *rq;
do {
- struct request *rq;
-
if (!list_empty_careful(&hctx->dispatch)) {
ret = -EAGAIN;
break;
@@ -200,7 +198,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
/* round robin for fair dispatch */
ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
- } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
+ } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
WRITE_ONCE(hctx->dispatch_from, ctx);
return ret;
@@ -240,7 +238,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
- if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
+ if (blk_mq_dispatch_rq_list(hctx, &rq_list, false)) {
if (has_sched_dispatch)
ret = blk_mq_do_dispatch_sched(hctx);
else
@@ -253,7 +251,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
ret = blk_mq_do_dispatch_ctx(hctx);
} else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(q, &rq_list, false);
+ blk_mq_dispatch_rq_list(hctx, &rq_list, false);
}
return ret;