
Commit 95ed0c5

isilence authored and axboe committed
blk-mq: optimise blk_mq_flush_plug_list()
Instead of using list_del_init() in a loop, which generates a lot of unnecessary memory reads and writes, iterate from the first request of a batch and cut out a sublist with list_cut_before(). Apart from removing the list-node initialisation, this is more register-friendly, and the resulting assembly uses the stack less intensively. The list_empty() check at the beginning is done in the hope that the compiler can optimise out the same check in the following list_splice_init().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 28ca0d6 · commit 95ed0c5

1 file changed: block/blk-mq.c (19 additions, 38 deletions)
@@ -1678,57 +1678,38 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-        struct blk_mq_hw_ctx *this_hctx;
-        struct blk_mq_ctx *this_ctx;
-        struct request_queue *this_q;
-        struct request *rq;
         LIST_HEAD(list);
-        LIST_HEAD(rq_list);
-        unsigned int depth;
 
+        if (list_empty(&plug->mq_list))
+                return;
         list_splice_init(&plug->mq_list, &list);
 
         if (plug->rq_count > 2 && plug->multiple_queues)
                 list_sort(NULL, &list, plug_rq_cmp);
 
         plug->rq_count = 0;
 
-        this_q = NULL;
-        this_hctx = NULL;
-        this_ctx = NULL;
-        depth = 0;
-
-        while (!list_empty(&list)) {
-                rq = list_entry_rq(list.next);
-                list_del_init(&rq->queuelist);
-                BUG_ON(!rq->q);
-                if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
-                        if (this_hctx) {
-                                trace_block_unplug(this_q, depth, !from_schedule);
-                                blk_mq_sched_insert_requests(this_hctx, this_ctx,
-                                                             &rq_list,
-                                                             from_schedule);
-                        }
-
-                        this_q = rq->q;
-                        this_ctx = rq->mq_ctx;
-                        this_hctx = rq->mq_hctx;
-                        depth = 0;
+        do {
+                struct list_head rq_list;
+                struct request *rq, *head_rq = list_entry_rq(list.next);
+                struct list_head *pos = &head_rq->queuelist; /* skip first */
+                struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
+                struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
+                unsigned int depth = 1;
+
+                list_for_each_continue(pos, &list) {
+                        rq = list_entry_rq(pos);
+                        BUG_ON(!rq->q);
+                        if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
+                                break;
+                        depth++;
                 }
 
-                depth++;
-                list_add_tail(&rq->queuelist, &rq_list);
-        }
-
-        /*
-         * If 'this_hctx' is set, we know we have entries to complete
-         * on 'rq_list'. Do those.
-         */
-        if (this_hctx) {
-                trace_block_unplug(this_q, depth, !from_schedule);
+                list_cut_before(&rq_list, &list, pos);
+                trace_block_unplug(head_rq->q, depth, !from_schedule);
                 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
                                              from_schedule);
-        }
+        } while(!list_empty(&list));
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
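For reference, here is a minimal standalone C sketch of the batching pattern the commit message describes: walk a run of same-queue requests without touching their links, then detach the whole run with a single list_cut_before() splice instead of a per-node list_del_init(). The list helpers below are simplified stand-ins modelled on the kernel's include/linux/list.h; struct req, queue_id and the sample data are hypothetical illustrations, not kernel code.

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive doubly-linked list, modelled on include/linux/list.h. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/*
 * Move the initial part of @head, up to but excluding @entry, onto @list.
 * A single splice touches only four nodes no matter how long the sublist
 * is -- this is what replaces the per-request list_del_init() in the commit.
 */
static void list_cut_before(struct list_head *list, struct list_head *head,
                            struct list_head *entry)
{
        if (head->next == entry) {
                INIT_LIST_HEAD(list);
                return;
        }
        list->next = head->next;
        list->next->prev = list;
        list->prev = entry->prev;
        list->prev->next = list;
        head->next = entry;
        entry->prev = head;
}

/* Hypothetical payload standing in for struct request. */
struct req {
        int queue_id;            /* stand-in for the (mq_hctx, mq_ctx) pair */
        struct list_head node;   /* stand-in for rq->queuelist */
};

#define req_of(pos) ((struct req *)((char *)(pos) - offsetof(struct req, node)))

int main(void)
{
        struct list_head list = LIST_HEAD_INIT(list);
        struct req reqs[] = { {1}, {1}, {1}, {2}, {2}, {3} };

        for (size_t i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
                list_add_tail(&reqs[i].node, &list);

        /* Same shape as the new do/while loop in blk_mq_flush_plug_list(). */
        do {
                struct list_head batch;
                struct req *head_rq = req_of(list.next);
                struct list_head *pos = head_rq->node.next; /* skip first */
                unsigned int depth = 1;

                /* Walk until the queue changes; no link updates yet. */
                while (pos != &list &&
                       req_of(pos)->queue_id == head_rq->queue_id) {
                        pos = pos->next;
                        depth++;
                }

                /* One splice detaches the whole batch at once. */
                list_cut_before(&batch, &list, pos);
                printf("dispatch %u request(s) for queue %d\n",
                       depth, head_rq->queue_id);
        } while (!list_empty(&list));

        return 0;
}

Running this prints one dispatch line per batch (3 for queue 1, 2 for queue 2, 1 for queue 3), mirroring how each trace_block_unplug()/blk_mq_sched_insert_requests() pair in the patched kernel loop handles a whole run of requests per iteration.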
