mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-04-26 14:17:26 -04:00
block: add mq_ops->queue_rqs hook
If we have a list of requests in our plug list, send it to the driver in
one go, if possible. The driver must set mq_ops->queue_rqs() to support
this; if not, the usual one-by-one path is used.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
fcade2ce06
commit
3c67d44de7
2 changed files with 31 additions and 3 deletions
|
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2553,6 +2553,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct blk_mq_hw_ctx *this_hctx;
 	struct blk_mq_ctx *this_ctx;
+	struct request *rq;
 	unsigned int depth;
 	LIST_HEAD(list);
 
@@ -2561,7 +2562,28 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	plug->rq_count = 0;
 
 	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-		struct request_queue *q = rq_list_peek(&plug->mq_list)->q;
+		struct request_queue *q;
+
+		rq = rq_list_peek(&plug->mq_list);
+		q = rq->q;
+
+		/*
+		 * Peek first request and see if we have a ->queue_rqs() hook.
+		 * If we do, we can dispatch the whole plug list in one go. We
+		 * already know at this point that all requests belong to the
+		 * same queue, caller must ensure that's the case.
+		 *
+		 * Since we pass off the full list to the driver at this point,
+		 * we do not increment the active request count for the queue.
+		 * Bypass shared tags for now because of that.
+		 */
+		if (q->mq_ops->queue_rqs &&
+		    !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
+			blk_mq_run_dispatch_ops(q,
+				q->mq_ops->queue_rqs(&plug->mq_list));
+			if (rq_list_empty(plug->mq_list))
+				return;
+		}
 
 		blk_mq_run_dispatch_ops(q,
 				blk_mq_plug_issue_direct(plug, false));
@@ -2573,8 +2595,6 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	this_ctx = NULL;
 	depth = 0;
 	do {
-		struct request *rq;
-
 		rq = rq_list_pop(&plug->mq_list);
 
 		if (!this_hctx) {
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -492,6 +492,14 @@ struct blk_mq_ops {
 	 */
 	void (*commit_rqs)(struct blk_mq_hw_ctx *);
 
+	/**
+	 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
+	 * that each request belongs to the same queue. If the driver doesn't
+	 * empty the @rqlist completely, then the rest will be queued
+	 * individually by the block layer upon return.
+	 */
+	void (*queue_rqs)(struct request **rqlist);
+
 	/**
 	 * @get_budget: Reserve budget before queue request, once .queue_rq is
 	 * run, it is driver's responsibility to release the
Loading…
Add table
Add a link
Reference in a new issue