summary | refs | log | tree | commit | diff
path: root/block
diff options
context:
space:
mode:
authorYu Kuai <yukuai@fnnas.com>2026-02-03 16:19:44 +0800
committerJens Axboe <axboe@kernel.dk>2026-02-03 07:45:36 -0700
commitcf02d7d41b064af3e2c3a3a1ea9042a5b565b0d8 (patch)
treeb2dbb252f8b959a7e7f6be945788b9d3a8b1778f /block
parent1db61b0afdd7e8aa9289c423fdff002603b520b5 (diff)
blk-mq: factor out a helper blk_mq_limit_depth()
There are no functional changes, just make code cleaner. Signed-off-by: Yu Kuai <yukuai@fnnas.com> Reviewed-by: Hannes Reinecke <hare@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq.c62
1 file changed, 37 insertions(+), 25 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cf1daedbb39f..b7b272e856b8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -498,6 +498,42 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
return rq_list_pop(data->cached_rqs);
}
+static void blk_mq_limit_depth(struct blk_mq_alloc_data *data)
+{
+ struct elevator_mq_ops *ops;
+
+ /* If no I/O scheduler has been configured, don't limit requests */
+ if (!data->q->elevator) {
+ blk_mq_tag_busy(data->hctx);
+ return;
+ }
+
+ /*
+ * All requests use scheduler tags when an I/O scheduler is
+ * enabled for the queue.
+ */
+ data->rq_flags |= RQF_SCHED_TAGS;
+
+ /*
+ * Flush/passthrough requests are special and go directly to the
+ * dispatch list, they are not subject to the async_depth limit.
+ */
+ if ((data->cmd_flags & REQ_OP_MASK) == REQ_OP_FLUSH ||
+ blk_op_is_passthrough(data->cmd_flags))
+ return;
+
+ WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
+ data->rq_flags |= RQF_USE_SCHED;
+
+ /*
+ * By default, sync requests have no limit, and async requests are
+ * limited to async_depth.
+ */
+ ops = &data->q->elevator->type->ops;
+ if (ops->limit_depth)
+ ops->limit_depth(data->cmd_flags, data);
+}
+
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
struct request_queue *q = data->q;
@@ -516,31 +552,7 @@ retry:
data->ctx = blk_mq_get_ctx(q);
data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
- if (q->elevator) {
- /*
- * All requests use scheduler tags when an I/O scheduler is
- * enabled for the queue.
- */
- data->rq_flags |= RQF_SCHED_TAGS;
-
- /*
- * Flush/passthrough requests are special and go directly to the
- * dispatch list.
- */
- if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
- !blk_op_is_passthrough(data->cmd_flags)) {
- struct elevator_mq_ops *ops = &q->elevator->type->ops;
-
- WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
-
- data->rq_flags |= RQF_USE_SCHED;
- if (ops->limit_depth)
- ops->limit_depth(data->cmd_flags, data);
- }
- } else {
- blk_mq_tag_busy(data->hctx);
- }
-
+ blk_mq_limit_depth(data);
if (data->flags & BLK_MQ_REQ_RESERVED)
data->rq_flags |= RQF_RESV;