author		Fengnan Chang <fengnanchang@gmail.com>	2025-11-28 16:53:13 +0800
committer	Jens Axboe <axboe@kernel.dk>		2025-11-28 09:09:19 -0700
commit		d0c98769ee7d5db8d699a270690639cde1766cd4 (patch)
tree		736482908a880ed3a624edbcf4cec18c68cd044b
parent		c6a45ee7607de3a350008630f4369b1b5ac80884 (diff)
blk-mq: use array to manage hctx map instead of xarray
After commit 4e5cc99e1e48 ("blk-mq: manage hctx map via xarray"), we use an xarray instead of a plain array to store the hctx pointers. In poll mode, however, every call to blk_mq_poll needs to use xa_load to find the corresponding hctx, and this lookup introduces measurable cost: in my test, xa_load accounted for 3.8% of CPU time. Revert to the array-based scheme; this eliminates the xa_load overhead and yields about a 3% performance improvement.

Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
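As a rough illustration of the hot-path difference the message describes, here is a minimal userspace C sketch (not kernel code; struct hw_ctx and poll_lookup are made-up stand-ins for blk_mq_hw_ctx and the poll-path lookup): with a plain pointer array, resolving a poll cookie is a single indexed load, whereas the xarray path had to call xa_load() and walk the tree on every poll.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct blk_mq_hw_ctx. */
struct hw_ctx {
	unsigned int queue_num;
};

/*
 * After the revert the poll path is one O(1) indexed load,
 * q->queue_hw_ctx[cookie]; before it, xa_load() walked the xarray on
 * every poll, which is where the reported 3.8% of CPU time went.
 */
static struct hw_ctx *poll_lookup(struct hw_ctx **queue_hw_ctx,
				  unsigned int cookie)
{
	return queue_hw_ctx[cookie];
}

int main(void)
{
	unsigned int nr_hw_queues = 4;
	struct hw_ctx **tbl = calloc(nr_hw_queues, sizeof(*tbl));

	if (!tbl)
		return 1;
	for (unsigned int i = 0; i < nr_hw_queues; i++) {
		tbl[i] = malloc(sizeof(**tbl));
		tbl[i]->queue_num = i;
	}
	printf("cookie 2 -> hctx %u\n", poll_lookup(tbl, 2)->queue_num);

	for (unsigned int i = 0; i < nr_hw_queues; i++)
		free(tbl[i]);
	free(tbl);
	return 0;
}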
-rw-r--r--  block/blk-mq-tag.c      |  2
-rw-r--r--  block/blk-mq.c          | 58
-rw-r--r--  block/blk-mq.h          |  2
-rw-r--r--  include/linux/blk-mq.h  |  3
-rw-r--r--  include/linux/blkdev.h  |  2
5 files changed, 42 insertions(+), 25 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5b664dbdf655..33946cdb5716 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -499,7 +499,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 	int srcu_idx;
 
 	/*
-	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
+	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
 	 * while the queue is frozen. So we can use q_usage_counter to avoid
 	 * racing with it.
 	 */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2650c97a75e..1ef81110eb8a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -730,7 +730,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	 * If not tell the caller that it should skip this queue.
 	 */
 	ret = -EXDEV;
-	data.hctx = xa_load(&q->hctx_table, hctx_idx);
+	data.hctx = q->queue_hw_ctx[hctx_idx];
 	if (!blk_mq_hw_queue_mapped(data.hctx))
 		goto out_queue_exit;
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
@@ -3946,8 +3946,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 			blk_free_flush_queue_callback);
 	hctx->fq = NULL;
 
-	xa_erase(&q->hctx_table, hctx_idx);
-
 	spin_lock(&q->unused_hctx_lock);
 	list_add(&hctx->hctx_list, &q->unused_hctx_list);
 	spin_unlock(&q->unused_hctx_lock);
@@ -3989,14 +3987,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 				hctx->numa_node))
 		goto exit_hctx;
 
-	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
-		goto exit_flush_rq;
-
 	return 0;
 
- exit_flush_rq:
-	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -4385,7 +4377,7 @@ void blk_mq_release(struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	xa_destroy(&q->hctx_table);
+	kfree(q->queue_hw_ctx);
 
 	/*
 	 * release .mq_kobj and sw queue's kobject now because
@@ -4529,26 +4521,44 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 						struct request_queue *q)
 {
-	struct blk_mq_hw_ctx *hctx;
-	unsigned long i, j;
+	int i, j, end;
+	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
+
+	if (q->nr_hw_queues < set->nr_hw_queues) {
+		struct blk_mq_hw_ctx **new_hctxs;
+
+		new_hctxs = kcalloc_node(set->nr_hw_queues,
+					 sizeof(*new_hctxs), GFP_KERNEL,
+					 set->numa_node);
+		if (!new_hctxs)
+			return;
+		if (hctxs)
+			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+			       sizeof(*hctxs));
+		q->queue_hw_ctx = new_hctxs;
+		kfree(hctxs);
+		hctxs = new_hctxs;
+	}
 
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
-		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
+		struct blk_mq_hw_ctx *old_hctx = hctxs[i];
 
 		if (old_hctx) {
 			old_node = old_hctx->numa_node;
 			blk_mq_exit_hctx(q, set, old_hctx, i);
 		}
 
-		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
+		hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
+		if (!hctxs[i]) {
 			if (!old_hctx)
 				break;
 			pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
 					node, old_node);
-			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
-			WARN_ON_ONCE(!hctx);
+			hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
+					old_node);
+			WARN_ON_ONCE(!hctxs[i]);
 		}
 	}
 	/*
@@ -4557,13 +4567,21 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	 */
 	if (i != set->nr_hw_queues) {
 		j = q->nr_hw_queues;
+		end = i;
 	} else {
 		j = i;
+		end = q->nr_hw_queues;
 		q->nr_hw_queues = set->nr_hw_queues;
 	}
 
-	xa_for_each_start(&q->hctx_table, j, hctx, j)
-		blk_mq_exit_hctx(q, set, hctx, j);
+	for (; j < end; j++) {
+		struct blk_mq_hw_ctx *hctx = hctxs[j];
+
+		if (hctx) {
+			blk_mq_exit_hctx(q, set, hctx, j);
+			hctxs[j] = NULL;
+		}
+	}
 }
 
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
@@ -4599,8 +4617,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->unused_hctx_list);
 	spin_lock_init(&q->unused_hctx_lock);
 
-	xa_init(&q->hctx_table);
-
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
@@ -5187,7 +5203,7 @@ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 {
 	if (!blk_mq_can_poll(q))
 		return 0;
-	return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
+	return blk_hctx_poll(q, q->queue_hw_ctx[cookie], iob, flags);
 }
 
 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
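__blk_mq_realloc_hw_ctxs() above never shrinks the table: when set->nr_hw_queues grows past the current size it allocates a larger zeroed array, copies the old pointers across, and frees the old array. Below is a minimal userspace sketch of that grow-and-copy pattern, with calloc/memcpy/free standing in for kcalloc_node/memcpy/kfree and grow_table as a hypothetical name (error handling in main is elided).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Grow a pointer table to new_nr slots, keeping the first old_nr entries;
 * new slots come back zeroed, mirroring kcalloc_node() in the hunk above.
 * On allocation failure the old table is left untouched, just as
 * __blk_mq_realloc_hw_ctxs() returns early when kcalloc_node() fails.
 */
static void **grow_table(void **tbl, size_t old_nr, size_t new_nr)
{
	void **new_tbl;

	if (new_nr <= old_nr)		/* the table only ever grows */
		return tbl;

	new_tbl = calloc(new_nr, sizeof(*new_tbl));
	if (!new_tbl)
		return NULL;
	if (tbl)
		memcpy(new_tbl, tbl, old_nr * sizeof(*tbl));
	free(tbl);
	return new_tbl;
}

int main(void)
{
	int a = 1, b = 2;
	void **tbl = NULL;

	tbl = grow_table(tbl, 0, 2);	/* first allocation */
	tbl[0] = &a;
	tbl[1] = &b;
	tbl = grow_table(tbl, 2, 4);	/* grow, old entries preserved */
	printf("tbl[1] -> %d, tbl[3] -> %p\n", *(int *)tbl[1], tbl[3]);
	free(tbl);
	return 0;
}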
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c4fccdeb5441..80a3f0c2bce7 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -84,7 +84,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
 						     enum hctx_type type,
 						     unsigned int cpu)
 {
-	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
+	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
 static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
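blk_mq_map_queue_type() above resolves a CPU and queue type to a hardware context with two flat array loads: the per-type CPU map yields a hardware queue index, and the restored pointer array yields the hctx. A small userspace model of that double indirection (all names here are illustrative, not kernel API):

#include <stdio.h>

#define NR_CPUS	4

struct hw_ctx { unsigned int queue_num; };

/* mq_map: which hardware queue each CPU dispatches to. */
static unsigned int mq_map[NR_CPUS] = { 0, 0, 1, 1 };

static struct hw_ctx hctx0 = { 0 }, hctx1 = { 1 };
static struct hw_ctx *queue_hw_ctx[] = { &hctx0, &hctx1 };

/* Two array loads, no tree walk: queue_hw_ctx[mq_map[cpu]]. */
static struct hw_ctx *map_queue(unsigned int cpu)
{
	return queue_hw_ctx[mq_map[cpu]];
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hctx %u\n", cpu, map_queue(cpu)->queue_num);
	return 0;
}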
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b54506b3b76d..9208ff90ae16 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1016,7 +1016,8 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)				\
-	xa_for_each(&(q)->hctx_table, (i), (hctx))
+	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
+	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)					\
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
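The replacement queue_for_each_hw_ctx above leans on a GNU C statement expression: ({ hctx = (q)->queue_hw_ctx[i]; 1; }) assigns the cursor as a side effect and always evaluates to 1, so only the nr_hw_queues bound can end the loop. A standalone sketch of the same idiom (for_each_slot is a made-up macro; build with gcc or clang, which support statement expressions):

#include <stdio.h>

/*
 * Same shape as queue_for_each_hw_ctx: the statement expression assigns
 * the cursor and yields 1, so the bound alone terminates the loop.
 */
#define for_each_slot(tbl, nr, p, i)				\
	for ((i) = 0; (i) < (nr) &&				\
	     ({ (p) = (tbl)[i]; 1; }); (i)++)

int main(void)
{
	const char *names[] = { "hctx0", "hctx1", "hctx2" };
	const char *p;
	unsigned int i;

	for_each_slot(names, 3, p, i)
		printf("%u: %s\n", i, p);
	return 0;
}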
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cb4ba09959ee..6195f89648db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -503,7 +503,7 @@ struct request_queue {
 	/* hw dispatch queues */
 	unsigned int nr_hw_queues;
-	struct xarray hctx_table;
+	struct blk_mq_hw_ctx **queue_hw_ctx;
 
 	struct percpu_ref q_usage_counter;
 	struct lock_class_key io_lock_cls_key;