author     Linus Torvalds <torvalds@linux-foundation.org>   2025-10-17 08:31:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2025-10-17 08:31:26 -0700
commit     0c8df15f758a69a7bf88ecf9b7f95dc7db2c463c (patch)
tree       a856a04ee1e3b8249e65a624abd24415b27328f5 /block
parent     cf1ea8854e4fb1341c08f66e6c91da97d038ab6a (diff)
parent     f0624c6646435c1b56652193cce3e34062d50e3f (diff)
Merge tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:
- NVMe pull request via Keith:
    - iostats accounting fixed on multipath retries (Amit)
    - secure concatenation response fixup (Martin)
    - tls partial record fixup (Wilfred)
- Fix for a lockdep-reported issue with the elevator lock and blkg_conf
  frozen operations (the inverted lock ordering is sketched after this
  list)
- Fix for a regression in this merge window where updating 'nr_requests'
  would not do the right thing for queues with shared tags (a simplified
  model of the stale-depth pattern follows the diff below)
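
The elevator-lock fix is purely about lock ordering: per the diff below, the blkg_conf frozen path used to freeze the queue and then take q->elevator_lock, and lockdep reported that this conflicts with the opposite ordering elsewhere. The following is a minimal userspace sketch of that ABBA pattern, assuming plain pthread mutexes as stand-ins for queue freezing and q->elevator_lock; the path names and the second ordering are illustrative, not the actual kernel call chains.

/* Illustrative only: pthread mutexes model "freeze the queue" and
 * q->elevator_lock; the real kernel paths differ. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freeze = PTHREAD_MUTEX_INITIALIZER;   /* models queue freeze */
static pthread_mutex_t elevator = PTHREAD_MUTEX_INITIALIZER; /* models q->elevator_lock */

/* Order 1: freeze, then elevator (what the blkg_conf frozen path did). */
static void *path_a(void *arg)
{
	pthread_mutex_lock(&freeze);
	pthread_mutex_lock(&elevator);
	pthread_mutex_unlock(&elevator);
	pthread_mutex_unlock(&freeze);
	return NULL;
}

/* Order 2 (hypothetical here): elevator, then freeze -- the inverse order. */
static void *path_b(void *arg)
{
	pthread_mutex_lock(&elevator);
	pthread_mutex_lock(&freeze);
	pthread_mutex_unlock(&freeze);
	pthread_mutex_unlock(&elevator);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, path_a, NULL);
	pthread_create(&b, NULL, path_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* If path_a holds "freeze" while path_b holds "elevator", each waits
	 * on the other forever; lockdep flags the inversion even when the
	 * deadlock never actually triggers. */
	puts("done (may deadlock depending on scheduling)");
	return 0;
}

The fix resolves the report by dropping the elevator_lock acquisition from the blkg_conf frozen path entirely, rather than by reordering the locks.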
* tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
nvme/tcp: handle tls partially sent records in write_space()
block: Remove elevator_lock usage from blkg_conf frozen operations
blk-mq: fix stale tag depth for shared sched tags in blk_mq_update_nr_requests()
nvme-auth: update sc_c in host response
nvme-multipath: Skip nr_active increments in RETRY disposition
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   | 13
-rw-r--r--  block/blk-mq-sched.c |  2
-rw-r--r--  block/blk-mq-tag.c   |  5
-rw-r--r--  block/blk-mq.c       |  2
-rw-r--r--  block/blk-mq.h       |  3
5 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f93de34fe87d..3cffb68ba5d8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
 }
 /*
  * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
+ * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
  *
  * This function returns negative error on failure. On success it returns
  * memflags which must be saved and later passed to blkg_conf_exit_frozen
@@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
 	 * At this point, we haven’t started protecting anything related to QoS,
 	 * so we release q->rq_qos_mutex here, which was first acquired in blkg_
 	 * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
-	 * the queue and acquiring q->elevator_lock to maintain the correct
-	 * locking order.
+	 * the queue to maintain the correct locking order.
 	 */
 	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
 
 	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
-	mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
 	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
 
 	return memflags;
@@ -995,9 +992,8 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
 EXPORT_SYMBOL_GPL(blkg_conf_exit);
 
 /*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
+ * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
+ * when blkg_conf_open_bdev_frozen is used to open the bdev.
  */
 void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
 {
@@ -1005,7 +1001,6 @@ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
 		struct request_queue *q = ctx->bdev->bd_queue;
 
 		blkg_conf_exit(ctx);
-		mutex_unlock(&q->elevator_lock);
 		blk_mq_unfreeze_queue(q, memflags);
 	}
 }
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index d06bb137a743..e0bed16485c3 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
 	if (blk_mq_is_shared_tags(flags)) {
 		/* Shared tags are stored at index 0 in @et->tags. */
 		q->sched_shared_tags = et->tags[0];
-		blk_mq_tag_update_sched_shared_tags(q);
+		blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
 	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c7a4d4b9cc87..5b664dbdf655 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size
 	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }
 
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+					 unsigned int nr)
 {
 	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
-			     q->nr_requests - q->tag_set->reserved_tags);
+			     nr - q->tag_set->reserved_tags);
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 09f579414161..d626d32f6e57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
 		 * tags can't grow, see blk_mq_alloc_sched_tags().
 		 */
 		if (q->elevator)
-			blk_mq_tag_update_sched_shared_tags(q);
+			blk_mq_tag_update_sched_shared_tags(q, nr);
 		else
 			blk_mq_tag_resize_shared_tags(set, nr);
 	} else if (!q->elevator) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index af42dc018808..c4fccdeb5441 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
 		unsigned int size);
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+		unsigned int nr);
 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,