block-6.18-20251016
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmjxoHoQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgprKCD/4irkkA7mBorYyNROXMwANOUg+2pl20xp/X
8reZsIZKWztUS18Emfg2jS2NIXP6LIFc3ZehfJX/9FrM26B9URH9cq/F/D/mHc/+
G4qfT5HUR5Eyav0qCP+pbru53irUOUWSUKKgrWRR8gDY9BcT7apjV8pULd/1PAfo
3XLfY2o39u68TBzmcwZvDudtBFcBfSan/JCIiW6IMxHWerHhV+IEJG5ABncFo8n9
+Ep5uOVWYQanM1lvat+Zy+aiWz0Fb0yYzXvtDatcGsAxfxJIf2Bs8ryZMAxgw7yk
B9Jsd5kGTw9Tfn/H7kl2P4RGQ0gGr91dl0FmaUkDMXTyZcsz/Nq2PbwiiJaESp/4
Ixk3m9QjXpA6ofxAeorXFtTo98obnKklZLpCPzV5sqslzpGSWXdsbPmHOB5A4XcH
M2QT/uM2eZbUtHUkymoUBMJTcqCfUsL827+Z6DGLl+Rrb0bjRvunlVCoxoTPuAeg
ulOpuWd888Gy3X8lT7vBhY/9iWyljFwja/suiFx3f29e2DULXiXDTXrrA0GIxcO+
l7PA7BgcMf/0lJfo2gpGtPZvHYvtFXoRwxGaIGbusXixgg/dLY2LQ64BYPpu0JU8
Ph3xsL3pgLLPFVMBarHqwKoSb/4avOvzMaM7xGQQICg+0Gx2x9YPpLosaMgbdYPY
OFPSLihwUg==
=qiT3
-----END PGP SIGNATURE-----

Merge tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - iostats accounting fixed on multipath retries (Amit)
      - secure concatenation response fixup (Martin)
      - tls partial record fixup (Wilfred)

 - Fix for a lockdep reported issue with the elevator lock and blk
   group frozen operations

 - Fix for a regression in this merge window, where updating
   'nr_requests' would not do the right thing for queues with shared
   tags

* tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  nvme/tcp: handle tls partially sent records in write_space()
  block: Remove elevator_lock usage from blkg_conf frozen operations
  blk-mq: fix stale tag depth for shared sched tags in blk_mq_update_nr_requests()
  nvme-auth: update sc_c in host response
  nvme-multipath: Skip nr_active increments in RETRY disposition
commit 0c8df15f75
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
 }
 /*
  * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
+ * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
  *
  * This function returns negative error on failure. On success it returns
  * memflags which must be saved and later passed to blkg_conf_exit_frozen
@@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
	 * At this point, we haven't started protecting anything related to QoS,
	 * so we release q->rq_qos_mutex here, which was first acquired in blkg_
	 * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
-	 * the queue and acquiring q->elevator_lock to maintain the correct
-	 * locking order.
+	 * the queue to maintain the correct locking order.
	 */
	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);

	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
-	mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);

	return memflags;
@@ -995,9 +992,8 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
 EXPORT_SYMBOL_GPL(blkg_conf_exit);

 /*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
+ * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
+ * when blkg_conf_open_bdev_frozen is used to open the bdev.
  */
 void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
 {
@@ -1005,7 +1001,6 @@ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
		struct request_queue *q = ctx->bdev->bd_queue;

		blkg_conf_exit(ctx);
-		mutex_unlock(&q->elevator_lock);
		blk_mq_unfreeze_queue(q, memflags);
	}
 }
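Taken together, the blk-cgroup hunks drop q->elevator_lock from the frozen open/exit pair, leaving the ordering as: freeze queue, then q->rq_qos_mutex. A sketch of how a policy handler pairs the two helpers (kernel-context code, not buildable standalone; the QoS update placeholder and the error-path pairing are assumptions, only the blkg_conf_* calls come from this file):

/* Hypothetical policy handler using the helpers patched above. */
static ssize_t example_qos_write(struct blkg_conf_ctx *ctx, char *input)
{
	unsigned long memflags;

	blkg_conf_init(ctx, input);

	/* Freezes the queue, then takes q->rq_qos_mutex (no elevator_lock). */
	memflags = blkg_conf_open_bdev_frozen(ctx);
	if (IS_ERR_VALUE(memflags)) {
		blkg_conf_exit(ctx);	/* assumed error-path pairing */
		return (ssize_t)memflags;
	}

	/* ... update per-blkg QoS state while the queue is frozen ... */

	/* Unlocks q->rq_qos_mutex, then unfreezes with the saved memflags. */
	blkg_conf_exit_frozen(ctx, memflags);
	return 0;
}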
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
	if (blk_mq_is_shared_tags(flags)) {
		/* Shared tags are stored at index 0 in @et->tags. */
		q->sched_shared_tags = et->tags[0];
-		blk_mq_tag_update_sched_shared_tags(q);
+		blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
	}

	queue_for_each_hw_ctx(q, hctx, i) {
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size
	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }

-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+					 unsigned int nr)
 {
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
-			     q->nr_requests - q->tag_set->reserved_tags);
+			     nr - q->tag_set->reserved_tags);
 }

 /**
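The signature change matters because blk_mq_update_nr_requests() resizes the shared sched tags before it writes the new value into q->nr_requests, so the old helper read a stale depth. A self-contained user-space model of that ordering bug and the fix (all names illustrative, not kernel code):

#include <stdio.h>

struct queue_model {
	unsigned int nr_requests;	/* updated by the caller, late */
	unsigned int sched_tag_depth;	/* what the resize produces */
};

/* Old behavior: reads the not-yet-updated field. */
static void resize_stale(struct queue_model *q)
{
	q->sched_tag_depth = q->nr_requests;
}

/* Fixed behavior: the caller passes the new depth explicitly. */
static void resize_fixed(struct queue_model *q, unsigned int nr)
{
	q->sched_tag_depth = nr;
}

int main(void)
{
	struct queue_model q = { .nr_requests = 64, .sched_tag_depth = 64 };
	unsigned int nr = 128;		/* new value requested via sysfs */

	resize_stale(&q);
	printf("stale: depth=%u (wanted 128)\n", q.sched_tag_depth);

	resize_fixed(&q, nr);
	q.nr_requests = nr;		/* caller updates the field afterwards */
	printf("fixed: depth=%u\n", q.sched_tag_depth);
	return 0;
}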
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
		 * tags can't grow, see blk_mq_alloc_sched_tags().
		 */
		if (q->elevator)
-			blk_mq_tag_update_sched_shared_tags(q);
+			blk_mq_tag_update_sched_shared_tags(q, nr);
		else
			blk_mq_tag_resize_shared_tags(set, nr);
	} else if (!q->elevator) {
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+		unsigned int nr);

 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -36,6 +36,7 @@ struct nvme_dhchap_queue_context {
	u8 status;
	u8 dhgroup_id;
	u8 hash_id;
+	u8 sc_c;
	size_t hash_len;
	u8 c1[64];
	u8 c2[64];
@@ -154,6 +155,8 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

+	chap->sc_c = data->sc_c;
+
	return size;
 }

@@ -489,7 +492,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
-	memset(buf, 0, sizeof(buf));
+	*buf = chap->sc_c;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
@@ -500,6 +503,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
+	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
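For orientation, the host response is an HMAC over an ordered transcript, and the two setup_host_response hunks change one byte of it: the sc_c value saved from the negotiate data is hashed where a hard-coded zero byte used to be, and the scratch buffer is re-zeroed afterwards so the later zero separator stays a zero. A rough, runnable map of that transcript; fields outside the hunks are assumptions from context, not from this diff:

#include <stdio.h>

struct field {
	const char *name;
	int len;	/* -1 = variable length */
};

static const struct field host_response_order[] = {
	{ "challenge c1",		-1 },	/* hash_len bytes (assumed) */
	{ "sequence number s1",		 4 },	/* assumed, not in the hunk */
	{ "transaction id",		 2 },	/* the buf,2 update above */
	{ "sc_c",			 1 },	/* zero byte before this fix */
	{ "role string \"HostHost\"",	 8 },	/* assumed */
	{ "host NQN",			-1 },	/* strlen(host->nqn) */
	{ "zero separator",		 1 },	/* needs the re-added memset */
	{ "subsystem NQN",		-1 },	/* assumed to follow */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(host_response_order) /
			sizeof(host_response_order[0]); i++)
		printf("%u: %s (%d bytes)\n", i, host_response_order[i].name,
		       host_response_order[i].len);
	return 0;
}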
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -182,12 +182,14 @@ void nvme_mpath_start_request(struct request *rq)
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns->head->disk;

-	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+	if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
+	    !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
		atomic_inc(&ns->ctrl->nr_active);
		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
	}

-	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
+	    (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
		return;

	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
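The QD iopolicy steers I/O by comparing nr_active across paths, and a retried request re-enters nvme_mpath_start_request() with NVME_MPATH_CNT_ACTIVE already set; incrementing again skews the counter, since completion decrements it only once. The same reasoning motivates the NVME_MPATH_IO_STATS guard. A minimal single-threaded user-space model of the flag-guarded counter (names illustrative):

#include <stdio.h>

#define CNT_ACTIVE	(1u << 0)	/* stands in for NVME_MPATH_CNT_ACTIVE */

struct req_model {
	unsigned int flags;
};

static int nr_active;

/* Start path accounting; safe to call again when the request is retried. */
static void start_request(struct req_model *rq)
{
	if (!(rq->flags & CNT_ACTIVE)) {
		nr_active++;
		rq->flags |= CNT_ACTIVE;
	}
}

/* Completion drops the count exactly once. */
static void end_request(struct req_model *rq)
{
	if (rq->flags & CNT_ACTIVE) {
		nr_active--;
		rq->flags &= ~CNT_ACTIVE;
	}
}

int main(void)
{
	struct req_model rq = { 0 };

	start_request(&rq);
	start_request(&rq);	/* retry: no second increment */
	end_request(&rq);
	printf("nr_active=%d\n", nr_active);	/* 0: counter stays balanced */
	return 0;
}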
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1081,6 +1081,9 @@ static void nvme_tcp_write_space(struct sock *sk)
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		/* Ensure pending TLS partial records are retried */
+		if (nvme_tcp_queue_tls(queue))
+			queue->write_space(sk);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
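When the queue runs over TLS, a partially sent TLS record is owned by the kTLS layer, so kicking io_work alone cannot make progress until the remainder of that record is flushed; the fix chains to queue->write_space, the callback that was in place before nvme-tcp installed its own hook. A small runnable user-space model of that save-and-chain pattern (all names illustrative, none are nvme-tcp symbols):

#include <stdio.h>

typedef void (*write_space_fn)(void *sk);

struct sock_model {
	write_space_fn write_space;	/* currently installed hook */
};

struct queue_model {
	write_space_fn saved_write_space;	/* hook we displaced */
	int tls;
};

static struct sock_model sk_model;
static struct queue_model q_model;

/* Stands in for the kTLS hook that retries a partially sent record. */
static void tls_write_space(void *sk)
{
	printf("tls: retry partially sent record\n");
}

/* Stands in for the driver hook: chain to the saved callback first. */
static void driver_write_space(void *sk)
{
	if (q_model.tls)
		q_model.saved_write_space(sk);
	printf("driver: kick io_work\n");
}

int main(void)
{
	sk_model.write_space = tls_write_space;		/* TLS installed first */
	q_model.saved_write_space = sk_model.write_space;	/* save it */
	sk_model.write_space = driver_write_space;	/* then replace it */
	q_model.tls = 1;

	sk_model.write_space(&sk_model);	/* socket reports free space */
	return 0;
}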