* [PATCH AUTOSEL 6.1 03/17] nvme: tcp: avoid race between queue_lock lock and destroy
From: Sasha Levin @ 2024-10-23 14:31 UTC
To: linux-kernel, stable
Cc: Hannes Reinecke, Shin'ichiro Kawasaki, Keith Busch,
Sasha Levin, sagi, linux-nvme
From: Hannes Reinecke <hare@suse.de>
[ Upstream commit 782373ba27660ba7d330208cf5509ece6feb4545 ]
Commit 76d54bf20cdc ("nvme-tcp: don't access released socket during
error recovery") added a mutex_lock() call for the queue->queue_lock
in nvme_tcp_get_address(). However, the mutex_lock() races with
mutex_destroy() in nvme_tcp_free_queue(), and causes the WARN below.
DEBUG_LOCKS_WARN_ON(lock->magic != lock)
WARNING: CPU: 3 PID: 34077 at kernel/locking/mutex.c:587 __mutex_lock+0xcf0/0x1220
Modules linked in: nvmet_tcp nvmet nvme_tcp nvme_fabrics iw_cm ib_cm ib_core pktcdvd nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ip_set nf_tables qrtr sunrpc ppdev 9pnet_virtio 9pnet pcspkr netfs parport_pc parport e1000 i2c_piix4 i2c_smbus loop fuse nfnetlink zram bochs drm_vram_helper drm_ttm_helper ttm drm_kms_helper xfs drm sym53c8xx floppy nvme scsi_transport_spi nvme_core nvme_auth serio_raw ata_generic pata_acpi dm_multipath qemu_fw_cfg [last unloaded: ib_uverbs]
CPU: 3 UID: 0 PID: 34077 Comm: udisksd Not tainted 6.11.0-rc7 #319
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-2.fc40 04/01/2014
RIP: 0010:__mutex_lock+0xcf0/0x1220
Code: 08 84 d2 0f 85 c8 04 00 00 8b 15 ef b6 c8 01 85 d2 0f 85 78 f4 ff ff 48 c7 c6 20 93 ee af 48 c7 c7 60 91 ee af e8 f0 a7 6d fd <0f> 0b e9 5e f4 ff ff 48 b8 00 00 00 00 00 fc ff df 4c 89 f2 48 c1
RSP: 0018:ffff88811305f760 EFLAGS: 00010286
RAX: 0000000000000000 RBX: ffff88812c652058 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000001
RBP: ffff88811305f8b0 R08: 0000000000000001 R09: ffffed1075c36341
R10: ffff8883ae1b1a0b R11: 0000000000010498 R12: 0000000000000000
R13: 0000000000000000 R14: dffffc0000000000 R15: ffff88812c652058
FS: 00007f9713ae4980(0000) GS:ffff8883ae180000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fcd78483c7c CR3: 0000000122c38000 CR4: 00000000000006f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
<TASK>
? __warn.cold+0x5b/0x1af
? __mutex_lock+0xcf0/0x1220
? report_bug+0x1ec/0x390
? handle_bug+0x3c/0x80
? exc_invalid_op+0x13/0x40
? asm_exc_invalid_op+0x16/0x20
? __mutex_lock+0xcf0/0x1220
? nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp]
? __pfx___mutex_lock+0x10/0x10
? __lock_acquire+0xd6a/0x59e0
? nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp]
nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp]
? __pfx_nvme_tcp_get_address+0x10/0x10 [nvme_tcp]
nvme_sysfs_show_address+0x81/0xc0 [nvme_core]
dev_attr_show+0x42/0x80
? __asan_memset+0x1f/0x40
sysfs_kf_seq_show+0x1f0/0x370
seq_read_iter+0x2cb/0x1130
? rw_verify_area+0x3b1/0x590
? __mutex_lock+0x433/0x1220
vfs_read+0x6a6/0xa20
? lockdep_hardirqs_on+0x78/0x100
? __pfx_vfs_read+0x10/0x10
ksys_read+0xf7/0x1d0
? __pfx_ksys_read+0x10/0x10
? __x64_sys_openat+0x105/0x1d0
do_syscall_64+0x93/0x180
? lockdep_hardirqs_on_prepare+0x16d/0x400
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on+0x78/0x100
? do_syscall_64+0x9f/0x180
? __pfx_ksys_read+0x10/0x10
? lockdep_hardirqs_on_prepare+0x16d/0x400
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on+0x78/0x100
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on_prepare+0x16d/0x400
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on+0x78/0x100
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on_prepare+0x16d/0x400
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on+0x78/0x100
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on_prepare+0x16d/0x400
? do_syscall_64+0x9f/0x180
? lockdep_hardirqs_on+0x78/0x100
? do_syscall_64+0x9f/0x180
? do_syscall_64+0x9f/0x180
entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x7f9713f55cfa
Code: 55 48 89 e5 48 83 ec 20 48 89 55 e8 48 89 75 f0 89 7d f8 e8 e8 74 f8 ff 48 8b 55 e8 48 8b 75 f0 41 89 c0 8b 7d f8 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 2e 44 89 c7 48 89 45 f8 e8 42 75 f8 ff 48 8b
RSP: 002b:00007ffd7f512e70 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
RAX: ffffffffffffffda RBX: 000055c38f316859 RCX: 00007f9713f55cfa
RDX: 0000000000000fff RSI: 00007ffd7f512eb0 RDI: 0000000000000011
RBP: 00007ffd7f512e90 R08: 0000000000000000 R09: 00000000ffffffff
R10: 0000000000000000 R11: 0000000000000246 R12: 000055c38f317148
R13: 0000000000000000 R14: 00007f96f4004f30 R15: 000055c3b6b623c0
</TASK>
The WARN is observed when the blktests test case nvme/014 is repeated
with the tcp transport. It is rare; in some test environments, around 200
repetitions are required to reproduce it.
To avoid the WARN, check the NVME_TCP_Q_LIVE flag before locking
queue->queue_lock. The flag is cleared long before the lock is destroyed.
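To make the ordering concrete, here is a minimal sketch of the two code
paths involved (simplified and condensed; not verbatim driver code):

    /* teardown side: the flag is cleared when the queue is stopped,
     * the mutex is only destroyed much later in nvme_tcp_free_queue() */
    clear_bit(NVME_TCP_Q_LIVE, &queue->flags);
    ...
    mutex_destroy(&queue->queue_lock);

    /* sysfs side: nvme_tcp_get_address() now bails out before it can
     * touch a mutex that may already have been destroyed */
    if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
            return len;
    mutex_lock(&queue->queue_lock);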
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/nvme/host/tcp.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index f2fedd25915f9..29489c2c52fb9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2495,10 +2495,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len = nvmf_get_address(ctrl, buf, size);
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return len;
+
mutex_lock(&queue->queue_lock);
- if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
- goto done;
ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
if (ret > 0) {
if (len > 0)
@@ -2506,7 +2507,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
(len) ? "," : "", &src_addr);
}
-done:
+
mutex_unlock(&queue->queue_lock);
return len;
--
2.43.0
* [PATCH AUTOSEL 6.1 07/17] nvme: disable CC.CRIME (NVME_CC_CRIME)
From: Sasha Levin @ 2024-10-23 14:31 UTC
To: linux-kernel, stable
Cc: Greg Joyce, Nilay Shroff, Keith Busch, Sasha Levin, sagi,
linux-nvme
From: Greg Joyce <gjoyce@linux.ibm.com>
[ Upstream commit 0ce96a6708f34280a536263ee5c67e20c433dcce ]
Disable NVME_CC_CRIME so that CSTS.RDY indicates that the media
is ready and able to handle commands without returning
NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY.
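Condensed from the hunks below, the controller-enable path now looks
roughly like this (a sketch, not the verbatim driver code):

    ctrl->ctrl_config &= ~NVME_CC_CRIME;   /* never request "ready independent of media" */
    ...
    /* with CRIME clear, only the "controller ready with media" timeout applies */
    ready_timeout = NVME_CRTO_CRWMT(crto);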
Signed-off-by: Greg Joyce <gjoyce@linux.ibm.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Tested-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/nvme/host/core.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0729ab5430725..dc25d91891327 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2394,8 +2394,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
- if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
- ctrl->ctrl_config |= NVME_CC_CRIME;
+ /*
+ * Setting CRIME results in CSTS.RDY before the media is ready. This
+ * makes it possible for media related commands to return the error
+ * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
+ * restructured to handle retries, disable CC.CRIME.
+ */
+ ctrl->ctrl_config &= ~NVME_CC_CRIME;
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2430,10 +2435,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
* devices are known to get this wrong. Use the larger of the
* two values.
*/
- if (ctrl->ctrl_config & NVME_CC_CRIME)
- ready_timeout = NVME_CRTO_CRIMT(crto);
- else
- ready_timeout = NVME_CRTO_CRWMT(crto);
+ ready_timeout = NVME_CRTO_CRWMT(crto);
if (ready_timeout < timeout)
dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
--
2.43.0
* [PATCH AUTOSEL 6.1 15/17] nvme-multipath: defer partition scanning
From: Sasha Levin @ 2024-10-23 14:31 UTC
To: linux-kernel, stable
Cc: Keith Busch, Hannes Reinecke, Christoph Hellwig, Sasha Levin,
sagi, linux-nvme
From: Keith Busch <kbusch@kernel.org>
[ Upstream commit 1f021341eef41e77a633186e9be5223de2ce5d48 ]
We need to prevent the partition scan from occurring within the
controller's scan_work context. If a path error occurs here, the IO will
wait until a path becomes available or all paths are torn down, but that
action also occurs within scan_work, so it would deadlock. Defer the
partition scan to a different context that does not block scan_work.
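In rough terms, the deferral works as follows (a condensed sketch of the
hunks below, not a standalone implementation):

    /* nvme_mpath_alloc_disk(): keep the gendisk from scanning partitions
     * while it is still being set up from scan_work */
    set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);

    /* nvme_mpath_set_live(): once a usable path exists, defer the scan
     * to a workqueue instead of doing it inline */
    kblockd_schedule_work(&head->partition_scan_work);

    /* nvme_partition_scan_work(): clear the suppression flag, then
     * rescan the partitions outside of scan_work */
    bdev_disk_changed(head->disk, false);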
Reported-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/nvme/host/multipath.c | 33 +++++++++++++++++++++++++++++++++
drivers/nvme/host/nvme.h | 1 +
2 files changed, 34 insertions(+)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 93ada8941a4c5..43b89c7d585f0 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -463,6 +463,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
return ret;
}
+static void nvme_partition_scan_work(struct work_struct *work)
+{
+ struct nvme_ns_head *head =
+ container_of(work, struct nvme_ns_head, partition_scan_work);
+
+ if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+ &head->disk->state)))
+ return;
+
+ mutex_lock(&head->disk->open_mutex);
+ bdev_disk_changed(head->disk, false);
+ mutex_unlock(&head->disk->open_mutex);
+}
+
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
@@ -489,6 +503,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
bio_list_init(&head->requeue_list);
spin_lock_init(&head->requeue_lock);
INIT_WORK(&head->requeue_work, nvme_requeue_work);
+ INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
/*
* Add a multipath node if the subsystems supports multiple controllers.
@@ -504,6 +519,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
return -ENOMEM;
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
+
+ /*
+ * We need to suppress the partition scan from occurring within the
+ * controller's scan_work context. If a path error occurs here, the IO
+ * will wait until a path becomes available or all paths are torn down,
+ * but that action also occurs within scan_work, so it would deadlock.
+ * Defer the partition scan to a different context that does not block
+ * scan_work.
+ */
+ set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
@@ -552,6 +577,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
return;
}
nvme_add_ns_head_cdev(head);
+ kblockd_schedule_work(&head->partition_scan_work);
}
mutex_lock(&head->lock);
@@ -851,6 +877,12 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
kblockd_schedule_work(&head->requeue_work);
if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
del_gendisk(head->disk);
}
}
@@ -862,6 +894,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
+ flush_work(&head->partition_scan_work);
put_disk(head->disk);
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5f8a146b70148..0f49b779dec65 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -460,6 +460,7 @@ struct nvme_ns_head {
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
+ struct work_struct partition_scan_work;
struct mutex lock;
unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE 0
--
2.43.0
* [PATCH AUTOSEL 6.1 17/17] nvme: make keep-alive synchronous operation
From: Sasha Levin @ 2024-10-23 14:31 UTC
To: linux-kernel, stable
Cc: Nilay Shroff, Christoph Hellwig, Keith Busch, Sasha Levin, sagi,
linux-nvme
From: Nilay Shroff <nilay@linux.ibm.com>
[ Upstream commit d06923670b5a5f609603d4a9fee4dec02d38de9c ]
The nvme keep-alive operation, which executes at a periodic interval,
could potentially sneak in while a fabric controller is being shut down.
This may lead to a race between the fabric controller admin queue
destroy code path (invoked while shutting down the controller) and the
hw/hctx queue dispatcher called from the nvme keep-alive async request
queuing operation. This race can lead to the kernel crash shown below:
Call Trace:
autoremove_wake_function+0x0/0xbc (unreliable)
__blk_mq_sched_dispatch_requests+0x114/0x24c
blk_mq_sched_dispatch_requests+0x44/0x84
blk_mq_run_hw_queue+0x140/0x220
nvme_keep_alive_work+0xc8/0x19c [nvme_core]
process_one_work+0x200/0x4e0
worker_thread+0x340/0x504
kthread+0x138/0x140
start_kernel_thread+0x14/0x18
While the fabric controller is shutting down, a keep-alive request that
sneaks in is flushed off. The nvme_keep_alive_end_io function is then
invoked to handle the end of the keep-alive operation; it decrements
admin->q_usage_counter, and if this is the last/only request in the
admin queue, the counter drops to zero. If that happens, the blk-mq
destroy queue operation (blk_mq_destroy_queue()), which may be running
simultaneously on another CPU (as this is the controller shutdown code
path), makes forward progress and deletes the admin queue. From that
point onward the admin queue resources must not be accessed. However,
the nvme keep-alive thread running the hw/hctx queue dispatch operation
has not yet finished its work, so it can still access the admin queue
after it has already been deleted, which causes the crash above.
This fix avoids the observed crash by implementing keep-alive as a
synchronous operation, so that admin->q_usage_counter is decremented
only after the keep-alive command has finished executing and its status
has been returned to the caller (blk_execute_rq()). This ensures that
the fabric shutdown code path does not destroy the fabric admin queue
until the keep-alive request has finished execution and the keep-alive
thread is no longer running the hw/hctx queue dispatch operation.
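In essence the submission changes from fire-and-forget to a blocking
call (condensed from the hunks below):

    /* before: completion handled asynchronously via ->end_io, so the
     * request (and its q_usage_counter reference) could be released
     * while the hw/hctx dispatch was still running */
    rq->end_io = nvme_keep_alive_end_io;
    rq->end_io_data = ctrl;
    blk_execute_rq_nowait(rq, false);

    /* after: wait for the command, then handle the result and free the
     * request; q_usage_counter is only dropped once dispatch has
     * fully returned */
    status = blk_execute_rq(rq, false);
    nvme_keep_alive_finish(rq, status, ctrl);
    blk_mq_free_request(rq);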
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/nvme/host/core.c | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dc25d91891327..92ffeb6605618 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1231,10 +1231,9 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
nvme_keep_alive_work_period(ctrl));
}
-static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
- blk_status_t status)
+static void nvme_keep_alive_finish(struct request *rq,
+ blk_status_t status, struct nvme_ctrl *ctrl)
{
- struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
bool startka = false;
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
@@ -1252,13 +1251,11 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
delay = 0;
}
- blk_mq_free_request(rq);
-
if (status) {
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return RQ_END_IO_NONE;
+ return;
}
ctrl->ka_last_check_time = jiffies;
@@ -1270,7 +1267,6 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
- return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@@ -1279,6 +1275,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
struct nvme_ctrl, ka_work);
bool comp_seen = ctrl->comp_seen;
struct request *rq;
+ blk_status_t status;
ctrl->ka_last_check_time = jiffies;
@@ -1301,9 +1298,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
- rq->end_io = nvme_keep_alive_end_io;
- rq->end_io_data = ctrl;
- blk_execute_rq_nowait(rq, false);
+ status = blk_execute_rq(rq, false);
+ nvme_keep_alive_finish(rq, status, ctrl);
+ blk_mq_free_request(rq);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
--
2.43.0