* [PULL 1/7] spdm-socket: add separate send/recv functions
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-10-30 7:29 ` [PULL 2/7] spdm: add spdm storage transport virtual header Klaus Jensen
` (6 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Wilfred Mallawa, Alistair Francis,
Jonathan Cameron, Klaus Jensen
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
This is to support uni-directional transports such as SPDM over Storage, as
specified by DMTF DSP0286.
Also update spdm_socket_rsp() to use the new send()/receive() functions. For
the case of spdm_socket_receive(), this allows us to do error checking
in one place with the addition of spdm_socket_command_valid().
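As a rough, illustrative sketch (names and error handling here are
placeholders), a uni-directional caller can now drive the two halves
independently:
    /* IF_SEND carries the request; no SPDM payload is returned for it */
    if (!spdm_socket_send(socket, SPDM_SOCKET_STORAGE_CMD_IF_SEND,
                          transport_type, req, req_len)) {
        return -1;
    }
    /* the responder acknowledges the IF_SEND with a status message */
    if (!spdm_socket_receive(socket, transport_type, &status, sizeof(status))) {
        return -1;
    }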
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
backends/spdm-socket.c | 58 ++++++++++++++++++++++++++++--------
include/system/spdm-socket.h | 32 ++++++++++++++++++++
2 files changed, 77 insertions(+), 13 deletions(-)
diff --git a/backends/spdm-socket.c b/backends/spdm-socket.c
index 2c709c68c873..ab74a02d9cf6 100644
--- a/backends/spdm-socket.c
+++ b/backends/spdm-socket.c
@@ -184,29 +184,61 @@ int spdm_socket_connect(uint16_t port, Error **errp)
return client_socket;
}
-uint32_t spdm_socket_rsp(const int socket, uint32_t transport_type,
- void *req, uint32_t req_len,
- void *rsp, uint32_t rsp_len)
+static bool spdm_socket_command_valid(uint32_t command)
+{
+ switch (command) {
+ case SPDM_SOCKET_COMMAND_NORMAL:
+ case SPDM_SOCKET_STORAGE_CMD_IF_SEND:
+ case SPDM_SOCKET_STORAGE_CMD_IF_RECV:
+ case SOCKET_SPDM_STORAGE_ACK_STATUS:
+ case SPDM_SOCKET_COMMAND_OOB_ENCAP_KEY_UPDATE:
+ case SPDM_SOCKET_COMMAND_CONTINUE:
+ case SPDM_SOCKET_COMMAND_SHUTDOWN:
+ case SPDM_SOCKET_COMMAND_UNKOWN:
+ case SPDM_SOCKET_COMMAND_TEST:
+ return true;
+ default:
+ return false;
+ }
+}
+
+uint32_t spdm_socket_receive(const int socket, uint32_t transport_type,
+ void *rsp, uint32_t rsp_len)
{
uint32_t command;
bool result;
- result = send_platform_data(socket, transport_type,
- SPDM_SOCKET_COMMAND_NORMAL,
- req, req_len);
- if (!result) {
- return 0;
- }
-
result = receive_platform_data(socket, transport_type, &command,
(uint8_t *)rsp, &rsp_len);
+
+ /* we may have received some data, but check if the command is valid */
+ if (!result || !spdm_socket_command_valid(command)) {
+ return 0;
+ }
+
+ return rsp_len;
+}
+
+bool spdm_socket_send(const int socket, uint32_t socket_cmd,
+ uint32_t transport_type, void *req, uint32_t req_len)
+{
+ return send_platform_data(socket, transport_type, socket_cmd, req,
+ req_len);
+}
+
+uint32_t spdm_socket_rsp(const int socket, uint32_t transport_type,
+ void *req, uint32_t req_len,
+ void *rsp, uint32_t rsp_len)
+{
+ bool result;
+
+ result = spdm_socket_send(socket, SPDM_SOCKET_COMMAND_NORMAL,
+ transport_type, req, req_len);
if (!result) {
return 0;
}
- assert(command != 0);
-
- return rsp_len;
+ return spdm_socket_receive(socket, transport_type, rsp, rsp_len);
}
void spdm_socket_close(const int socket, uint32_t transport_type)
diff --git a/include/system/spdm-socket.h b/include/system/spdm-socket.h
index 5d8bd9aa4e1d..29aa04fd5211 100644
--- a/include/system/spdm-socket.h
+++ b/include/system/spdm-socket.h
@@ -50,6 +50,35 @@ uint32_t spdm_socket_rsp(const int socket, uint32_t transport_type,
void *req, uint32_t req_len,
void *rsp, uint32_t rsp_len);
+/**
+ * spdm_socket_receive: Receive a message from an SPDM server
+ * @socket: socket returned from spdm_socket_connect()
+ * @transport_type: SPDM_SOCKET_TRANSPORT_TYPE_* macro
+ * @rsp: response buffer
+ * @rsp_len: response buffer length
+ *
+ * Receives a message from the SPDM server and returns the number of bytes
+ * received or 0 on failure. This can be used to receive a message from the SPDM
+ * server without sending anything first.
+ */
+uint32_t spdm_socket_receive(const int socket, uint32_t transport_type,
+ void *rsp, uint32_t rsp_len);
+
+/**
+ * spdm_socket_send: Send a message to an SPDM server
+ * @socket: socket returned from spdm_socket_connect()
+ * @socket_cmd: socket command type (normal/if_recv/if_send etc...)
+ * @transport_type: SPDM_SOCKET_TRANSPORT_TYPE_* macro
+ * @req: request buffer
+ * @req_len: request buffer length
+ *
+ * Sends platform data to an SPDM server on socket, returns true on success.
+ * The response from the server must then be fetched by using
+ * spdm_socket_receive().
+ */
+bool spdm_socket_send(const int socket, uint32_t socket_cmd,
+ uint32_t transport_type, void *req, uint32_t req_len);
+
/**
* spdm_socket_close: send a shutdown command to the server
* @socket: socket returned from spdm_socket_connect()
@@ -60,6 +89,9 @@ uint32_t spdm_socket_rsp(const int socket, uint32_t transport_type,
void spdm_socket_close(const int socket, uint32_t transport_type);
#define SPDM_SOCKET_COMMAND_NORMAL 0x0001
+#define SPDM_SOCKET_STORAGE_CMD_IF_SEND 0x0002
+#define SPDM_SOCKET_STORAGE_CMD_IF_RECV 0x0003
+#define SOCKET_SPDM_STORAGE_ACK_STATUS 0x0004
#define SPDM_SOCKET_COMMAND_OOB_ENCAP_KEY_UPDATE 0x8001
#define SPDM_SOCKET_COMMAND_CONTINUE 0xFFFD
#define SPDM_SOCKET_COMMAND_SHUTDOWN 0xFFFE
--
2.51.0
* [PULL 2/7] spdm: add spdm storage transport virtual header
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
2025-10-30 7:29 ` [PULL 1/7] spdm-socket: add separate send/recv functions Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-10-30 7:29 ` [PULL 3/7] hw/nvme: add NVMe Admin Security SPDM support Klaus Jensen
` (5 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Wilfred Mallawa, Alistair Francis,
Jonathan Cameron, Klaus Jensen
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
This header contains the transport encoding for an SPDM message that
uses the SPDM over Storage transport as defined by the DMTF DSP0286.
Note that in the StorageSpdmTransportHeader structure, the security_protocol
field is defined in the SCSI Primary Commands 5 (SPC-5) specification. The
NVMe specification also refers to SPC-5 for this definition.
The security_protocol_specific field is defined in DSP0286 and is
referred to as SP Specific for NVMe and ATA.
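For illustration, an IF_SEND header for an SPDM Storage Message (operation
code 0x5, as enumerated later in this series) could be built roughly as
follows; conn_id and msg_len are placeholders:
    StorageSpdmTransportHeader hdr = {
        .security_protocol = 0xE8,          /* SPDM, per SPC-5 */
        /* Bit[7:2] SPDM operation, Bit[1:0] connection ID */
        .security_protocol_specific = cpu_to_le16((0x5 << 2) | conn_id),
        .length = cpu_to_le32(msg_len),     /* SPDM message length */
    };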
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
include/system/spdm-socket.h | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/include/system/spdm-socket.h b/include/system/spdm-socket.h
index 29aa04fd5211..8c07dc12d283 100644
--- a/include/system/spdm-socket.h
+++ b/include/system/spdm-socket.h
@@ -88,6 +88,20 @@ bool spdm_socket_send(const int socket, uint32_t socket_cmd,
*/
void spdm_socket_close(const int socket, uint32_t transport_type);
+/*
+ * Defines the transport encoding for SPDM, this information shall be passed
+ * down to the SPDM server, when conforming to the SPDM over Storage standard
+ * as defined by DSP0286.
+ */
+typedef struct {
+ uint8_t security_protocol; /* Must be 0xE8 for SPDM Commands
+ as per SCSI Primary Commands 5 */
+ uint16_t security_protocol_specific; /* Bit[7:2] SPDM Operation
+ Bit[0:1] Connection ID
+ per DSP0286 1.0: Section 7.2 */
+ uint32_t length; /* Length of the SPDM Message */
+} QEMU_PACKED StorageSpdmTransportHeader;
+
#define SPDM_SOCKET_COMMAND_NORMAL 0x0001
#define SPDM_SOCKET_STORAGE_CMD_IF_SEND 0x0002
#define SPDM_SOCKET_STORAGE_CMD_IF_RECV 0x0003
--
2.51.0
* [PULL 3/7] hw/nvme: add NVMe Admin Security SPDM support
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
2025-10-30 7:29 ` [PULL 1/7] spdm-socket: add separate send/recv functions Klaus Jensen
2025-10-30 7:29 ` [PULL 2/7] spdm: add spdm storage transport virtual header Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-10-30 7:29 ` [PULL 4/7] spdm: define SPDM transport enum types Klaus Jensen
` (4 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Wilfred Mallawa, Stefan Hajnoczi, Jonathan Cameron,
Klaus Jensen, Alistair Francis, Keith Busch, Klaus Jensen,
Jesper Devantier, Fam Zheng, Philippe Mathieu-Daudé,
Kevin Wolf, Hanna Reitz, qemu-block
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Add support for the NVMe Admin Security Send/Receive commands, including
support for DMTF's SPDM. The transport binding for SPDM is defined in
DMTF DSP0286.
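As a rough host-side illustration of the command dword layout that the
handler decodes (mirroring the extract32() calls below; names are
placeholders):
    uint8_t  secp  = 0xE8;                        /* NVME_SEC_PROT_DMTF_SPDM */
    uint16_t spsp  = sp_specific;                 /* SP Specific, per DSP0286 */
    uint32_t cdw10 = ((uint32_t)secp << 24) | ((uint32_t)spsp << 8);
    uint32_t cdw11 = transfer_len;                /* transfer length in bytes */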
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/nvme/ctrl.c | 212 ++++++++++++++++++++++++++++++++++-
hw/nvme/nvme.h | 5 +
include/block/nvme.h | 15 +++
include/system/spdm-socket.h | 2 +
4 files changed, 233 insertions(+), 1 deletion(-)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index cd81f7399754..e450e785ea10 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -282,6 +282,8 @@ static const uint32_t nvme_cse_acs_default[256] = {
[NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP,
+ [NVME_ADM_CMD_SECURITY_SEND] = NVME_CMD_EFF_CSUPP,
+ [NVME_ADM_CMD_SECURITY_RECV] = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_nvm_default[256] = {
@@ -7282,6 +7284,207 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
return NVME_SUCCESS;
}
+static uint16_t nvme_sec_prot_spdm_send(NvmeCtrl *n, NvmeRequest *req)
+{
+ StorageSpdmTransportHeader hdr = {0};
+ g_autofree uint8_t *sec_buf = NULL;
+ uint32_t transfer_len = le32_to_cpu(req->cmd.cdw11);
+ uint32_t transport_transfer_len = transfer_len;
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint32_t recvd;
+ uint16_t nvme_cmd_status, ret;
+ uint8_t secp = extract32(dw10, 24, 8);
+ uint16_t spsp = extract32(dw10, 8, 16);
+ bool spdm_res;
+
+ if (transport_transfer_len > UINT32_MAX - sizeof(hdr)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ transport_transfer_len += sizeof(hdr);
+ if (transport_transfer_len > SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ret = nvme_check_mdts(n, transport_transfer_len);
+ if (ret != NVME_SUCCESS) {
+ return ret;
+ }
+
+ /* Generate the NVMe transport header */
+ hdr.security_protocol = secp;
+ hdr.security_protocol_specific = cpu_to_le16(spsp);
+ hdr.length = cpu_to_le32(transfer_len);
+
+ sec_buf = g_try_malloc0(transport_transfer_len);
+ if (!sec_buf) {
+ return NVME_INTERNAL_DEV_ERROR;
+ }
+
+ /* Attach the transport header */
+ memcpy(sec_buf, &hdr, sizeof(hdr));
+ ret = nvme_h2c(n, sec_buf + sizeof(hdr), transfer_len, req);
+ if (ret) {
+ return ret;
+ }
+
+ spdm_res = spdm_socket_send(n->spdm_socket, SPDM_SOCKET_STORAGE_CMD_IF_SEND,
+ SPDM_SOCKET_TRANSPORT_TYPE_NVME, sec_buf,
+ transport_transfer_len);
+ if (!spdm_res) {
+ return NVME_DATA_TRAS_ERROR | NVME_DNR;
+ }
+
+ /* The responder shall ack with message status */
+ recvd = spdm_socket_receive(n->spdm_socket, SPDM_SOCKET_TRANSPORT_TYPE_NVME,
+ &nvme_cmd_status,
+ SPDM_SOCKET_MAX_MSG_STATUS_LEN);
+
+ nvme_cmd_status = be16_to_cpu(nvme_cmd_status);
+
+ if (recvd < SPDM_SOCKET_MAX_MSG_STATUS_LEN) {
+ return NVME_DATA_TRAS_ERROR | NVME_DNR;
+ }
+
+ return nvme_cmd_status;
+}
+
+/* From host to controller */
+static uint16_t nvme_security_send(NvmeCtrl *n, NvmeRequest *req)
+{
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint8_t secp = extract32(dw10, 24, 8);
+
+ switch (secp) {
+ case NVME_SEC_PROT_DMTF_SPDM:
+ if (n->spdm_socket < 0) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+ return nvme_sec_prot_spdm_send(n, req);
+ default:
+ /* Unsupported Security Protocol Type */
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+static uint16_t nvme_sec_prot_spdm_receive(NvmeCtrl *n, NvmeRequest *req)
+{
+ StorageSpdmTransportHeader hdr;
+ g_autofree uint8_t *rsp_spdm_buf = NULL;
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint32_t alloc_len = le32_to_cpu(req->cmd.cdw11);
+ uint32_t recvd, spdm_res;
+ uint16_t nvme_cmd_status, ret;
+ uint8_t secp = extract32(dw10, 24, 8);
+ uint16_t spsp = extract32(dw10, 8, 16);
+ if (!alloc_len) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ /* Generate the NVMe transport header */
+ hdr = (StorageSpdmTransportHeader) {
+ .security_protocol = secp,
+ .security_protocol_specific = cpu_to_le16(spsp),
+ .length = cpu_to_le32(alloc_len),
+ };
+
+ /* Forward if_recv to the SPDM Server with SPSP0 */
+ spdm_res = spdm_socket_send(n->spdm_socket, SPDM_SOCKET_STORAGE_CMD_IF_RECV,
+ SPDM_SOCKET_TRANSPORT_TYPE_NVME,
+ &hdr, sizeof(hdr));
+ if (!spdm_res) {
+ return NVME_DATA_TRAS_ERROR | NVME_DNR;
+ }
+
+ /* The responder shall ack with message status */
+ recvd = spdm_socket_receive(n->spdm_socket, SPDM_SOCKET_TRANSPORT_TYPE_NVME,
+ &nvme_cmd_status,
+ SPDM_SOCKET_MAX_MSG_STATUS_LEN);
+ if (recvd < SPDM_SOCKET_MAX_MSG_STATUS_LEN) {
+ return NVME_DATA_TRAS_ERROR | NVME_DNR;
+ }
+
+ nvme_cmd_status = be16_to_cpu(nvme_cmd_status);
+ /* An error here implies the prior if_recv from requester was spurious */
+ if (nvme_cmd_status != NVME_SUCCESS) {
+ return nvme_cmd_status;
+ }
+
+ /* Clear to start receiving data from the server */
+ rsp_spdm_buf = g_try_malloc0(alloc_len);
+ if (!rsp_spdm_buf) {
+ return NVME_INTERNAL_DEV_ERROR;
+ }
+
+ recvd = spdm_socket_receive(n->spdm_socket,
+ SPDM_SOCKET_TRANSPORT_TYPE_NVME,
+ rsp_spdm_buf, alloc_len);
+ if (!recvd) {
+ return NVME_DATA_TRAS_ERROR | NVME_DNR;
+ }
+
+ ret = nvme_c2h(n, rsp_spdm_buf, MIN(recvd, alloc_len), req);
+ if (ret) {
+ return ret;
+ }
+
+ return NVME_SUCCESS;
+}
+
+static uint16_t nvme_get_sec_prot_info(NvmeCtrl *n, NvmeRequest *req)
+{
+ uint32_t alloc_len = le32_to_cpu(req->cmd.cdw11);
+ uint8_t resp[10] = {
+ /* Supported Security Protocol List Length */
+ [6] = 0, /* MSB */
+ [7] = 2, /* LSB */
+ /* Support Security Protocol List */
+ [8] = SFSC_SECURITY_PROT_INFO,
+ [9] = 0,
+ };
+
+ if (n->spdm_socket >= 0) {
+ resp[9] = NVME_SEC_PROT_DMTF_SPDM;
+ }
+
+ if (alloc_len < 10) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ return nvme_c2h(n, resp, sizeof(resp), req);
+}
+
+/* From controller to host */
+static uint16_t nvme_security_receive(NvmeCtrl *n, NvmeRequest *req)
+{
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint16_t spsp = extract32(dw10, 8, 16);
+ uint8_t secp = extract32(dw10, 24, 8);
+
+ switch (secp) {
+ case SFSC_SECURITY_PROT_INFO:
+ switch (spsp) {
+ case 0:
+ /* Supported security protocol list */
+ return nvme_get_sec_prot_info(n, req);
+ case 1:
+ /* Certificate data */
+ /* fallthrough */
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+ case NVME_SEC_PROT_DMTF_SPDM:
+ if (n->spdm_socket < 0) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+ return nvme_sec_prot_spdm_receive(n, req);
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+}
+
static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req)
{
return NVME_INVALID_FIELD | NVME_DNR;
@@ -7389,6 +7592,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
return nvme_directive_send(n, req);
case NVME_ADM_CMD_DIRECTIVE_RECV:
return nvme_directive_receive(n, req);
+ case NVME_ADM_CMD_SECURITY_SEND:
+ return nvme_security_send(n, req);
+ case NVME_ADM_CMD_SECURITY_RECV:
+ return nvme_security_receive(n, req);
default:
g_assert_not_reached();
}
@@ -8459,6 +8666,8 @@ static void nvme_init_state(NvmeCtrl *n)
sctrl->vfn = cpu_to_le16(i + 1);
}
+ n->spdm_socket = -1;
+
cap->cntlid = cpu_to_le16(n->cntlid);
cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
@@ -8820,7 +9029,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->mdts = n->params.mdts;
id->ver = cpu_to_le32(NVME_SPEC_VER);
- oacs = NVME_OACS_NMS | NVME_OACS_FORMAT | NVME_OACS_DIRECTIVES;
+ oacs = NVME_OACS_NMS | NVME_OACS_FORMAT | NVME_OACS_DIRECTIVES |
+ NVME_OACS_SECURITY;
if (n->params.dbcs) {
oacs |= NVME_OACS_DBCS;
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index b5c9378ea4e5..67ed562e0086 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -461,6 +461,8 @@ static inline const char *nvme_adm_opc_str(uint8_t opc)
case NVME_ADM_CMD_DIRECTIVE_RECV: return "NVME_ADM_CMD_DIRECTIVE_RECV";
case NVME_ADM_CMD_DBBUF_CONFIG: return "NVME_ADM_CMD_DBBUF_CONFIG";
case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM";
+ case NVME_ADM_CMD_SECURITY_SEND: return "NVME_ADM_CMD_SECURITY_SEND";
+ case NVME_ADM_CMD_SECURITY_RECV: return "NVME_ADM_CMD_SECURITY_RECV";
default: return "NVME_ADM_CMD_UNKNOWN";
}
}
@@ -648,6 +650,9 @@ typedef struct NvmeCtrl {
} next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */
uint32_t dn; /* Disable Normal */
NvmeAtomic atomic;
+
+ /* Socket mapping to SPDM over NVMe Security In/Out commands */
+ int spdm_socket;
} NvmeCtrl;
typedef enum NvmeResetType {
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 358e516e38b0..9fa2ecaf281c 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -1779,6 +1779,21 @@ enum NvmeDirectiveOperations {
NVME_DIRECTIVE_RETURN_PARAMS = 0x1,
};
+typedef enum SfscSecurityProtocol {
+ SFSC_SECURITY_PROT_INFO = 0x00,
+} SfscSecurityProtocol;
+
+typedef enum NvmeSecurityProtocols {
+ NVME_SEC_PROT_DMTF_SPDM = 0xE8,
+} NvmeSecurityProtocols;
+
+typedef enum SpdmOperationCodes {
+ SPDM_STORAGE_DISCOVERY = 0x1, /* Mandatory */
+ SPDM_STORAGE_PENDING_INFO = 0x2, /* Optional */
+ SPDM_STORAGE_MSG = 0x5, /* Mandatory */
+ SPDM_STORAGE_SEC_MSG = 0x6, /* Optional */
+} SpdmOperationCodes;
+
typedef struct QEMU_PACKED NvmeFdpConfsHdr {
uint16_t num_confs;
uint8_t version;
diff --git a/include/system/spdm-socket.h b/include/system/spdm-socket.h
index 8c07dc12d283..e61163381251 100644
--- a/include/system/spdm-socket.h
+++ b/include/system/spdm-socket.h
@@ -114,7 +114,9 @@ typedef struct {
#define SPDM_SOCKET_TRANSPORT_TYPE_MCTP 0x01
#define SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE 0x02
+#define SPDM_SOCKET_TRANSPORT_TYPE_NVME 0x04
#define SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE 0x1200
+#define SPDM_SOCKET_MAX_MSG_STATUS_LEN 0x02
#endif
--
2.51.0
* [PULL 4/7] spdm: define SPDM transport enum types
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
` (2 preceding siblings ...)
2025-10-30 7:29 ` [PULL 3/7] hw/nvme: add NVMe Admin Security SPDM support Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-10-30 7:29 ` [PULL 5/7] hw/nvme: connect SPDM over NVMe Security Send/Recv Klaus Jensen
` (3 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Wilfred Mallawa, Jonathan Cameron,
Alistair Francis, Klaus Jensen
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
SPDM may be used over different transports. This patch specifies the
transport types as an enum with a qdev property definition, such that a
user-supplied transport type (string) can be mapped directly onto the
respective SPDM transport enum for internal use.
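For example, the NVMe controller later in this series exposes it roughly
like so:
    DEFINE_PROP_SPDM_TRANS("spdm_trans", PCIDevice, spdm_trans,
                           SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE),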
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
backends/spdm-socket.c | 23 +++++++++++++++++++++++
include/system/spdm-socket.h | 19 +++++++++++++++----
2 files changed, 38 insertions(+), 4 deletions(-)
diff --git a/backends/spdm-socket.c b/backends/spdm-socket.c
index ab74a02d9cf6..6d8f02d3b928 100644
--- a/backends/spdm-socket.c
+++ b/backends/spdm-socket.c
@@ -13,6 +13,9 @@
#include "qemu/osdep.h"
#include "system/spdm-socket.h"
#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/core/qdev-prop-internal.h"
static bool read_bytes(const int socket, uint8_t *buffer,
size_t number_of_bytes)
@@ -246,3 +249,23 @@ void spdm_socket_close(const int socket, uint32_t transport_type)
send_platform_data(socket, transport_type,
SPDM_SOCKET_COMMAND_SHUTDOWN, NULL, 0);
}
+
+const QEnumLookup SpdmTransport_lookup = {
+ .array = (const char *const[]) {
+ [SPDM_SOCKET_TRANSPORT_TYPE_UNSPEC] = "unspecified",
+ [SPDM_SOCKET_TRANSPORT_TYPE_MCTP] = "mctp",
+ [SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE] = "doe",
+ [SPDM_SOCKET_TRANSPORT_TYPE_SCSI] = "scsi",
+ [SPDM_SOCKET_TRANSPORT_TYPE_NVME] = "nvme",
+ },
+ .size = SPDM_SOCKET_TRANSPORT_TYPE_MAX
+};
+
+const PropertyInfo qdev_prop_spdm_trans = {
+ .type = "SpdmTransportType",
+ .description = "Spdm Transport, doe/nvme/mctp/scsi/unspecified",
+ .enum_table = &SpdmTransport_lookup,
+ .get = qdev_propinfo_get_enum,
+ .set = qdev_propinfo_set_enum,
+ .set_default_value = qdev_propinfo_set_default_value_enum,
+};
diff --git a/include/system/spdm-socket.h b/include/system/spdm-socket.h
index e61163381251..00cb0e97f36e 100644
--- a/include/system/spdm-socket.h
+++ b/include/system/spdm-socket.h
@@ -112,11 +112,22 @@ typedef struct {
#define SPDM_SOCKET_COMMAND_UNKOWN 0xFFFF
#define SPDM_SOCKET_COMMAND_TEST 0xDEAD
-#define SPDM_SOCKET_TRANSPORT_TYPE_MCTP 0x01
-#define SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE 0x02
-#define SPDM_SOCKET_TRANSPORT_TYPE_NVME 0x04
-
#define SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE 0x1200
#define SPDM_SOCKET_MAX_MSG_STATUS_LEN 0x02
+typedef enum SpdmTransportType {
+ SPDM_SOCKET_TRANSPORT_TYPE_UNSPEC = 0,
+ SPDM_SOCKET_TRANSPORT_TYPE_MCTP,
+ SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
+ SPDM_SOCKET_TRANSPORT_TYPE_SCSI,
+ SPDM_SOCKET_TRANSPORT_TYPE_NVME,
+ SPDM_SOCKET_TRANSPORT_TYPE_MAX
+} SpdmTransportType;
+
+extern const PropertyInfo qdev_prop_spdm_trans;
+
+#define DEFINE_PROP_SPDM_TRANS(_name, _state, _field, _default) \
+ DEFINE_PROP_UNSIGNED(_name, _state, _field, _default, \
+ qdev_prop_spdm_trans, SpdmTransportType)
+
#endif
--
2.51.0
* [PULL 5/7] hw/nvme: connect SPDM over NVMe Security Send/Recv
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
` (3 preceding siblings ...)
2025-10-30 7:29 ` [PULL 4/7] spdm: define SPDM transport enum types Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-10-30 7:29 ` [PULL 6/7] hw/nvme: enable ns atomic writes Klaus Jensen
` (2 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Wilfred Mallawa, Jonathan Cameron, Klaus Jensen,
Alistair Francis, Keith Busch, Klaus Jensen, Jesper Devantier,
Michael S. Tsirkin, Marcel Apfelbaum, qemu-block
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
This patch extends the existing DoE-only SPDM support for NVMe to also
support SPDM over the NVMe Security Send/Recv commands. With the newly
defined `spdm_trans` argument, users can specify `spdm_trans=nvme` or
`spdm_trans=doe` to select the respective SPDM transport. SPDM over the
NVMe Security Send/Recv commands is defined in DMTF DSP0286.
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
[k.jensen: fix declaration in case statement; fix quotes in docs]
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
docs/specs/spdm.rst | 10 ++++++--
hw/nvme/ctrl.c | 47 ++++++++++++++++++++++++++++---------
include/hw/pci/pci_device.h | 2 ++
3 files changed, 46 insertions(+), 13 deletions(-)
diff --git a/docs/specs/spdm.rst b/docs/specs/spdm.rst
index 0e3ad25bc698..477ff9ef36c7 100644
--- a/docs/specs/spdm.rst
+++ b/docs/specs/spdm.rst
@@ -98,7 +98,7 @@ Then you can add this to your QEMU command line:
.. code-block:: shell
-drive file=blknvme,if=none,id=mynvme,format=raw \
- -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
+ -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323,spdm_trans=doe
At which point QEMU will try to connect to the SPDM server.
@@ -113,7 +113,13 @@ of the default. So the entire QEMU command might look like this
-append "root=/dev/vda console=ttyS0" \
-net none -nographic \
-drive file=blknvme,if=none,id=mynvme,format=raw \
- -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
+ -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323,spdm_trans=doe
+
+The ``spdm_trans`` argument defines the underlying transport type that is
+emulated by QEMU. For a PCIe NVMe controller, both "doe" and "nvme" are
+supported. Here, "doe" transports SPDM over the PCIe Data Object Exchange
+(DOE) extended capability, and "nvme" uses the NVMe Admin Security
+Send/Receive commands to implement the SPDM transport.
.. _DMTF:
https://www.dmtf.org/standards/SPDM
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index e450e785ea10..fa003031e719 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -8943,19 +8943,33 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
pcie_cap_deverr_init(pci_dev);
- /* DOE Initialisation */
+ /* SPDM Initialisation */
if (pci_dev->spdm_port) {
- uint16_t doe_offset = n->params.sriov_max_vfs ?
- PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF
- : PCI_CONFIG_SPACE_SIZE;
+ uint16_t doe_offset = PCI_CONFIG_SPACE_SIZE;
- pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
- doe_spdm_prot, true, 0);
+ switch (pci_dev->spdm_trans) {
+ case SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE:
+ if (n->params.sriov_max_vfs) {
+ doe_offset += PCI_ARI_SIZEOF;
+ }
- pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port,
- errp);
+ pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
+ doe_spdm_prot, true, 0);
- if (pci_dev->doe_spdm.spdm_socket < 0) {
+ pci_dev->doe_spdm.spdm_socket =
+ spdm_socket_connect(pci_dev->spdm_port, errp);
+
+ if (pci_dev->doe_spdm.spdm_socket < 0) {
+ return false;
+ }
+ break;
+ case SPDM_SOCKET_TRANSPORT_TYPE_NVME:
+ n->spdm_socket = spdm_socket_connect(pci_dev->spdm_port, errp);
+ if (n->spdm_socket < 0) {
+ return false;
+ }
+ break;
+ default:
return false;
}
}
@@ -9246,9 +9260,14 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->cmb.buf);
}
+ /* Only one of the `spdm_socket`s below should have been setup */
+ assert(!(pci_dev->doe_spdm.spdm_socket > 0 && n->spdm_socket >= 0));
if (pci_dev->doe_spdm.spdm_socket > 0) {
spdm_socket_close(pci_dev->doe_spdm.spdm_socket,
SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
+ } else if (n->spdm_socket >= 0) {
+ spdm_socket_close(n->spdm_socket,
+ SPDM_SOCKET_TRANSPORT_TYPE_NVME);
}
if (n->pmr.dev) {
@@ -9303,6 +9322,8 @@ static const Property nvme_props[] = {
false),
DEFINE_PROP_UINT16("mqes", NvmeCtrl, params.mqes, 0x7ff),
DEFINE_PROP_UINT16("spdm_port", PCIDevice, spdm_port, 0),
+ DEFINE_PROP_SPDM_TRANS("spdm_trans", PCIDevice, spdm_trans,
+ SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE),
DEFINE_PROP_BOOL("ctratt.mem", NvmeCtrl, params.ctratt.mem, false),
DEFINE_PROP_BOOL("atomic.dn", NvmeCtrl, params.atomic_dn, 0),
DEFINE_PROP_UINT16("atomic.awun", NvmeCtrl, params.atomic_awun, 0),
@@ -9378,7 +9399,9 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
{
uint16_t old_num_vfs = pcie_sriov_num_vfs(dev);
- if (pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
+ /* DOE is only initialised if SPDM over DOE is used */
+ if (pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE) &&
+ dev->spdm_trans == SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE) {
pcie_doe_write_config(&dev->doe_spdm, address, val, len);
}
pci_default_write_config(dev, address, val, len);
@@ -9389,7 +9412,9 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
static uint32_t nvme_pci_read_config(PCIDevice *dev, uint32_t address, int len)
{
uint32_t val;
- if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
+
+ if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE) &&
+ (dev->spdm_trans == SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE)) {
if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) {
return val;
}
diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h
index eee03385686c..88ccea501136 100644
--- a/include/hw/pci/pci_device.h
+++ b/include/hw/pci/pci_device.h
@@ -4,6 +4,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_doe.h"
+#include "system/spdm-socket.h"
#define TYPE_PCI_DEVICE "pci-device"
typedef struct PCIDeviceClass PCIDeviceClass;
@@ -166,6 +167,7 @@ struct PCIDevice {
/* SPDM */
uint16_t spdm_port;
+ SpdmTransportType spdm_trans;
/* DOE */
DOECap doe_spdm;
--
2.51.0
* [PULL 6/7] hw/nvme: enable ns atomic writes
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
` (4 preceding siblings ...)
2025-10-30 7:29 ` [PULL 5/7] hw/nvme: connect SPDM over NVMe Security Send/Recv Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-11-02 11:50 ` Peter Maydell
2025-10-30 7:29 ` [PULL 7/7] hw/nvme: add atomic boundary support Klaus Jensen
2025-11-01 8:36 ` [PULL 0/7] nvme queue Richard Henderson
7 siblings, 1 reply; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Alan Adamson, Jesper Wendel Devantier,
Klaus Jensen, Keith Busch, Klaus Jensen, qemu-block
From: Alan Adamson <alan.adamson@oracle.com>
Add support for the namespace atomic parameters: NAWUN and NAWUPF. Namespace
Atomic Compare and Write Unit (NACWU) is not currently supported.
Writes that adhere to the NAWUN and NAWUPF parameters are guaranteed to be
atomic.
New NVMe QEMU Parameters (see the NVMe Specification for details):
atomic.nawun=UINT16 (default: 0)
atomic.nawupf=UINT16 (default: 0)
atomic.nsfeat (default off) - Set Namespace Supported Atomic Boundary &
Power (NSABP) bit in Namespace Features (NSFEAT) in the Identify
Namespace Data Structure
See the NVMe Specification for more information.
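Roughly, the effective per-namespace limit is derived as follows (a
simplified restatement of the logic below, not the literal code):
    /* NAWUN/NAWUPF are 0's based; DN (Disable Normal) selects NAWUPF */
    atomic_max_write_size = (dn ? nawupf : nawun) + 1;
    atomic_writes = (atomic_max_write_size > 1);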
Signed-off-by: Alan Adamson <alan.adamson@oracle.com>
Reviewed-by: Jesper Wendel Devantier <foss@defmacro.it>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/nvme/ctrl.c | 23 +++++++++++++++++++++++
hw/nvme/ns.c | 38 ++++++++++++++++++++++++++++++++++++++
hw/nvme/nvme.h | 6 ++++++
3 files changed, 67 insertions(+)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index fa003031e719..121a95b2e373 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -6705,6 +6705,23 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
} else {
atomic->atomic_writes = 1;
}
+ for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+ ns = nvme_ns(n, i);
+ if (ns && ns->atomic.atomic_writes) {
+ if (n->dn) {
+ ns->atomic.atomic_max_write_size =
+ le16_to_cpu(ns->id_ns.nawupf) + 1;
+ } else {
+ ns->atomic.atomic_max_write_size =
+ le16_to_cpu(ns->id_ns.nawun) + 1;
+ }
+ if (ns->atomic.atomic_max_write_size == 1) {
+ ns->atomic.atomic_writes = 0;
+ } else {
+ ns->atomic.atomic_writes = 1;
+ }
+ }
+ }
break;
default:
return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
@@ -7688,6 +7705,12 @@ static int nvme_atomic_write_check(NvmeCtrl *n, NvmeCmd *cmd,
static NvmeAtomic *nvme_get_atomic(NvmeCtrl *n, NvmeCmd *cmd)
{
+ NvmeNamespace *ns = nvme_ns(n, cmd->nsid);
+
+ if (ns && ns->atomic.atomic_writes) {
+ return &ns->atomic;
+ }
+
if (n->atomic.atomic_writes) {
return &n->atomic;
}
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 6df2e8e7c5ac..28aacb8db59a 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -724,11 +724,46 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
BusState *s = qdev_get_parent_bus(dev);
NvmeCtrl *n = NVME(s->parent);
NvmeSubsystem *subsys = n->subsys;
+ NvmeIdCtrl *id = &n->id_ctrl;
+ NvmeIdNs *id_ns = &ns->id_ns;
uint32_t nsid = ns->params.nsid;
int i;
assert(subsys);
+ /* Set atomic write parameters */
+ if (ns->params.atomic_nsfeat) {
+ id_ns->nsfeat |= NVME_ID_NS_NSFEAT_NSABPNS;
+ id_ns->nawun = cpu_to_le16(ns->params.atomic_nawun);
+ if (!id->awupf || (id_ns->nawun && (id_ns->nawun < id->awun))) {
+ error_report("Invalid NAWUN: %x AWUN=%x", id_ns->nawun, id->awun);
+ }
+ id_ns->nawupf = cpu_to_le16(ns->params.atomic_nawupf);
+ if (!id->awupf || (id_ns->nawupf && (id_ns->nawupf < id->awupf))) {
+ error_report("Invalid NAWUPF: %x AWUPF=%x",
+ id_ns->nawupf, id->awupf);
+ }
+ if (id_ns->nawupf > id_ns->nawun) {
+ error_report("Invalid: NAWUN=%x NAWUPF=%x",
+ id_ns->nawun, id_ns->nawupf);
+ }
+ }
+
+ if (id_ns->nawun || id_ns->nawupf) {
+ NvmeAtomic *atomic = &ns->atomic;
+
+ if (n->dn) {
+ atomic->atomic_max_write_size = cpu_to_le16(id_ns->nawupf) + 1;
+ } else {
+ atomic->atomic_max_write_size = cpu_to_le16(id_ns->nawun) + 1;
+ }
+ if (atomic->atomic_max_write_size == 1) {
+ atomic->atomic_writes = 0;
+ } else {
+ atomic->atomic_writes = 1;
+ }
+ }
+
/* reparent to subsystem bus */
if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
return;
@@ -804,6 +839,9 @@ static const Property nvme_ns_props[] = {
DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default,
false),
DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
+ DEFINE_PROP_UINT16("atomic.nawun", NvmeNamespace, params.atomic_nawun, 0),
+ DEFINE_PROP_UINT16("atomic.nawupf", NvmeNamespace, params.atomic_nawupf, 0),
+ DEFINE_PROP_BOOL("atomic.nsfeat", NvmeNamespace, params.atomic_nsfeat, 0),
};
static void nvme_ns_class_init(ObjectClass *oc, const void *data)
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 67ed562e0086..7d01080fc1f9 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -218,6 +218,9 @@ typedef struct NvmeNamespaceParams {
struct {
char *ruhs;
} fdp;
+ uint16_t atomic_nawun;
+ uint16_t atomic_nawupf;
+ bool atomic_nsfeat;
} NvmeNamespaceParams;
typedef struct NvmeAtomic {
@@ -280,6 +283,9 @@ typedef struct NvmeNamespace {
/* reclaim unit handle identifiers indexed by placement handle */
uint16_t *phs;
} fdp;
+ uint16_t atomic_nawun;
+ uint16_t atomic_nawupf;
+ NvmeAtomic atomic;
} NvmeNamespace;
static inline uint32_t nvme_nsid(NvmeNamespace *ns)
--
2.51.0
* Re: [PULL 6/7] hw/nvme: enable ns atomic writes
2025-10-30 7:29 ` [PULL 6/7] hw/nvme: enable ns atomic writes Klaus Jensen
@ 2025-11-02 11:50 ` Peter Maydell
2025-11-03 12:50 ` Klaus Jensen
0 siblings, 1 reply; 11+ messages in thread
From: Peter Maydell @ 2025-11-02 11:50 UTC (permalink / raw)
To: Klaus Jensen
Cc: qemu-devel, Alan Adamson, Jesper Wendel Devantier, Klaus Jensen,
Keith Busch, qemu-block
On Thu, 30 Oct 2025 at 07:30, Klaus Jensen <its@irrelevant.dk> wrote:
>
> From: Alan Adamson <alan.adamson@oracle.com>
>
> Add support for the namespace atomic paramters: NAWUN and NAWUN. Namespace
> Atomic Compare and Write Unit (NACWU) is not currently supported.
>
> Writes that adhere to the NACWU and NAWUPF parameters are guaranteed to be
> atomic.
>
> New NVMe QEMU Paramters (See NVMe Specification for details):
> atomic.nawun=UINT16 (default: 0)
> atomic.nawupf=UINT16 (default: 0)
> atomic.nsfeat (default off) - Set Namespace Supported Atomic Boundary &
> Power (NSABP) bit in Namespace Features (NSFEAT) in the Identify
> Namespace Data Structure
>
> See the NVMe Specification for more information.
>
> Signed-off-by: Alan Adamson <alan.adamson@oracle.com>
> Reviewed-by: Jesper Wendel Devantier <foss@defmacro.it>
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> + /* Set atomic write parameters */
> + if (ns->params.atomic_nsfeat) {
> + id_ns->nsfeat |= NVME_ID_NS_NSFEAT_NSABPNS;
> + id_ns->nawun = cpu_to_le16(ns->params.atomic_nawun);
> + if (!id->awupf || (id_ns->nawun && (id_ns->nawun < id->awun))) {
> + error_report("Invalid NAWUN: %x AWUN=%x", id_ns->nawun, id->awun);
> + }
This error check is about NAWUN, but the condition looks at id->awpuf.
Is that intentional ? (Coverity wonders if this is a copy-paste error:
CID 1642811.)
We should return early if we've detected an error case in the properties.
By default we'll fall on through. Similarly below.
This is a realize method, so error handling should be by
setting the 'error' argument, not by error_report().
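e.g. something along the lines of (rough, untested sketch):
    if (id_ns->nawupf > id_ns->nawun) {
        error_setg(errp, "atomic.nawupf must not exceed atomic.nawun");
        return;
    }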
> + id_ns->nawupf = cpu_to_le16(ns->params.atomic_nawupf);
> + if (!id->awupf || (id_ns->nawupf && (id_ns->nawupf < id->awupf))) {
> + error_report("Invalid NAWUPF: %x AWUPF=%x",
> + id_ns->nawupf, id->awupf);
> + }
> + if (id_ns->nawupf > id_ns->nawun) {
> + error_report("Invalid: NAWUN=%x NAWUPF=%x",
> + id_ns->nawun, id_ns->nawupf);
> + }
Personally I find this stack of checks a bit confusing -- we
are presumably catching various different invalid combinations
of the properties, but the error messages we produce are rather
unspecific. If it's the case that (for instance) the NAWUPF
cannot be larger than the NAWUN, we could tell the user that
specifically rather than just saying "Invalid" and making them
go look up what the requirements are in the spec or the code.
> + }
thanks
-- PMM
* Re: [PULL 6/7] hw/nvme: enable ns atomic writes
2025-11-02 11:50 ` Peter Maydell
@ 2025-11-03 12:50 ` Klaus Jensen
0 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-11-03 12:50 UTC (permalink / raw)
To: Peter Maydell
Cc: qemu-devel, Alan Adamson, Jesper Wendel Devantier, Klaus Jensen,
Keith Busch, qemu-block
On Nov 2 11:50, Peter Maydell wrote:
> On Thu, 30 Oct 2025 at 07:30, Klaus Jensen <its@irrelevant.dk> wrote:
> >
> > From: Alan Adamson <alan.adamson@oracle.com>
> >
> > Add support for the namespace atomic paramters: NAWUN and NAWUN. Namespace
> > Atomic Compare and Write Unit (NACWU) is not currently supported.
> >
> > Writes that adhere to the NACWU and NAWUPF parameters are guaranteed to be
> > atomic.
> >
> > New NVMe QEMU Paramters (See NVMe Specification for details):
> > atomic.nawun=UINT16 (default: 0)
> > atomic.nawupf=UINT16 (default: 0)
> > atomic.nsfeat (default off) - Set Namespace Supported Atomic Boundary &
> > Power (NSABP) bit in Namespace Features (NSFEAT) in the Identify
> > Namespace Data Structure
> >
> > See the NVMe Specification for more information.
> >
> > Signed-off-by: Alan Adamson <alan.adamson@oracle.com>
> > Reviewed-by: Jesper Wendel Devantier <foss@defmacro.it>
> > Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
>
>
>
> > + /* Set atomic write parameters */
> > + if (ns->params.atomic_nsfeat) {
> > + id_ns->nsfeat |= NVME_ID_NS_NSFEAT_NSABPNS;
> > + id_ns->nawun = cpu_to_le16(ns->params.atomic_nawun);
> > + if (!id->awupf || (id_ns->nawun && (id_ns->nawun < id->awun))) {
> > + error_report("Invalid NAWUN: %x AWUN=%x", id_ns->nawun, id->awun);
> > + }
>
> This error check is about NAWUN, but the condition looks at id->awpuf.
> Is that intentional ? (Coverity wonders if this is a copy-paste error:
> CID 1642811.)
>
The check is as intended, but I can understand why Coverity thinks it
may be off. I'll rework it.
> We should return early if we've detected an error case in the properties.
> By default we'll fall on through. Similarly below.
>
> This is a realize method, so error handling should be by
> setting the 'error' argument, not by error_report().
>
True, I'll fix that up.
> > + id_ns->nawupf = cpu_to_le16(ns->params.atomic_nawupf);
> > + if (!id->awupf || (id_ns->nawupf && (id_ns->nawupf < id->awupf))) {
> > + error_report("Invalid NAWUPF: %x AWUPF=%x",
> > + id_ns->nawupf, id->awupf);
> > + }
> > + if (id_ns->nawupf > id_ns->nawun) {
> > + error_report("Invalid: NAWUN=%x NAWUPF=%x",
> > + id_ns->nawun, id_ns->nawupf);
> > + }
>
> Personally I find this stack of checks a bit confusing -- we
> are presumably catching various different invalid combinations
> of the properties, but the error messages we produce are rather
> unspecific. If it's the case that (for instance) the NAWUPF
> cannot be larger than the NAWUN, we could tell the user that
> specifically rather than just saying "Invalid" and making them
> go look up what the requirements are in the spec or the code.
>
I'll fix up the parameter validation.
Thanks Peter!
* [PULL 7/7] hw/nvme: add atomic boundary support
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
` (5 preceding siblings ...)
2025-10-30 7:29 ` [PULL 6/7] hw/nvme: enable ns atomic writes Klaus Jensen
@ 2025-10-30 7:29 ` Klaus Jensen
2025-11-01 8:36 ` [PULL 0/7] nvme queue Richard Henderson
7 siblings, 0 replies; 11+ messages in thread
From: Klaus Jensen @ 2025-10-30 7:29 UTC (permalink / raw)
To: qemu-devel
Cc: Peter Maydell, Alan Adamson, Jesper Wendel Devantier,
Klaus Jensen, Keith Busch, Klaus Jensen, qemu-block
From: Alan Adamson <alan.adamson@oracle.com>
Add support for the namespace atomic boundary parameters: NABO, NABSN, and NABSPF.
Writes that cross an atomic boundary are guaranteed to be atomic only if
their size is less than or equal to the value reported by AWUN/AWUPF. If
AWUN/AWUPF is set to zero, writes that cross an atomic boundary are not
guaranteed to be atomic.
The value reported by the NABO field indicates the LBA on this namespace
where the first atomic boundary starts.
New NVMe QEMU Parameters (see the NVMe Specification for details):
atomic.nabo=UINT16 (default: 0)
atomic.nabsn=UINT16 (default: 0)
atomic.nabspf=UINT16 (default: 0)
See the NVMe Specification for more information.
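As a rough worked example of the boundary check introduced below (values
chosen only for illustration): with NABSPF=7 and DN set, the boundary is 8
blocks, so the mask is ~0x7. A write with SLBA=6 and NLB=3 (LBAs 6..9 after
subtracting NABO) gives (6 & ~7) = 0 and (9 & ~7) = 8, so it crosses a
boundary; it is then started as an atomic write only if its size (NLB + 1 =
4) does not exceed the controller-level atomic write unit.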
Signed-off-by: Alan Adamson <alan.adamson@oracle.com>
Reviewed-by: Jesper Wendel Devantier <foss@defmacro.it>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/nvme/ctrl.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++
hw/nvme/ns.c | 36 ++++++++++++++++++++++++++++++++++
hw/nvme/nvme.h | 8 ++++++++
3 files changed, 97 insertions(+)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 121a95b2e373..4d150c7206ad 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -6711,9 +6711,21 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
if (n->dn) {
ns->atomic.atomic_max_write_size =
le16_to_cpu(ns->id_ns.nawupf) + 1;
+ if (ns->id_ns.nabspf) {
+ ns->atomic.atomic_boundary =
+ le16_to_cpu(ns->id_ns.nabspf) + 1;
+ } else {
+ ns->atomic.atomic_boundary = 0;
+ }
} else {
ns->atomic.atomic_max_write_size =
le16_to_cpu(ns->id_ns.nawun) + 1;
+ if (ns->id_ns.nabsn) {
+ ns->atomic.atomic_boundary =
+ le16_to_cpu(ns->id_ns.nabsn) + 1;
+ } else {
+ ns->atomic.atomic_boundary = 0;
+ }
}
if (ns->atomic.atomic_max_write_size == 1) {
ns->atomic.atomic_writes = 0;
@@ -7636,6 +7648,36 @@ static void nvme_update_sq_tail(NvmeSQueue *sq)
trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail);
}
+static int nvme_atomic_boundary_check(NvmeCtrl *n, NvmeCmd *cmd,
+ NvmeAtomic *atomic)
+{
+ NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
+
+ if (atomic->atomic_boundary) {
+ uint64_t slba = le64_to_cpu(rw->slba);
+ uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb);
+ uint64_t elba = slba + nlb;
+ uint64_t imask;
+
+ if ((slba < atomic->atomic_nabo) || (elba < atomic->atomic_nabo)) {
+ return 0;
+ }
+
+ /* Update slba/elba based on boundary offset */
+ slba = slba - atomic->atomic_nabo;
+ elba = slba + nlb;
+
+ imask = ~(atomic->atomic_boundary - 1);
+ if ((slba & imask) != (elba & imask)) {
+ if (n->atomic.atomic_max_write_size &&
+ ((nlb + 1) <= n->atomic.atomic_max_write_size)) {
+ return 1;
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
#define NVME_ATOMIC_NO_START 0
#define NVME_ATOMIC_START_ATOMIC 1
#define NVME_ATOMIC_START_NONATOMIC 2
@@ -7655,6 +7697,15 @@ static int nvme_atomic_write_check(NvmeCtrl *n, NvmeCmd *cmd,
cmd_atomic_wr = false;
}
+ /*
+ * Check if a write crosses an atomic boundary.
+ */
+ if (cmd->opcode == NVME_CMD_WRITE) {
+ if (!nvme_atomic_boundary_check(n, cmd, atomic)) {
+ cmd_atomic_wr = false;
+ }
+ }
+
/*
* Walk the queues to see if there are any atomic conflicts.
*/
@@ -8741,6 +8792,8 @@ static void nvme_init_state(NvmeCtrl *n)
} else {
atomic->atomic_writes = 1;
}
+ atomic->atomic_boundary = 0;
+ atomic->atomic_nabo = 0;
}
}
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 28aacb8db59a..86f5ab0a7572 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -747,6 +747,28 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
error_report("Invalid: NAWUN=%x NAWUPF=%x",
id_ns->nawun, id_ns->nawupf);
}
+ id_ns->nabsn = cpu_to_le16(ns->params.atomic_nabsn);
+ id_ns->nabspf = cpu_to_le16(ns->params.atomic_nabspf);
+ id_ns->nabo = cpu_to_le16(ns->params.atomic_nabo);
+ if (!id->awun || (id_ns->nabsn && ((id_ns->nabsn < id_ns->nawun) ||
+ (id_ns->nabsn < id->awun)))) {
+ error_report("Invalid NABSN: %x NAWUN=%x AWUN=%x",
+ id_ns->nabsn, id_ns->nawun, id->awun);
+ }
+ if (!id->awupf || (id_ns->nabspf && ((id_ns->nabspf < id_ns->nawupf) ||
+ (id_ns->nawupf < id->awupf)))) {
+ error_report("Invalid NABSPF: %x NAWUPF=%x AWUPF=%x",
+ id_ns->nabspf, id_ns->nawupf, id->awupf);
+ }
+ if (id_ns->nabo && ((id_ns->nabo > id_ns->nabsn) ||
+ (id_ns->nabo > id_ns->nabspf))) {
+ error_report("Invalid NABO: %x NABSN=%x NABSPF=%x",
+ id_ns->nabo, id_ns->nabsn, id_ns->nabspf);
+ }
+ if (id_ns->nawupf > id_ns->nawun) {
+ error_report("Invalid: NAWUN=%x NAWUPF=%x", id_ns->nawun,
+ id_ns->nawupf);
+ }
}
if (id_ns->nawun || id_ns->nawupf) {
@@ -754,14 +776,25 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
if (n->dn) {
atomic->atomic_max_write_size = cpu_to_le16(id_ns->nawupf) + 1;
+ if (id_ns->nabspf) {
+ atomic->atomic_boundary = cpu_to_le16(id_ns->nabspf) + 1;
+ } else {
+ atomic->atomic_boundary = 0;
+ }
} else {
atomic->atomic_max_write_size = cpu_to_le16(id_ns->nawun) + 1;
+ if (id_ns->nabsn) {
+ atomic->atomic_boundary = cpu_to_le16(id_ns->nabsn) + 1;
+ } else {
+ atomic->atomic_boundary = 0;
+ }
}
if (atomic->atomic_max_write_size == 1) {
atomic->atomic_writes = 0;
} else {
atomic->atomic_writes = 1;
}
+ atomic->atomic_nabo = cpu_to_le16(id_ns->nabo);
}
/* reparent to subsystem bus */
@@ -841,6 +874,9 @@ static const Property nvme_ns_props[] = {
DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
DEFINE_PROP_UINT16("atomic.nawun", NvmeNamespace, params.atomic_nawun, 0),
DEFINE_PROP_UINT16("atomic.nawupf", NvmeNamespace, params.atomic_nawupf, 0),
+ DEFINE_PROP_UINT16("atomic.nabspf", NvmeNamespace, params.atomic_nabspf, 0),
+ DEFINE_PROP_UINT16("atomic.nabsn", NvmeNamespace, params.atomic_nabsn, 0),
+ DEFINE_PROP_UINT16("atomic.nabo", NvmeNamespace, params.atomic_nabo, 0),
DEFINE_PROP_BOOL("atomic.nsfeat", NvmeNamespace, params.atomic_nsfeat, 0),
};
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 7d01080fc1f9..a7d225d2d80b 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -220,11 +220,16 @@ typedef struct NvmeNamespaceParams {
} fdp;
uint16_t atomic_nawun;
uint16_t atomic_nawupf;
+ uint16_t atomic_nabsn;
+ uint16_t atomic_nabspf;
+ uint16_t atomic_nabo;
bool atomic_nsfeat;
} NvmeNamespaceParams;
typedef struct NvmeAtomic {
uint32_t atomic_max_write_size;
+ uint64_t atomic_boundary;
+ uint64_t atomic_nabo;
bool atomic_writes;
} NvmeAtomic;
@@ -285,6 +290,9 @@ typedef struct NvmeNamespace {
} fdp;
uint16_t atomic_nawun;
uint16_t atomic_nawupf;
+ uint16_t atomic_nabsn;
+ uint16_t atomic_nabspf;
+ uint16_t atomic_nabo;
NvmeAtomic atomic;
} NvmeNamespace;
--
2.51.0
* Re: [PULL 0/7] nvme queue
2025-10-30 7:29 [PULL 0/7] nvme queue Klaus Jensen
` (6 preceding siblings ...)
2025-10-30 7:29 ` [PULL 7/7] hw/nvme: add atomic boundary support Klaus Jensen
@ 2025-11-01 8:36 ` Richard Henderson
7 siblings, 0 replies; 11+ messages in thread
From: Richard Henderson @ 2025-11-01 8:36 UTC (permalink / raw)
To: qemu-devel
On 10/30/25 08:29, Klaus Jensen wrote:
> From: Klaus Jensen<k.jensen@samsung.com>
>
> Hi,
>
> The following changes since commit e090e0312dc9030d94e38e3d98a88718d3561e4e:
>
> Merge tag 'pull-trivial-patches' ofhttps://gitlab.com/mjt0k/qemu into staging (2025-10-29 10:44:15 +0100)
>
> are available in the Git repository at:
>
> https://gitlab.com/birkelund/qemu.git tags/pull-nvme-20251030
>
> for you to fetch changes up to bce51b83709b548fbecbe64acd65225587c5b803:
>
> hw/nvme: add atomic boundary support (2025-10-30 07:07:14 +0100)
>
> ----------------------------------------------------------------
> nvme queue
Applied, thanks. Please update https://wiki.qemu.org/ChangeLog/10.2 as appropriate.
r~