* [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec()
2025-02-20 1:36 [PATCH v4 0/4] cxl: Dirty shutdown followups Davidlohr Bueso
@ 2025-02-20 1:36 ` Davidlohr Bueso
2025-02-20 15:34 ` Dave Jiang
` (3 more replies)
2025-02-20 1:36 ` [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state() Davidlohr Bueso
` (2 subsequent siblings)
3 siblings, 4 replies; 23+ messages in thread
From: Davidlohr Bueso @ 2025-02-20 1:36 UTC (permalink / raw)
To: dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Add a helper to fetch the port/device GPF dvsecs. This is
currently only used for ports, but a later patch to export
dirty count to users will make use of the device one.
Reviewed-by: Li Ming <ming.li@zohomail.com>
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
drivers/cxl/core/pci.c | 30 ++++++++++++++++++++----------
drivers/cxl/cxl.h | 2 ++
2 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index a5c65f79db18..96fecb799cbc 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1072,6 +1072,22 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
#define GPF_TIMEOUT_BASE_MAX 2
#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
+u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
+{
+ u16 dvsec;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
+ is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
+ if (!dvsec)
+ dev_warn(dev, "%s GPF DVSEC not present\n",
+ is_port ? "Port" : "Device");
+ return dvsec;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
+
static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
{
u64 base, scale;
@@ -1116,26 +1132,20 @@ int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
{
struct pci_dev *pdev;
- if (!dev_is_pci(dport_dev))
- return 0;
-
- pdev = to_pci_dev(dport_dev);
- if (!pdev || !port)
+ if (!port)
return -EINVAL;
if (!port->gpf_dvsec) {
int dvsec;
- dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
- CXL_DVSEC_PORT_GPF);
- if (!dvsec) {
- pci_warn(pdev, "Port GPF DVSEC not present\n");
+ dvsec = cxl_gpf_get_dvsec(dport_dev, true);
+ if (!dvsec)
return -EINVAL;
- }
port->gpf_dvsec = dvsec;
}
+ pdev = to_pci_dev(dport_dev);
update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6baec4ba9141..29f2ab0d5bf6 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -901,4 +901,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#define __mock static
#endif
+u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
+
#endif /* __CXL_H__ */
--
2.39.5
* Re: [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec()
2025-02-20 1:36 ` [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec() Davidlohr Bueso
@ 2025-02-20 15:34 ` Dave Jiang
2025-02-20 16:08 ` Ira Weiny
` (2 subsequent siblings)
3 siblings, 0 replies; 23+ messages in thread
From: Dave Jiang @ 2025-02-20 15:34 UTC (permalink / raw)
To: Davidlohr Bueso, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, linux-cxl
On 2/19/25 6:36 PM, Davidlohr Bueso wrote:
> Add a helper to fetch the port/device GPF dvsecs. This is
> currently only used for ports, but a later patch to export
> dirty count to users will make use of the device one.
>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> ---
> drivers/cxl/core/pci.c | 30 ++++++++++++++++++++----------
> drivers/cxl/cxl.h | 2 ++
> 2 files changed, 22 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
> index a5c65f79db18..96fecb799cbc 100644
> --- a/drivers/cxl/core/pci.c
> +++ b/drivers/cxl/core/pci.c
> @@ -1072,6 +1072,22 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
> #define GPF_TIMEOUT_BASE_MAX 2
> #define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
>
> +u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
> +{
> + u16 dvsec;
> +
> + if (!dev_is_pci(dev))
> + return 0;
> +
> + dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
> + is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
> + if (!dvsec)
> + dev_warn(dev, "%s GPF DVSEC not present\n",
> + is_port ? "Port" : "Device");
> + return dvsec;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
> +
> static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
> {
> u64 base, scale;
> @@ -1116,26 +1132,20 @@ int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
> {
> struct pci_dev *pdev;
>
> - if (!dev_is_pci(dport_dev))
> - return 0;
> -
> - pdev = to_pci_dev(dport_dev);
> - if (!pdev || !port)
> + if (!port)
> return -EINVAL;
>
> if (!port->gpf_dvsec) {
> int dvsec;
>
> - dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
> - CXL_DVSEC_PORT_GPF);
> - if (!dvsec) {
> - pci_warn(pdev, "Port GPF DVSEC not present\n");
> + dvsec = cxl_gpf_get_dvsec(dport_dev, true);
> + if (!dvsec)
> return -EINVAL;
> - }
>
> port->gpf_dvsec = dvsec;
> }
>
> + pdev = to_pci_dev(dport_dev);
> update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
> update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);
>
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 6baec4ba9141..29f2ab0d5bf6 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -901,4 +901,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
> #define __mock static
> #endif
>
> +u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
> +
> #endif /* __CXL_H__ */
* Re: [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec()
2025-02-20 1:36 ` [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec() Davidlohr Bueso
2025-02-20 15:34 ` Dave Jiang
@ 2025-02-20 16:08 ` Ira Weiny
2025-02-20 17:04 ` Jonathan Cameron
2025-02-21 0:15 ` Fan Ni
3 siblings, 0 replies; 23+ messages in thread
From: Ira Weiny @ 2025-02-20 16:08 UTC (permalink / raw)
To: Davidlohr Bueso, dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Davidlohr Bueso wrote:
> Add a helper to fetch the port/device GPF dvsecs. This is
> currently only used for ports, but a later patch to export
> dirty count to users will make use of the device one.
>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
[snip]
* Re: [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec()
2025-02-20 1:36 ` [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec() Davidlohr Bueso
2025-02-20 15:34 ` Dave Jiang
2025-02-20 16:08 ` Ira Weiny
@ 2025-02-20 17:04 ` Jonathan Cameron
2025-02-21 0:15 ` Fan Ni
3 siblings, 0 replies; 23+ messages in thread
From: Jonathan Cameron @ 2025-02-20 17:04 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: dave.jiang, dan.j.williams, alison.schofield, ira.weiny,
vishal.l.verma, seven.yi.lee, ming.li, a.manzanares, fan.ni,
anisa.su, linux-cxl
On Wed, 19 Feb 2025 17:36:01 -0800
Davidlohr Bueso <dave@stgolabs.net> wrote:
> Add a helper to fetch the port/device GPF dvsecs. This is
> currently only used for ports, but a later patch to export
> dirty count to users will make use of the device one.
>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Maybe an enum for the port vs device thing would have been nice, but
I'm not that bothered and a single-use enum is a bit nasty.
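For the record, purely as an illustrative sketch (untested; the enum
name and values below are invented for illustration, they are not part
of this series), that could look like:
enum cxl_gpf_dvsec_type {
        CXL_GPF_DVSEC_PORT,
        CXL_GPF_DVSEC_DEVICE,
};
u16 cxl_gpf_get_dvsec(struct device *dev, enum cxl_gpf_dvsec_type type)
{
        u16 dvsec;
        if (!dev_is_pci(dev))
                return 0;
        /* Same DVSEC lookup as the patch, keyed off the enum instead of a bool */
        dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
                                          type == CXL_GPF_DVSEC_PORT ?
                                          CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
        if (!dvsec)
                dev_warn(dev, "%s GPF DVSEC not present\n",
                         type == CXL_GPF_DVSEC_PORT ? "Port" : "Device");
        return dvsec;
}
Callers would then read cxl_gpf_get_dvsec(dport_dev, CXL_GPF_DVSEC_PORT)
instead of passing a bare true/false, which is the readability win; the
cost is the single-use enum mentioned above.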
* Re: [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec()
2025-02-20 1:36 ` [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec() Davidlohr Bueso
` (2 preceding siblings ...)
2025-02-20 17:04 ` Jonathan Cameron
@ 2025-02-21 0:15 ` Fan Ni
3 siblings, 0 replies; 23+ messages in thread
From: Fan Ni @ 2025-02-21 0:15 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: dave.jiang, dan.j.williams, jonathan.cameron, alison.schofield,
ira.weiny, vishal.l.verma, seven.yi.lee, ming.li, a.manzanares,
anisa.su, linux-cxl
On Wed, Feb 19, 2025 at 05:36:01PM -0800, Davidlohr Bueso wrote:
> Add a helper to fetch the port/device GPF dvsecs. This is
> currently only used for ports, but a later patch to export
> dirty count to users will make use of the device one.
>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
> ---
Reviewed-by: Fan Ni <fan.ni@samsung.com>
> drivers/cxl/core/pci.c | 30 ++++++++++++++++++++----------
> drivers/cxl/cxl.h | 2 ++
> 2 files changed, 22 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
> index a5c65f79db18..96fecb799cbc 100644
> --- a/drivers/cxl/core/pci.c
> +++ b/drivers/cxl/core/pci.c
> @@ -1072,6 +1072,22 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
> #define GPF_TIMEOUT_BASE_MAX 2
> #define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
>
> +u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
> +{
> + u16 dvsec;
> +
> + if (!dev_is_pci(dev))
> + return 0;
> +
> + dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
> + is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
> + if (!dvsec)
> + dev_warn(dev, "%s GPF DVSEC not present\n",
> + is_port ? "Port" : "Device");
> + return dvsec;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
> +
> static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
> {
> u64 base, scale;
> @@ -1116,26 +1132,20 @@ int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
> {
> struct pci_dev *pdev;
>
> - if (!dev_is_pci(dport_dev))
> - return 0;
> -
> - pdev = to_pci_dev(dport_dev);
> - if (!pdev || !port)
> + if (!port)
> return -EINVAL;
>
> if (!port->gpf_dvsec) {
> int dvsec;
>
> - dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
> - CXL_DVSEC_PORT_GPF);
> - if (!dvsec) {
> - pci_warn(pdev, "Port GPF DVSEC not present\n");
> + dvsec = cxl_gpf_get_dvsec(dport_dev, true);
> + if (!dvsec)
> return -EINVAL;
> - }
>
> port->gpf_dvsec = dvsec;
> }
>
> + pdev = to_pci_dev(dport_dev);
> update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
> update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);
>
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 6baec4ba9141..29f2ab0d5bf6 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -901,4 +901,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
> #define __mock static
> #endif
>
> +u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
> +
> #endif /* __CXL_H__ */
> --
> 2.39.5
>
* [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state()
2025-02-20 1:36 [PATCH v4 0/4] cxl: Dirty shutdown followups Davidlohr Bueso
2025-02-20 1:36 ` [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec() Davidlohr Bueso
@ 2025-02-20 1:36 ` Davidlohr Bueso
2025-02-20 16:08 ` Ira Weiny
2025-02-20 17:12 ` Jonathan Cameron
2025-02-20 1:36 ` [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs Davidlohr Bueso
2025-02-20 1:36 ` [PATCH 4/4] tools/testing/cxl: Set Shutdown State support Davidlohr Bueso
3 siblings, 2 replies; 23+ messages in thread
From: Davidlohr Bueso @ 2025-02-20 1:36 UTC (permalink / raw)
To: dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
... to a better suited 'cxl_arm_dirty_shutdown()'.
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Li Ming <ming.li@zohomail.com>
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
drivers/cxl/core/mbox.c | 4 ++--
drivers/cxl/cxlmem.h | 2 +-
drivers/cxl/pmem.c | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index c5eedcae3b02..86d13f4a1c18 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1281,7 +1281,7 @@ int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
-int cxl_dirty_shutdown_state(struct cxl_memdev_state *mds)
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
{
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
@@ -1297,7 +1297,7 @@ int cxl_dirty_shutdown_state(struct cxl_memdev_state *mds)
return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_dirty_shutdown_state, "CXL");
+EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 8e1e46c348f5..6d60030139df 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -822,7 +822,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
-int cxl_dirty_shutdown_state(struct cxl_memdev_state *mds);
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index a39e2c52d7ab..6b284962592f 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -90,7 +90,7 @@ static int cxl_nvdimm_probe(struct device *dev)
* clear it upon a successful GPF flow. The exception to this
* is upon Viral detection, per CXL 3.2 section 12.4.2.
*/
- if (cxl_dirty_shutdown_state(mds))
+ if (cxl_arm_dirty_shutdown(mds))
dev_warn(dev, "GPF: could not dirty shutdown state\n");
dev_set_drvdata(dev, nvdimm);
--
2.39.5
* Re: [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state()
2025-02-20 1:36 ` [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state() Davidlohr Bueso
@ 2025-02-20 16:08 ` Ira Weiny
2025-02-20 17:12 ` Jonathan Cameron
1 sibling, 0 replies; 23+ messages in thread
From: Ira Weiny @ 2025-02-20 16:08 UTC (permalink / raw)
To: Davidlohr Bueso, dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Davidlohr Bueso wrote:
> ... to a better suited 'cxl_arm_dirty_shutdown()'.
>
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
[snip]
* Re: [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state()
2025-02-20 1:36 ` [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state() Davidlohr Bueso
2025-02-20 16:08 ` Ira Weiny
@ 2025-02-20 17:12 ` Jonathan Cameron
2025-02-20 19:51 ` Davidlohr Bueso
1 sibling, 1 reply; 23+ messages in thread
From: Jonathan Cameron @ 2025-02-20 17:12 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: dave.jiang, dan.j.williams, alison.schofield, ira.weiny,
vishal.l.verma, seven.yi.lee, ming.li, a.manzanares, fan.ni,
anisa.su, linux-cxl
On Wed, 19 Feb 2025 17:36:02 -0800
Davidlohr Bueso <dave@stgolabs.net> wrote:
> ... to a better suited 'cxl_arm_dirty_shutdown()'.
But it works on x86 as well! I'll cope, I suppose.
Mind you, why not let it take the state as a parameter
and always pass 1? From a mailbox command point of
view, 0 is valid. The text in Table 8-152 has me a little
confused, but I think it says you can set it to 1 and back
to 0 and then reset, at which point the dirty shutdown
count will not increment.
No idea why you'd do that, but just passing a 1 into
this function would make a call of
cxl_set_dirty_shutdown_state(mds, 1)
seem reasonable; or pass a boolean.
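Roughly, something like this (an untested, illustrative sketch only; the
function name is just the strawman above, not something that exists in
this series, and it reuses the set-shutdown-state payload from patch 3):
int cxl_set_dirty_shutdown_state(struct cxl_memdev_state *mds, bool dirty)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_set_shutdown_state_in in = {
                .state = dirty,
        };
        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
                .size_in = sizeof(in),
                .payload_in = &in,
        };
        /* The probe path would then arm it with cxl_set_dirty_shutdown_state(mds, 1) */
        return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}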
>
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
> ---
> drivers/cxl/core/mbox.c | 4 ++--
> drivers/cxl/cxlmem.h | 2 +-
> drivers/cxl/pmem.c | 2 +-
> 3 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
> index c5eedcae3b02..86d13f4a1c18 100644
> --- a/drivers/cxl/core/mbox.c
> +++ b/drivers/cxl/core/mbox.c
> @@ -1281,7 +1281,7 @@ int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
> }
> EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
>
> -int cxl_dirty_shutdown_state(struct cxl_memdev_state *mds)
> +int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
> {
> struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
> struct cxl_mbox_cmd mbox_cmd;
> @@ -1297,7 +1297,7 @@ int cxl_dirty_shutdown_state(struct cxl_memdev_state *mds)
>
> return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
> }
> -EXPORT_SYMBOL_NS_GPL(cxl_dirty_shutdown_state, "CXL");
> +EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
>
> int cxl_set_timestamp(struct cxl_memdev_state *mds)
> {
* Re: [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state()
2025-02-20 17:12 ` Jonathan Cameron
@ 2025-02-20 19:51 ` Davidlohr Bueso
0 siblings, 0 replies; 23+ messages in thread
From: Davidlohr Bueso @ 2025-02-20 19:51 UTC (permalink / raw)
To: Jonathan Cameron
Cc: dave.jiang, dan.j.williams, alison.schofield, ira.weiny,
vishal.l.verma, seven.yi.lee, ming.li, a.manzanares, fan.ni,
anisa.su, linux-cxl
On Thu, 20 Feb 2025, Jonathan Cameron wrote:
>On Wed, 19 Feb 2025 17:36:02 -0800
>Davidlohr Bueso <dave@stgolabs.net> wrote:
>
>> ... to a better suited 'cxl_arm_dirty_shutdown()'.
>
>But it works on x86 as well! I'll cope I suppose.
>
>Mind you why not let it take the state as a parameter
>and always pass 1? From a mailbox command point of
>view 0 is valid. The text in table 8-152 has me a little confused
>but I think it says you can set it to 1 and back to 0 and
>then reset at which point the dirty shutdown count will
>not increment.
>
>No idea why you'd do that, but just passing a 1 into
>this function would make a call of
>cxl_set_dirty_shutdown_state(mds, 1)
>seem reasonable, or pass a boolean.
I prefer how it is now, really.
No point passing an arg that will always be 1, and since
"arm/setup" is in the name, passing 0 would be
counterintuitive as well.
Thanks,
Davidlohr
* [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs
2025-02-20 1:36 [PATCH v4 0/4] cxl: Dirty shutdown followups Davidlohr Bueso
2025-02-20 1:36 ` [PATCH 1/4] cxl/pci: Introduce cxl_gpf_get_dvsec() Davidlohr Bueso
2025-02-20 1:36 ` [PATCH 2/4] cxl/pmem: Rename cxl_dirty_shutdown_state() Davidlohr Bueso
@ 2025-02-20 1:36 ` Davidlohr Bueso
2025-02-20 16:11 ` Ira Weiny
2025-02-20 17:29 ` Jonathan Cameron
2025-02-20 1:36 ` [PATCH 4/4] tools/testing/cxl: Set Shutdown State support Davidlohr Bueso
3 siblings, 2 replies; 23+ messages in thread
From: Davidlohr Bueso @ 2025-02-20 1:36 UTC (permalink / raw)
To: dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Similar to how the acpi_nfit driver exports Optane dirty shutdown count,
introduce:
/sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
Under the conditions that 1) dirty shutdown can be set, 2) Device GPF
DVSEC exists, and 3) the count itself can be retrieved.
Suggested-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
Documentation/ABI/testing/sysfs-bus-cxl | 12 +++
Documentation/driver-api/cxl/maturity-map.rst | 2 +-
drivers/cxl/core/mbox.c | 21 +++++
drivers/cxl/cxl.h | 1 +
drivers/cxl/cxlmem.h | 13 +++
drivers/cxl/pmem.c | 79 ++++++++++++++++---
6 files changed, 118 insertions(+), 10 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 3f5627a1210a..a7491d214098 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -586,3 +586,15 @@ Description:
See Documentation/ABI/stable/sysfs-devices-node. access0 provides
the number to the closest initiator and access1 provides the
number to the closest CPU.
+
+
+What: /sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
+Date: Feb, 2025
+KernelVersion: v6.15
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) The device dirty shutdown count value, which is the number
+ of times the device could have incurred in potential data loss.
+ The count is persistent across power loss and wraps back to 0
+ upon overflow. If this file is not present, the device does not
+ have the necessary support for dirty tracking.
diff --git a/Documentation/driver-api/cxl/maturity-map.rst b/Documentation/driver-api/cxl/maturity-map.rst
index 99dd2c841e69..a2288f9df658 100644
--- a/Documentation/driver-api/cxl/maturity-map.rst
+++ b/Documentation/driver-api/cxl/maturity-map.rst
@@ -130,7 +130,7 @@ Mailbox commands
* [0] Switch CCI
* [3] Timestamp
* [1] PMEM labels
-* [1] PMEM GPF / Dirty Shutdown
+* [3] PMEM GPF / Dirty Shutdown
* [0] Scan Media
PMU
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 86d13f4a1c18..6bc398182a5d 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1281,6 +1281,27 @@ int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_get_health_info_out hi;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
+ .size_out = sizeof(hi),
+ .payload_out = &hi,
+ };
+
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (!rc)
+ *count = le32_to_cpu(hi.dirty_shutdown_cnt);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
+
int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
{
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 29f2ab0d5bf6..8bdfa536262e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -542,6 +542,7 @@ struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
+ u64 dirty_shutdowns;
};
struct cxl_pmem_region_mapping {
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 6d60030139df..3b6ef9e936c3 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -681,6 +681,18 @@ struct cxl_mbox_set_partition_info {
#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0)
+/* Get Health Info Output Payload CXL 3.2 Spec 8.2.10.9.3.1 Table 8-148 */
+struct cxl_mbox_get_health_info_out {
+ u8 health_status;
+ u8 media_status;
+ u8 additional_status;
+ u8 life_used;
+ __le16 device_temperature;
+ __le32 dirty_shutdown_cnt;
+ __le32 corrected_volatile_error_cnt;
+ __le32 corrected_persistent_error_cnt;
+} __packed;
+
/* Set Shutdown State Input Payload CXL 3.2 Spec 8.2.10.9.3.5 Table 8-152 */
struct cxl_mbox_set_shutdown_state_in {
u8 state;
@@ -822,6 +834,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count);
int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 6b284962592f..cb039cfc62cb 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -38,19 +38,48 @@ static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
- return sysfs_emit(buf, "%lld\n", cxlds->serial);
+ return sysfs_emit(buf, "%llu\n", cxlds->serial);
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ return sysfs_emit(buf, "%lld\n", cxl_nvd->dirty_shutdowns);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *cxl_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_provider.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL
};
+#define CXL_INVALID_DIRTY_SHUTDOWN_COUNT ULLONG_MAX
+static umode_t cxl_dimm_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ if (a == &dev_attr_dirty_shutdown.attr) {
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ if (cxl_nvd->dirty_shutdowns ==
+ CXL_INVALID_DIRTY_SHUTDOWN_COUNT)
+ return 0;
+ }
+
+ return a->mode;
+}
+
static const struct attribute_group cxl_dimm_attribute_group = {
.name = "cxl",
.attrs = cxl_dimm_attributes,
+ .is_visible = cxl_dimm_visible
};
static const struct attribute_group *cxl_dimm_attribute_groups[] = {
@@ -58,6 +87,38 @@ static const struct attribute_group *cxl_dimm_attribute_groups[] = {
NULL
};
+static void cxl_nvdimm_setup_dirty_tracking(struct cxl_nvdimm *cxl_nvd)
+{
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = &cxl_nvd->dev;
+ u32 count;
+
+ /*
+ * Dirty tracking is enabled and exposed to the user, only when:
+ * - dirty shutdown on the device can be set, and,
+ * - the device has a Device GPF DVSEC (albeit unused), and,
+ * - the Get Health Info cmd can retrieve the device's dirty count.
+ */
+ cxl_nvd->dirty_shutdowns = CXL_INVALID_DIRTY_SHUTDOWN_COUNT;
+
+ if (cxl_arm_dirty_shutdown(mds)) {
+ dev_warn(dev, "GPF: could not set dirty shutdown state\n");
+ return;
+ }
+
+ if (!cxl_gpf_get_dvsec(cxlds->dev, false))
+ return;
+
+ if (cxl_get_dirty_count(mds, &count)) {
+ dev_warn(dev, "GPF: could not retrieve dirty count\n");
+ return;
+ }
+
+ cxl_nvd->dirty_shutdowns = count;
+}
+
static int cxl_nvdimm_probe(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
@@ -78,20 +139,20 @@ static int cxl_nvdimm_probe(struct device *dev)
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
- nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
- cxl_dimm_attribute_groups, flags,
- cmd_mask, 0, NULL, cxl_nvd->dev_id,
- cxl_security_ops, NULL);
- if (!nvdimm)
- return -ENOMEM;
/*
* Set dirty shutdown now, with the expectation that the device
* clear it upon a successful GPF flow. The exception to this
* is upon Viral detection, per CXL 3.2 section 12.4.2.
*/
- if (cxl_arm_dirty_shutdown(mds))
- dev_warn(dev, "GPF: could not dirty shutdown state\n");
+ cxl_nvdimm_setup_dirty_tracking(cxl_nvd);
+
+ nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
+ cxl_dimm_attribute_groups, flags,
+ cmd_mask, 0, NULL, cxl_nvd->dev_id,
+ cxl_security_ops, NULL);
+ if (!nvdimm)
+ return -ENOMEM;
dev_set_drvdata(dev, nvdimm);
return devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
--
2.39.5
* Re: [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs
2025-02-20 1:36 ` [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs Davidlohr Bueso
@ 2025-02-20 16:11 ` Ira Weiny
2025-02-20 17:29 ` Jonathan Cameron
1 sibling, 0 replies; 23+ messages in thread
From: Ira Weiny @ 2025-02-20 16:11 UTC (permalink / raw)
To: Davidlohr Bueso, dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Davidlohr Bueso wrote:
> Similar to how the acpi_nfit driver exports Optane dirty shutdown count,
> introduce:
>
> /sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
>
> Under the conditions that 1) dirty shutdown can be set, 2) Device GPF
> DVSEC exists, and 3) the count itself can be retrieved.
>
> Suggested-by: Dan Williams <dan.j.williams@intel.com>
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
[snip]
* Re: [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs
2025-02-20 1:36 ` [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs Davidlohr Bueso
2025-02-20 16:11 ` Ira Weiny
@ 2025-02-20 17:29 ` Jonathan Cameron
2025-02-20 19:28 ` Davidlohr Bueso
1 sibling, 1 reply; 23+ messages in thread
From: Jonathan Cameron @ 2025-02-20 17:29 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: dave.jiang, dan.j.williams, alison.schofield, ira.weiny,
vishal.l.verma, seven.yi.lee, ming.li, a.manzanares, fan.ni,
anisa.su, linux-cxl
On Wed, 19 Feb 2025 17:36:03 -0800
Davidlohr Bueso <dave@stgolabs.net> wrote:
> Similar to how the acpi_nfit driver exports Optane dirty shutdown count,
> introduce:
>
> /sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
>
> Under the conditions that 1) dirty shutdown can be set, 2) Device GPF
> DVSEC exists, and 3) the count itself can be retrieved.
>
> Suggested-by: Dan Williams <dan.j.williams@intel.com>
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
One trivial thing below, otherwise
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
> index 6b284962592f..cb039cfc62cb 100644
> --- a/drivers/cxl/pmem.c
> +++ b/drivers/cxl/pmem.c
> @@ -38,19 +38,48 @@ static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *
> struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
> struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
>
> - return sysfs_emit(buf, "%lld\n", cxlds->serial);
> + return sysfs_emit(buf, "%llu\n", cxlds->serial);
I guess you 'fixed' the wrong one?
> }
> static DEVICE_ATTR_RO(id);
>
> +static ssize_t dirty_shutdown_show(struct device *dev,
> + struct device_attribute *attr, char *buf)
> +{
> + struct nvdimm *nvdimm = to_nvdimm(dev);
> + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
> +
> + return sysfs_emit(buf, "%lld\n", cxl_nvd->dirty_shutdowns);
It's unsigned, so %llu, though I hope no one ever tests that by
doing that many dirty shutdowns.
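i.e., presumably the same one-character fix as was applied to id_show() above:
        return sysfs_emit(buf, "%llu\n", cxl_nvd->dirty_shutdowns);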
> +}
> +static DEVICE_ATTR_RO(dirty_shutdown);
* Re: [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs
2025-02-20 17:29 ` Jonathan Cameron
@ 2025-02-20 19:28 ` Davidlohr Bueso
0 siblings, 0 replies; 23+ messages in thread
From: Davidlohr Bueso @ 2025-02-20 19:28 UTC (permalink / raw)
To: Jonathan Cameron
Cc: dave.jiang, dan.j.williams, alison.schofield, ira.weiny,
vishal.l.verma, seven.yi.lee, ming.li, a.manzanares, fan.ni,
anisa.su, linux-cxl
On Thu, 20 Feb 2025, Jonathan Cameron wrote:
>On Wed, 19 Feb 2025 17:36:03 -0800
>Davidlohr Bueso <dave@stgolabs.net> wrote:
>
>> Similar to how the acpi_nfit driver exports Optane dirty shutdown count,
>> introduce:
>>
>> /sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
>>
>> Under the conditions that 1) dirty shutdown can be set, 2) Device GPF
>> DVSEC exists, and 3) the count itself can be retrieved.
>>
>> Suggested-by: Dan Williams <dan.j.williams@intel.com>
>> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
>> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
>One trivial thing otherwise
>Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
>
>
>> diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
>> index 6b284962592f..cb039cfc62cb 100644
>> --- a/drivers/cxl/pmem.c
>> +++ b/drivers/cxl/pmem.c
>> @@ -38,19 +38,48 @@ static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *
>> struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
>> struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
>>
>> - return sysfs_emit(buf, "%lld\n", cxlds->serial);
>> + return sysfs_emit(buf, "%llu\n", cxlds->serial);
>
>I guess you 'fixed' the wrong one?
Yes, I'm an idiot. Will send a hopefully final v5 with this fix.
* [PATCH 4/4] tools/testing/cxl: Set Shutdown State support
2025-02-20 1:36 [PATCH v4 0/4] cxl: Dirty shutdown followups Davidlohr Bueso
` (2 preceding siblings ...)
2025-02-20 1:36 ` [PATCH 3/4] cxl/pmem: Export dirty shutdown count via sysfs Davidlohr Bueso
@ 2025-02-20 1:36 ` Davidlohr Bueso
2025-02-20 16:13 ` Ira Weiny
2025-02-20 17:30 ` Jonathan Cameron
3 siblings, 2 replies; 23+ messages in thread
From: Davidlohr Bueso @ 2025-02-20 1:36 UTC (permalink / raw)
To: dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Add support to emulate the CXL Set Shutdown State operation.
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Li Ming <ming.li@zohomail.com>
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
tools/testing/cxl/test/mem.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 495199238335..9ca210b80e27 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -65,6 +65,10 @@ static struct cxl_cel_entry mock_cel[] = {
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
.effect = CXL_CMD_EFFECT_NONE,
},
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SET_SHUTDOWN_STATE),
+ .effect = POLICY_CHANGE_IMMEDIATE,
+ },
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
.effect = CXL_CMD_EFFECT_NONE,
@@ -161,6 +165,7 @@ struct cxl_mockmem_data {
u8 event_buf[SZ_4K];
u64 timestamp;
unsigned long sanitize_timeout;
+ u8 shutdown_state;
};
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
@@ -1088,6 +1093,21 @@ static int mock_health_info(struct cxl_mbox_cmd *cmd)
return 0;
}
+static int mock_set_shutdown_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_shutdown_state_in *ss = cmd->payload_in;
+
+ if (cmd->size_in != sizeof(*ss))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ mdata->shutdown_state = ss->state;
+ return 0;
+}
+
static struct mock_poison {
struct cxl_dev_state *cxlds;
u64 dpa;
@@ -1421,6 +1441,9 @@ static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
rc = mock_passphrase_secure_erase(mdata, cmd);
break;
+ case CXL_MBOX_OP_SET_SHUTDOWN_STATE:
+ rc = mock_set_shutdown_state(mdata, cmd);
+ break;
case CXL_MBOX_OP_GET_POISON:
rc = mock_get_poison(cxlds, cmd);
break;
--
2.39.5
* Re: [PATCH 4/4] tools/testing/cxl: Set Shutdown State support
2025-02-20 1:36 ` [PATCH 4/4] tools/testing/cxl: Set Shutdown State support Davidlohr Bueso
@ 2025-02-20 16:13 ` Ira Weiny
2025-02-20 17:30 ` Jonathan Cameron
1 sibling, 0 replies; 23+ messages in thread
From: Ira Weiny @ 2025-02-20 16:13 UTC (permalink / raw)
To: Davidlohr Bueso, dave.jiang, dan.j.williams
Cc: jonathan.cameron, alison.schofield, ira.weiny, vishal.l.verma,
seven.yi.lee, ming.li, a.manzanares, fan.ni, anisa.su, dave,
linux-cxl
Davidlohr Bueso wrote:
> Add support to emulate the CXL Set Shutdown State operation.
>
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
[snip]
* Re: [PATCH 4/4] tools/testing/cxl: Set Shutdown State support
2025-02-20 1:36 ` [PATCH 4/4] tools/testing/cxl: Set Shutdown State support Davidlohr Bueso
2025-02-20 16:13 ` Ira Weiny
@ 2025-02-20 17:30 ` Jonathan Cameron
1 sibling, 0 replies; 23+ messages in thread
From: Jonathan Cameron @ 2025-02-20 17:30 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: dave.jiang, dan.j.williams, alison.schofield, ira.weiny,
vishal.l.verma, seven.yi.lee, ming.li, a.manzanares, fan.ni,
anisa.su, linux-cxl
On Wed, 19 Feb 2025 17:36:04 -0800
Davidlohr Bueso <dave@stgolabs.net> wrote:
> Add support to emulate the CXL Set Shutdown State operation.
>
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Li Ming <ming.li@zohomail.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
FWIW looks fine to me as well.
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>