From: Ben Cheatham <Benjamin.Cheatham@amd.com>
To: <linux-cxl@vger.kernel.org>
Cc: <benjamin.cheatham@amd.com>
Subject: [PATCH 13/17] cxl/core: Add cache id verification
Date: Tue, 11 Nov 2025 15:40:28 -0600 [thread overview]
Message-ID: <20251111214032.8188-14-Benjamin.Cheatham@amd.com> (raw)
In-Reply-To: <20251111214032.8188-1-Benjamin.Cheatham@amd.com>
The CXL cache id capability (CXL 3.2 8.2.4.28/29) is an optional
capability that allows for multiple CXL.cache devices to be enabled
under a single virtual hierarchy (VH). The cache id capability is
required for having multiple CXL.cache devices in the same VH.
It's possible for the platform to enable and set up the cache id for a
CXL.cache device. Add code to cxl_cache to check whether the device's
cache id is programmed and correct. If it is programmed, allocate the
cache id to prevent reuse.
Checking the correctness of the id requires knowing if the endpoint
device is a type 2 device using HDM-D flows. Add a requirement for type
2 drivers to specify whether the device is using HDM-D flows before
calling devm_cxl_add_cachedev().
Programming of cache ids will come in a later commit.
Signed-off-by: Ben Cheatham <Benjamin.Cheatham@amd.com>
---
drivers/cxl/core/cachedev.c | 20 ++++
drivers/cxl/core/pci.c | 4 +-
drivers/cxl/core/port.c | 229 ++++++++++++++++++++++++++++++++++++
drivers/cxl/core/regs.c | 20 ++++
drivers/cxl/cxl.h | 44 +++++++
drivers/cxl/cxlcache.h | 3 +-
drivers/cxl/port.c | 28 ++++-
7 files changed, 344 insertions(+), 4 deletions(-)
diff --git a/drivers/cxl/core/cachedev.c b/drivers/cxl/core/cachedev.c
index 5693a63baa9b..0b7430450b4e 100644
--- a/drivers/cxl/core/cachedev.c
+++ b/drivers/cxl/core/cachedev.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2025 Advanced Micro Devices, Inc. */
#include <linux/device.h>
#include <linux/pci.h>
+#include "cxlpci.h"
#include "../cxlcache.h"
#include "private.h"
@@ -93,3 +94,22 @@ struct cxl_cachedev *devm_cxl_cachedev_add_or_reset(struct device *host,
return cxlcd;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_cachedev_add_or_reset, "CXL");
+
+bool cxl_cachedev_is_type2(struct cxl_cachedev *cxlcd)
+{
+	struct cxl_dev_state *cxlds = cxlcd->cxlds;
+	int dvsec = cxlds->cxl_dvsec;
+	u32 cap;
+	int rc;
+
+	if (!dev_is_pci(cxlds->dev))
+		return false;
+
+	rc = pci_read_config_dword(to_pci_dev(cxlds->dev),
+				   dvsec + CXL_DVSEC_CAP_OFFSET, &cap);
+	if (rc)
+		return false;
+
+	return (cap & CXL_DVSEC_MEM_CAPABLE) && (cap & CXL_DVSEC_CACHE_CAPABLE);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cachedev_is_type2, "CXL");
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 5b1cace8fc0f..27c74e90ade5 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1264,11 +1264,12 @@ EXPORT_SYMBOL_NS_GPL(cxl_port_get_possible_dports, "CXL");
/**
* cxl_accel_read_cache_info - Get the CXL cache information of a CXL cache device
* @cxlds: CXL device state associated with cache device
+ * @hdmd: Whether the device uses HDM-D flows
*
* Returns 0 and populates the struct cxl_cache_state member of @cxlds on
* success, error otherwise.
*/
-int cxl_accel_read_cache_info(struct cxl_dev_state *cxlds)
+int cxl_accel_read_cache_info(struct cxl_dev_state *cxlds, bool hdmd)
{
struct cxl_cache_state *cstate = &cxlds->cstate;
struct pci_dev *pdev;
@@ -1308,6 +1309,7 @@ int cxl_accel_read_cache_info(struct cxl_dev_state *cxlds)
if (!cstate->size)
return -ENXIO;
+ cxlds->hdmd = hdmd;
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_accel_read_cache_info, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 70666a059d1f..1504631ae620 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -750,6 +750,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
dev->parent = uport_dev;
ida_init(&port->decoder_ida);
+ ida_init(&port->cache_ida);
port->hdm_end = -1;
port->commit_end = -1;
xa_init(&port->dports);
@@ -2414,6 +2415,234 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, "CXL");
+static bool cache_decoder_committed(struct cxl_dport *dport)
+{
+ u32 cap, stat;
+
+ cap = readl(dport->regs.cidd + CXL_CACHE_IDD_CAP_OFFSET);
+ if (!(cap & CXL_CACHE_IDD_CAP_COMMIT_REQUIRED))
+ return true;
+
+ stat = readl(dport->regs.cidd + CXL_CACHE_IDD_STAT_OFFSET);
+ return (stat & CXL_CACHE_IDD_STAT_COMMITTED);
+}
+
+static bool cache_decoder_valid(struct cxl_dport *dport, bool endpoint, int id)
+{
+	struct pci_dev *pdev = to_pci_dev(dport->dport_dev);
+	bool flit_256b = cxl_pci_flit_256(pdev);
+	u32 ctrl;
+
+	if (id && !flit_256b)
+		return false;
+
+	ctrl = readl(dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+	if ((endpoint || !flit_256b) &&
+	    !(ctrl & CXL_CACHE_IDD_CTRL_ASGN_ID))
+		return false;
+	else if (!(ctrl & CXL_CACHE_IDD_CTRL_FWD_ID))
+		return false;
+
+	return true;
+}
+
+static int cxl_dport_get_cache_id(struct cxl_dport *dport,
+ struct cxl_port *endpoint)
+{
+ u32 ctrl;
+ int id;
+
+ if (!is_cxl_endpoint(endpoint))
+ return CXL_CACHE_ID_NO_ID;
+
+ ctrl = readl(dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+ if (ctrl & CXL_CACHE_IDD_CTRL_TYPE2)
+ id = FIELD_GET(CXL_CACHE_IDD_CTRL_TYPE2_ID_MASK, ctrl);
+ else
+ id = FIELD_GET(CXL_CACHE_IDD_CTRL_LOCAL_ID_MASK, ctrl);
+
+ if (!cache_decoder_valid(dport, is_cxl_endpoint(endpoint), id) ||
+ !cache_decoder_committed(dport))
+ return CXL_CACHE_ID_NO_ID;
+
+ return id;
+}
+
+static bool cache_idrt_committed(struct cxl_port *port)
+{
+ u32 cap, stat;
+
+ cap = readl(port->regs.cidrt + CXL_CACHE_IDRT_CAP_OFFSET);
+ if (!(cap & CXL_CACHE_IDRT_CAP_COMMIT_REQUIRED))
+ return true;
+
+ stat = readl(port->regs.cidrt + CXL_CACHE_IDRT_STAT_OFFSET);
+ return (stat & CXL_CACHE_IDRT_STAT_COMMITTED);
+}
+
+static bool cache_idrt_entry_valid(struct cxl_port *port, int id)
+{
+ u16 target;
+ u32 cap;
+
+ cap = readl(port->regs.cidrt + CXL_CACHE_IDRT_CAP_OFFSET);
+ if (FIELD_GET(CXL_CACHE_IDRT_CAP_CNT_MASK, cap) <= id)
+ return false;
+
+ target = readw(port->regs.cidrt + CXL_CACHE_IDRT_TARGETN_OFFSET(id));
+ return (target & CXL_CACHE_IDRT_TARGETN_VALID);
+}
+
+int cxl_endpoint_map_cache_id_regs(struct cxl_port *port)
+{
+	struct cxl_dport *parent_dport = port->parent_dport;
+	int rc;
+
+	if (!is_cxl_cachedev(port->uport_dev))
+		return -EINVAL;
+
+	/*
+	 * Root (ACPI0017) dports have no cache id decoders, so stop below root.
+	 */
+	port = parent_port_of(port);
+	while (port && !is_cxl_root(port)) {
+		if (!port->reg_map.component_map.cidrt.valid)
+			return -ENXIO;
+
+		scoped_guard(device, &port->dev) {
+			if (!port->regs.cidrt) {
+				rc = cxl_map_component_regs(
+					&port->reg_map, &port->regs,
+					BIT(CXL_CM_CAP_CAP_ID_CIDRT));
+				if (rc)
+					return rc;
+			}
+		}
+
+		if (!parent_dport->reg_map.component_map.cidd.valid)
+			return -ENXIO;
+
+		scoped_guard(device, &parent_dport->port->dev) {
+			if (!parent_dport->regs.cidd) {
+				rc = cxl_map_component_regs(
+					&parent_dport->reg_map,
+					&parent_dport->regs.component,
+					BIT(CXL_CM_CAP_CAP_ID_CIDD));
+				if (rc)
+					return rc;
+			}
+		}
+
+		parent_dport = port->parent_dport;
+		port = parent_port_of(port);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_map_cache_id_regs, "CXL");
+
+/**
+ * cxl_endpoint_allocate_cache_id - Allocate a cache id @id on the endpoint's
+ * host bridge.
+ * @endpoint: Endpoint port representing a CXL.cache device
+ * @id: Cache id to attempt to allocate
+ *
+ * Returns rc < 0 if id allocation fails. Returns allocated id otherwise.
+ */
+int cxl_endpoint_allocate_cache_id(struct cxl_port *endpoint, int id)
+{
+	struct cxl_cachedev *cxlcd;
+	struct cxl_port *hb;
+	int nr_hdmd;
+	u32 cap;
+
+	if (!is_cxl_cachedev(endpoint->uport_dev) || id < 0)
+		return -EINVAL;
+	cxlcd = to_cxl_cachedev(endpoint->uport_dev);
+
+	hb = parent_port_of(endpoint);
+	while (!is_cxl_host_bridge(&hb->dev))
+		hb = parent_port_of(hb);
+
+	if (cxl_cachedev_is_type2(cxlcd) && cxlcd->cxlds->hdmd) {
+		cap = readl(hb->regs.cidrt + CXL_CACHE_IDRT_CAP_OFFSET);
+		nr_hdmd = FIELD_GET(CXL_CACHE_IDRT_CAP_TYPE2_CNT_MASK, cap);
+
+		guard(device)(&hb->dev);
+		if (hb->nr_hdmd >= nr_hdmd)
+			return -EINVAL;
+
+		hb->nr_hdmd++;
+	}
+
+	return ida_alloc_range(&hb->cache_ida, id, id, GFP_KERNEL);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_allocate_cache_id, "CXL");
+
+void cxl_endpoint_free_cache_id(struct cxl_port *endpoint, int id)
+{
+ struct cxl_cachedev *cxlcd;
+ struct cxl_port *hb;
+
+ if (!is_cxl_cachedev(endpoint->uport_dev))
+ return;
+ cxlcd = to_cxl_cachedev(endpoint->uport_dev);
+
+ hb = endpoint;
+ while (!is_cxl_host_bridge(&hb->dev))
+ hb = parent_port_of(hb);
+
+ if (cxl_cachedev_is_type2(cxlcd) && cxlcd->cxlds->hdmd) {
+ guard(device)(&hb->dev);
+ hb->nr_hdmd--;
+ }
+
+ ida_free(&hb->cache_ida, id);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_free_cache_id, "CXL");
+
+/**
+ * cxl_endpoint_get_cache_id - Get the cache id of a CXL.cache endpoint device
+ * @endpoint: Endpoint port representing cache device
+ * @cid: Pointer to store resulting cache id in
+ *
+ * Returns 0 and sets @cid to CXL_CACHE_ID_NO_ID if programmed cache id is
+ * invalid.
+ */
+int cxl_endpoint_get_cache_id(struct cxl_port *endpoint, int *cid)
+{
+ struct cxl_dport *dport = endpoint->parent_dport;
+ struct cxl_port *port = parent_port_of(endpoint);
+ bool ep = true;
+
+ if (!cid)
+ return -EINVAL;
+
+ *cid = cxl_dport_get_cache_id(dport, port);
+
+ while (!is_cxl_root(port)) {
+ if (!cache_idrt_entry_valid(port, *cid) ||
+ !cache_idrt_committed(port)) {
+ *cid = CXL_CACHE_ID_NO_ID;
+ break;
+ }
+
+ if (!cache_decoder_valid(dport, ep, *cid) ||
+ !cache_decoder_committed(dport)) {
+ *cid = CXL_CACHE_ID_NO_ID;
+ break;
+ }
+
+ dport = port->parent_dport;
+ port = dport->port;
+ ep = false;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_cache_id, "CXL");
+
/**
* __cxl_driver_register - register a driver for the cxl bus
* @cxl_drv: cxl driver structure to attach
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 280978b34d2f..545a1b9e026c 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -98,6 +98,24 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
length = CXL_SNOOP_CAPABILITY_LENGTH;
rmap = &map->snoop;
break;
+ case CXL_CM_CAP_CAP_ID_CIDRT: {
+ int entry_cnt;
+
+ dev_dbg(dev,
+ "found Cache ID Route Table capability (0x%x)\n",
+ offset);
+ entry_cnt = FIELD_GET(CXL_CACHE_IDRT_CAP_CNT_MASK, hdr);
+ length = 2 * entry_cnt + 0x10;
+ rmap = &map->cidrt;
+ break;
+ }
+	case CXL_CM_CAP_CAP_ID_CIDD:
+		dev_dbg(dev,
+			"found Cache ID Decoder capability (0x%x)\n",
+			offset);
+		length = CXL_CACHE_IDD_CAPABILITY_LENGTH;
+		rmap = &map->cidd;
+		break;
default:
dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
offset);
@@ -218,6 +236,8 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
{ &map->component_map.hdm_decoder, ®s->hdm_decoder },
{ &map->component_map.ras, ®s->ras },
{ &map->component_map.snoop, ®s->snoop },
+ { &map->component_map.cidrt, ®s->cidrt },
+ { &map->component_map.cidd, ®s->cidd },
};
int i;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index de29ffc3d74f..f4dc912d67ed 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -42,6 +42,8 @@ extern const struct nvdimm_security_ops *cxl_security_ops;
#define CXL_CM_CAP_CAP_ID_RAS 0x2
#define CXL_CM_CAP_CAP_ID_HDM 0x5
#define CXL_CM_CAP_CAP_ID_SNOOP 0x8
+#define CXL_CM_CAP_CAP_ID_CIDRT 0xD
+#define CXL_CM_CAP_CAP_ID_CIDD 0xE
#define CXL_CM_CAP_CAP_HDM_VERSION 1
/* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
@@ -159,6 +161,30 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
#define CXL_SNOOP_FILTER_SIZE_OFFSET 0x4
#define CXL_SNOOP_CAPABILITY_LENGTH 0x8
+/* CXL 3.2 8.2.4.28 CXL Cache ID Route Table Capability Structure */
+#define CXL_CACHE_IDRT_CAP_OFFSET 0x0
+#define CXL_CACHE_IDRT_CAP_CNT_MASK GENMASK(4, 0)
+#define CXL_CACHE_IDRT_CAP_TYPE2_CNT_MASK GENMASK(11, 8)
+#define CXL_CACHE_IDRT_CAP_COMMIT_REQUIRED BIT(16)
+#define CXL_CACHE_IDRT_STAT_OFFSET 0x8
+#define CXL_CACHE_IDRT_STAT_COMMITTED BIT(0)
+#define CXL_CACHE_IDRT_TARGETN_OFFSET(n) (0x10 + (2 * (n)))
+#define CXL_CACHE_IDRT_TARGETN_VALID BIT(0)
+#define CXL_CACHE_IDRT_TARGETN_PORTN GENMASK(15, 8)
+
+/* CXL 3.2 8.2.4.29 CXL Cache ID Decoder Capability Structure */
+#define CXL_CACHE_IDD_CAP_OFFSET 0x0
+#define CXL_CACHE_IDD_CAP_COMMIT_REQUIRED BIT(0)
+#define CXL_CACHE_IDD_CTRL_OFFSET 0x4
+#define CXL_CACHE_IDD_CTRL_FWD_ID BIT(0)
+#define CXL_CACHE_IDD_CTRL_ASGN_ID BIT(1)
+#define CXL_CACHE_IDD_CTRL_TYPE2 BIT(2)
+#define CXL_CACHE_IDD_CTRL_TYPE2_ID_MASK GENMASK(11, 8)
+#define CXL_CACHE_IDD_CTRL_LOCAL_ID_MASK GENMASK(19, 16)
+#define CXL_CACHE_IDD_STAT_OFFSET 0x8
+#define CXL_CACHE_IDD_STAT_COMMITTED BIT(0)
+#define CXL_CACHE_IDD_CAPABILITY_LENGTH 0xC
+
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -223,6 +249,8 @@ struct cxl_regs {
void __iomem *hdm_decoder;
void __iomem *ras;
void __iomem *snoop;
+ void __iomem *cidrt;
+ void __iomem *cidd;
);
/*
* Common set of CXL Device register block base pointers
@@ -266,6 +294,8 @@ struct cxl_component_reg_map {
struct cxl_reg_map hdm_decoder;
struct cxl_reg_map ras;
struct cxl_reg_map snoop;
+ struct cxl_reg_map cidrt;
+ struct cxl_reg_map cidd;
};
struct cxl_device_reg_map {
@@ -609,7 +639,9 @@ struct cxl_dax_region {
* @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
* @reg_map: component and ras register mapping parameters
+ * @regs: component register mappings
* @nr_dports: number of entries in @dports
+ * @nr_hdmd: number of type 2 devices using hdm-d flows below this port
* @hdm_end: track last allocated HDM decoder instance for allocation ordering
* @commit_end: cursor to track highest committed decoder for commit ordering
* @dead: last ep has been removed, force port re-creation
@@ -618,6 +650,7 @@ struct cxl_dax_region {
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
* @component_reg_phys: Physical address of component register
+ * @cache_ida: cache id allocator
*/
struct cxl_port {
struct device dev;
@@ -630,7 +663,9 @@ struct cxl_port {
struct cxl_dport *parent_dport;
struct ida decoder_ida;
struct cxl_register_map reg_map;
+ struct cxl_component_regs regs;
int nr_dports;
+ int nr_hdmd;
int hdm_end;
int commit_end;
bool dead;
@@ -642,6 +677,7 @@ struct cxl_port {
bool cdat_available;
long pci_latency;
resource_size_t component_reg_phys;
+ struct ida cache_ida;
};
/**
@@ -769,6 +805,7 @@ struct cxl_dpa_info {
int nr_partitions;
};
+#define CXL_CACHE_ID_NO_ID (-1)
#define CXL_SNOOP_ID_NO_ID (-1)
/**
@@ -780,6 +817,7 @@ struct cxl_cache_state {
u64 size;
u32 unit;
int snoop_id;
+ int cache_id;
};
/**
@@ -797,6 +835,7 @@ struct cxl_cache_state {
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
+ * @hdmd: Whether this device is using HDM-D flows
* @dpa_res: Overall DPA resource tree for the device
* @part: DPA partition array
* @nr_partitions: Number of DPA partitions
@@ -815,6 +854,7 @@ struct cxl_dev_state {
int cxl_dvsec;
bool rcd;
bool media_ready;
+ bool hdmd;
struct resource dpa_res;
struct cxl_dpa_partition part[CXL_NR_PARTITIONS_MAX];
unsigned int nr_partitions;
@@ -946,6 +986,10 @@ static inline int cxl_root_decoder_autoremove(struct device *host,
return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld);
}
int cxl_endpoint_autoremove(struct device *ep_dev, struct cxl_port *endpoint);
+int cxl_endpoint_map_cache_id_regs(struct cxl_port *endpoint);
+int cxl_endpoint_get_cache_id(struct cxl_port *endpoint, int *cid);
+int cxl_endpoint_allocate_cache_id(struct cxl_port *endpoint, int id);
+void cxl_endpoint_free_cache_id(struct cxl_port *endpoint, int id);
/**
* struct cxl_endpoint_dvsec_info - Cached DVSEC info
diff --git a/drivers/cxl/cxlcache.h b/drivers/cxl/cxlcache.h
index 6409e25dd1b4..fe9e44fda641 100644
--- a/drivers/cxl/cxlcache.h
+++ b/drivers/cxl/cxlcache.h
@@ -28,7 +28,8 @@ static inline struct cxl_cachedev *to_cxl_cachedev(struct device *dev)
bool is_cxl_cachedev(const struct device *dev);
-int cxl_accel_read_cache_info(struct cxl_dev_state *cxlds);
+int cxl_accel_read_cache_info(struct cxl_dev_state *cxlds, bool hdmd);
struct cxl_cachedev *devm_cxl_add_cachedev(struct device *host,
struct cxl_dev_state *cxlds);
+bool cxl_cachedev_is_type2(struct cxl_cachedev *cxlcd);
#endif
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index ae38b9965a84..c1d1d28bee5c 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -7,6 +7,7 @@
#include "cxlmem.h"
#include "cxlpci.h"
#include "private.h"
+#include "cxlcache.h"
/**
* DOC: cxl port
@@ -95,10 +96,33 @@ static int cxl_mem_endpoint_port_probe(struct cxl_port *port)
return 0;
}
+static void free_cache_id(void *data)
+{
+ struct cxl_cachedev *cxlcd = data;
+ int id = cxlcd->cxlds->cstate.cache_id;
+
+ cxl_endpoint_free_cache_id(cxlcd->endpoint, id);
+}
+
static int cxl_cache_endpoint_port_probe(struct cxl_port *port)
{
- /* No further set up required for CXL.cache devices */
- return 0;
+ struct cxl_cachedev *cxlcd = to_cxl_cachedev(port->uport_dev);
+ int rc, id;
+
+ rc = cxl_endpoint_map_cache_id_regs(port);
+ if (rc)
+ return rc;
+
+ rc = cxl_endpoint_get_cache_id(port, &id);
+ if (rc)
+ return rc;
+
+ rc = cxl_endpoint_allocate_cache_id(port, id);
+ if (rc < 0)
+ return rc;
+
+ cxlcd->cxlds->cstate.cache_id = id;
+ return devm_add_action_or_reset(&cxlcd->dev, free_cache_id, cxlcd);
}
static int cxl_endpoint_port_probe(struct cxl_port *port)
--
2.51.1
next prev parent reply other threads:[~2025-11-11 21:43 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-11 21:40 [RFC v2 PATCH 00/17] Initial CXL.cache device support Ben Cheatham
2025-11-11 21:40 ` [PATCH 01/17] cxl/port: Arrange for always synchronous endpoint attach Ben Cheatham
2025-11-17 15:56 ` Jonathan Cameron
2025-11-11 21:40 ` [PATCH 02/17] cxl: Move struct cxl_dev_state definition Ben Cheatham
2025-11-11 21:40 ` [PATCH 03/17] cxl/core: Add function for getting CXL cache info Ben Cheatham
2025-12-17 16:09 ` Jonathan Cameron
2025-12-17 18:01 ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 04/17] cxl/core: Add CXL.cache device struct Ben Cheatham
2025-12-17 16:14 ` Jonathan Cameron
2025-11-11 21:40 ` [PATCH 05/17] cxl/cache: Add cxl_cache driver Ben Cheatham
2025-12-17 16:17 ` Jonathan Cameron
2025-12-17 18:01 ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 06/17] cxl: Replace cxl_mem_find_port() with cxl_dev_find_port() Ben Cheatham
2025-12-17 16:18 ` Jonathan Cameron
2025-12-17 18:01 ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 07/17] cxl: Change cxl_ep_load() to use struct device * parameter Ben Cheatham
2025-11-11 21:40 ` [PATCH 08/17] cxl/core: Update devm_cxl_enumerate_ports() Ben Cheatham
2025-11-11 21:40 ` [PATCH 09/17] cxl/port: Split endpoint port probe on device type Ben Cheatham
2025-11-11 21:40 ` [PATCH 10/17] cxl/cache, mem: Prevent RAS register mapping race Ben Cheatham
2025-12-17 16:23 ` Jonathan Cameron
2025-12-17 18:02 ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 11/17] cxl/core, port: Update devm_cxl_add_endpoint() Ben Cheatham
2025-11-11 21:40 ` [PATCH 12/17] cxl/core: Add CXL snoop filter setup and allocation Ben Cheatham
2025-12-17 16:35 ` Jonathan Cameron
2025-12-17 18:02 ` Cheatham, Benjamin
2025-11-11 21:40 ` Ben Cheatham [this message]
2025-12-22 13:47 ` [PATCH 13/17] cxl/core: Add cache id verification Jonathan Cameron
2026-01-05 21:16 ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 14/17] cxl/port: Add cache id programming Ben Cheatham
2025-11-11 21:40 ` [PATCH 15/17] cxl/port: Bypass cache id for singleton cache devices Ben Cheatham
2025-11-11 21:40 ` [PATCH 16/17] cxl/core: Add cache device attributes Ben Cheatham
2025-12-17 16:12 ` Jonathan Cameron
2025-12-17 18:02 ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 17/17] cxl/core: Add cache device cache management attributes Ben Cheatham
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251111214032.8188-14-Benjamin.Cheatham@amd.com \
--to=benjamin.cheatham@amd.com \
--cc=linux-cxl@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox