public inbox for linux-cxl@vger.kernel.org
 help / color / mirror / Atom feed
From: Ben Cheatham <Benjamin.Cheatham@amd.com>
To: <linux-cxl@vger.kernel.org>
Cc: <benjamin.cheatham@amd.com>
Subject: [PATCH 14/17] cxl/port: Add cache id programming
Date: Tue, 11 Nov 2025 15:40:29 -0600	[thread overview]
Message-ID: <20251111214032.8188-15-Benjamin.Cheatham@amd.com> (raw)
In-Reply-To: <20251111214032.8188-1-Benjamin.Cheatham@amd.com>

Add programming cache ids for CXL.cache endpoint devices as part of
cxl_cache_endpoint_probe().

Programming the cache id for a CXL.cache endpoint requires allocating
a cache id beforehand. The cache id can be programmed by the platform
firmware, in which case calling cxl_endpoint_get_cache_id() will return
the pre-programmed id. In the event the id is not programmed, call
cxl_endpoint_allocate_cache_id() with a value of CXL_CACHE_ID_NO_ID,
which will return the first available id. Program the relevant cache
id decoders and route tables for a device by calling
devm_cxl_program_cache_id().

Signed-off-by: Ben Cheatham <Benjamin.Cheatham@amd.com>
---
 drivers/cxl/core/port.c | 284 +++++++++++++++++++++++++++++++++++++++-
 drivers/cxl/cxl.h       |  12 ++
 drivers/cxl/port.c      |  27 ++--
 3 files changed, 310 insertions(+), 13 deletions(-)

diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 1504631ae620..e6e25a201ff9 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -2546,7 +2546,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_map_cache_id_regs, "CXL");
  * cxl_endpoint_allocate_cache_id - Allocate a cache id @id on the endpoint's
  * host bridge.
  * @endpoint: Endpoint port representing a CXL.cache device
- * @id: Cache id to attempt to allocate
+ * @id: Cache id to attempt to allocate, CXL_CACHE_ID_NO_ID for any id
  *
  * Returns rc < 0 if id allocation fails. Returns allocated id otherwise.
  */
@@ -2570,13 +2570,16 @@ int cxl_endpoint_allocate_cache_id(struct cxl_port *endpoint, int id)
 		nr_hdmd = FIELD_GET(CXL_CACHE_IDRT_CAP_TYPE2_CNT_MASK, cap);
 
 		guard(device)(&hb->dev);
-		if (hb->nr_hdmd + 1 >= nr_hdmd)
+		if (hb->nr_hdmd == nr_hdmd)
 			return -EINVAL;
 
 		hb->nr_hdmd++;
 	}
 
-	return ida_alloc_range(&hb->cache_ida, id, id, GFP_KERNEL);
+	if (id == CXL_CACHE_ID_NO_ID)
+		return ida_alloc(&hb->cache_ida, GFP_KERNEL);
+	else
+		return ida_alloc_range(&hb->cache_ida, id, id, GFP_KERNEL);
 }
 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_allocate_cache_id, "CXL");
 
@@ -2602,6 +2605,211 @@ void cxl_endpoint_free_cache_id(struct cxl_port *endpoint, int id)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_free_cache_id, "CXL");
 
+/*
+ * Convert a cache id commit timeout, encoded in the status register as
+ * base * 10^scale microseconds, into jiffies.
+ */
+static unsigned long cxl_cache_id_compute_timeout(u8 scale, u8 base)
+{
+	unsigned long msecs = base ? base : 1;	/* at least 1ms of slack */
+
+	/*
+	 * A scale of 3 means units of 10^3 us == 1 ms; every power above
+	 * that grows the timeout by a factor of ten.
+	 */
+	for (; scale > 3; scale--)
+		msecs *= 10;
+
+	return msecs_to_jiffies(msecs);
+}
+
+/*
+ * Commit the cache id decoder programming for @dport, if the hardware
+ * requires an explicit commit. Polls for completion up to the timeout
+ * advertised in the status register.
+ *
+ * Returns 0 on success, -EBUSY on a commit error, -ETIMEDOUT otherwise.
+ */
+static int cxl_commit_cache_decoder(struct cxl_dport *dport)
+{
+	unsigned long timeout, start;
+	u32 cap, ctrl, stat;
+	u8 scale, base;
+
+	cap = readl(dport->regs.cidd + CXL_CACHE_IDD_CAP_OFFSET);
+	if (!(cap & CXL_CACHE_IDD_CAP_COMMIT_REQUIRED))
+		return 0;
+
+	/* A stale commit request must be cleared before issuing a new one */
+	ctrl = readl(dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+	if (ctrl & CXL_CACHE_IDD_CTRL_COMMIT) {
+		ctrl &= ~CXL_CACHE_IDD_CTRL_COMMIT;
+		writel(ctrl, dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+	}
+
+	/* The timeout fields live in the status register; extract, not pack */
+	stat = readl(dport->regs.cidd + CXL_CACHE_IDD_STAT_OFFSET);
+	scale = FIELD_GET(CXL_CACHE_IDD_STAT_TIME_SCALE_MASK, stat);
+	base = FIELD_GET(CXL_CACHE_IDD_STAT_TIME_BASE_MASK, stat);
+	timeout = cxl_cache_id_compute_timeout(scale, base);
+
+	/* Set (not mask) the commit bit to request the commit */
+	ctrl |= CXL_CACHE_IDD_CTRL_COMMIT;
+	writel(ctrl, dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+
+	start = jiffies;
+	do {
+		stat = readl(dport->regs.cidd + CXL_CACHE_IDD_STAT_OFFSET);
+		if (stat & CXL_CACHE_IDD_STAT_COMMITTED)
+			return 0;
+
+		if (stat & CXL_CACHE_IDD_STAT_ERR_COMMIT)
+			return -EBUSY;
+	} while (time_before(jiffies, start + timeout));
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Program the cache id decoder at @dport with cache id @id. @endpoint
+ * selects local id assignment (endpoint-facing decoder) vs. id
+ * forwarding; @hdmd additionally programs the Type 2 (HDM-D) id.
+ *
+ * On commit failure the original control value is restored.
+ */
+static int cxl_program_cache_decoder(struct cxl_dport *dport, int id,
+				     bool hdmd, bool endpoint)
+{
+	u32 ctrl, orig;
+	int rc;
+
+	ctrl = readl(dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+	orig = ctrl;
+
+	if (endpoint) {
+		/* Set the assign bit and replace the local id field */
+		ctrl |= CXL_CACHE_IDD_CTRL_ASGN_ID;
+		ctrl &= ~CXL_CACHE_IDD_CTRL_LOCAL_ID_MASK;
+		ctrl |= FIELD_PREP(CXL_CACHE_IDD_CTRL_LOCAL_ID_MASK, id);
+	} else {
+		ctrl |= CXL_CACHE_IDD_CTRL_FWD_ID;
+	}
+
+	if (hdmd) {
+		ctrl |= CXL_CACHE_IDD_CTRL_TYPE2;
+		ctrl &= ~CXL_CACHE_IDD_CTRL_TYPE2_ID_MASK;
+		ctrl |= FIELD_PREP(CXL_CACHE_IDD_CTRL_TYPE2_ID_MASK, id);
+	}
+
+	/* Already programmed as requested, nothing to commit */
+	if (ctrl == orig)
+		return 0;
+
+	writel(ctrl, dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+
+	rc = cxl_commit_cache_decoder(dport);
+	if (rc)
+		writel(orig, dport->regs.cidd + CXL_CACHE_IDD_CTRL_OFFSET);
+
+	return rc;
+}
+
+/*
+ * Commit the cache id route table programming for @port, if the
+ * hardware requires an explicit commit. Polls for completion up to the
+ * timeout advertised in the status register.
+ *
+ * Returns 0 on success, -EBUSY on a commit error, -ETIMEDOUT otherwise.
+ */
+static int cxl_commit_cache_idrt(struct cxl_port *port)
+{
+	unsigned long timeout, start;
+	u32 cap, ctrl, stat;
+	u8 scale, base;
+
+	cap = readl(port->regs.cidrt + CXL_CACHE_IDRT_CAP_OFFSET);
+	if (!(cap & CXL_CACHE_IDRT_CAP_COMMIT_REQUIRED))
+		return 0;
+
+	/* A stale commit request must be cleared before issuing a new one */
+	ctrl = readl(port->regs.cidrt + CXL_CACHE_IDRT_CTRL_OFFSET);
+	if (ctrl & CXL_CACHE_IDRT_CTRL_COMMIT) {
+		ctrl &= ~CXL_CACHE_IDRT_CTRL_COMMIT;
+		writel(ctrl, port->regs.cidrt + CXL_CACHE_IDRT_CTRL_OFFSET);
+	}
+
+	/* The timeout fields live in the status register; extract, not pack */
+	stat = readl(port->regs.cidrt + CXL_CACHE_IDRT_STAT_OFFSET);
+	scale = FIELD_GET(CXL_CACHE_IDRT_STAT_TIME_SCALE_MASK, stat);
+	base = FIELD_GET(CXL_CACHE_IDRT_STAT_TIME_BASE_MASK, stat);
+	timeout = cxl_cache_id_compute_timeout(scale, base);
+
+	/* Set (not mask) the commit bit to request the commit */
+	ctrl |= CXL_CACHE_IDRT_CTRL_COMMIT;
+	writel(ctrl, port->regs.cidrt + CXL_CACHE_IDRT_CTRL_OFFSET);
+
+	start = jiffies;
+	do {
+		stat = readl(port->regs.cidrt + CXL_CACHE_IDRT_STAT_OFFSET);
+		if (stat & CXL_CACHE_IDRT_STAT_COMMITTED)
+			return 0;
+
+		if (stat & CXL_CACHE_IDRT_STAT_ERR_COMMIT)
+			return -EBUSY;
+	} while (time_before(jiffies, start + timeout));
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Program (or invalidate, when !@valid) the route table entry for cache
+ * id @id at @port to target @dport. On commit failure the original
+ * entry is restored.
+ */
+static int cxl_program_cache_idrt_entry(struct cxl_port *port, int id,
+					struct cxl_dport *dport, bool valid)
+{
+	u16 target, orig;
+	int rc;
+
+	target = readw(port->regs.cidrt + CXL_CACHE_IDRT_TARGETN_OFFSET(id));
+	orig = target;
+
+	/*
+	 * Touching the port number field while the entry is valid is
+	 * undefined behavior.
+	 */
+	if (target & CXL_CACHE_IDRT_TARGETN_VALID && valid) {
+		if (FIELD_GET(CXL_CACHE_IDRT_TARGETN_PORTN, target) !=
+		    dport->port_id)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	target = FIELD_PREP(CXL_CACHE_IDRT_TARGETN_PORTN, dport->port_id);
+	/* Set (not mask) the valid bit when activating the entry */
+	if (valid)
+		target |= CXL_CACHE_IDRT_TARGETN_VALID;
+
+	if (orig == target)
+		return 0;
+
+	writew(target, port->regs.cidrt + CXL_CACHE_IDRT_TARGETN_OFFSET(id));
+	rc = cxl_commit_cache_idrt(port);
+	if (rc)
+		writew(orig,
+		       port->regs.cidrt + CXL_CACHE_IDRT_TARGETN_OFFSET(id));
+
+	return rc;
+}
+
+static DECLARE_RWSEM(cache_id_rwsem);
+
+/*
+ * Walk from @ep's parent port up to (but not including) @stop,
+ * dropping this cache device's reference on each dport along the way
+ * and invalidating the route table entry for @id once a dport has no
+ * remaining cache devices using it.
+ *
+ * Caller must hold cache_id_rwsem for write (nr_cachedevs and route
+ * table entries are modified).
+ */
+static void __cxl_endpoint_deprogram_cache_id(struct cxl_port *ep,
+					      struct cxl_port *stop, int id)
+{
+	struct cxl_dport *dport = ep->parent_dport;
+	struct cxl_port *port = parent_port_of(ep);
+	int rc;
+
+	while (port != stop) {
+		dport->nr_cachedevs--;
+		/* Last user of this dport: invalidate the routing entry */
+		if (dport->nr_cachedevs == 0) {
+			rc = cxl_program_cache_idrt_entry(port, id, dport, false);
+			if (rc)
+				/* Best effort: warn but keep unwinding */
+				dev_warn(&port->dev,
+					 "failed to decommit cache id target%d\n",
+					 id);
+		}
+
+		dport = port->parent_dport;
+		port = parent_port_of(port);
+	}
+}
+
+/* Deferred-teardown context handed to the deprogram devm action */
+struct cxl_cache_id_ctx {
+	struct cxl_port *endpoint;	/* endpoint whose cache id was set up */
+	struct cxl_port *stop;		/* terminate the unwind at this port */
+	int id;				/* the cache id to deprogram */
+};
+
+/*
+ * devm action: deprogram a previously programmed cache id. @data is a
+ * struct cxl_cache_id_ctx whose ownership was transferred to this
+ * action via no_free_ptr() at registration time, so free it here once
+ * it has been consumed (otherwise it is leaked on device teardown).
+ */
+static void cxl_endpoint_deprogram_cache_id(void *data)
+{
+	struct cxl_cache_id_ctx *ctx = data;
+
+	guard(rwsem_write)(&cache_id_rwsem);
+	__cxl_endpoint_deprogram_cache_id(ctx->endpoint, ctx->stop, ctx->id);
+	kfree(ctx);
+}
+
 /**
  * cxl_endpoint_get_cache_id - Get the cache id of a CXL.cache endpoint device
  * @endpoint: Endpoint port representing cache device
@@ -2614,11 +2822,19 @@ int cxl_endpoint_get_cache_id(struct cxl_port *endpoint, int *cid)
 {
 	struct cxl_dport *dport = endpoint->parent_dport;
 	struct cxl_port *port = parent_port_of(endpoint);
+	struct cxl_cachedev *cxlcd;
 	bool ep = true;
 
-	if (!cid)
+	if (!cid || !is_cxl_cachedev(endpoint->uport_dev))
 		return -EINVAL;
+	cxlcd = to_cxl_cachedev(endpoint->uport_dev);
 
+	struct cxl_cache_id_ctx *ctx __free(kfree) =
+		kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	guard(rwsem_read)(&cache_id_rwsem);
 	*cid = cxl_dport_get_cache_id(dport, port);
 
 	while (!is_cxl_root(port)) {
@@ -2639,10 +2855,68 @@ int cxl_endpoint_get_cache_id(struct cxl_port *endpoint, int *cid)
 		ep = false;
 	}
 
-	return 0;
+	*ctx = (struct cxl_cache_id_ctx) {
+		.endpoint = endpoint,
+		.stop = port,
+		.id = *cid,
+	};
+
+	return devm_add_action(&cxlcd->dev, cxl_endpoint_deprogram_cache_id,
+			       no_free_ptr(ctx));
 }
 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_cache_id, "CXL");
 
+/**
+ * devm_cxl_endpoint_program_cache_id - Program cache id @id for @endpoint
+ * @endpoint: Endpoint port representing a CXL.cache device
+ * @id: Previously allocated cache id to program
+ *
+ * Programs the cache id route table entry and cache id decoder at every
+ * port between @endpoint and the CXL root, and registers a devm action
+ * to deprogram them when the cache device goes away. On failure,
+ * already-programmed levels are unwound.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int devm_cxl_endpoint_program_cache_id(struct cxl_port *endpoint, int id)
+{
+	struct cxl_dport *dport = endpoint->parent_dport;
+	struct cxl_port *port = parent_port_of(endpoint);
+	struct cxl_cachedev *cxlcd;
+	bool hdmd, ep = true;
+	int rc;
+
+	if (!is_cxl_cachedev(endpoint->uport_dev))
+		return -EINVAL;
+
+	struct cxl_cache_id_ctx *ctx __free(kfree) =
+		kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	cxlcd = to_cxl_cachedev(endpoint->uport_dev);
+	hdmd = cxl_cachedev_is_type2(cxlcd) && cxlcd->cxlds->hdmd;
+
+	guard(rwsem_write)(&cache_id_rwsem);
+	while (!is_cxl_root(port)) {
+		rc = cxl_program_cache_idrt_entry(port, id, dport, true);
+		if (rc)
+			goto err;
+
+		/*
+		 * Count this level as programmed before touching the
+		 * decoder so that a decoder failure unwinds the route
+		 * table entry just written (see err_decoder).
+		 */
+		dport->nr_cachedevs++;
+		rc = cxl_program_cache_decoder(dport, id, hdmd, ep);
+		if (rc)
+			goto err_decoder;
+
+		ep = false;
+		dport = port->parent_dport;
+		port = parent_port_of(port);
+	}
+
+	*ctx = (struct cxl_cache_id_ctx) {
+		.endpoint = endpoint,
+		.stop = port,
+		.id = id,
+	};
+
+	return devm_add_action_or_reset(&cxlcd->dev,
+					cxl_endpoint_deprogram_cache_id,
+					no_free_ptr(ctx));
+
+err_decoder:
+	/* Unwind up to and including the level that just failed */
+	__cxl_endpoint_deprogram_cache_id(endpoint, parent_port_of(port), id);
+	return rc;
+err:
+	/*
+	 * Nothing was programmed at the current level, so unwind only
+	 * the completed levels below it.
+	 */
+	__cxl_endpoint_deprogram_cache_id(endpoint, port, id);
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_program_cache_id, "CXL");
+
 /**
  * __cxl_driver_register - register a driver for the cxl bus
  * @cxl_drv: cxl driver structure to attach
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index f4dc912d67ed..7919c4466b0c 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -166,8 +166,13 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
 #define   CXL_CACHE_IDRT_CAP_CNT_MASK GENMASK(4, 0)
 #define   CXL_CACHE_IDRT_CAP_TYPE2_CNT_MASK GENMASK(11, 8)
 #define   CXL_CACHE_IDRT_CAP_COMMIT_REQUIRED BIT(16)
+#define CXL_CACHE_IDRT_CTRL_OFFSET 0x4
+#define   CXL_CACHE_IDRT_CTRL_COMMIT BIT(0)
 #define CXL_CACHE_IDRT_STAT_OFFSET 0x8
 #define   CXL_CACHE_IDRT_STAT_COMMITTED BIT(0)
+#define   CXL_CACHE_IDRT_STAT_ERR_COMMIT BIT(1)
+#define   CXL_CACHE_IDRT_STAT_TIME_SCALE_MASK GENMASK(11, 8)
+#define   CXL_CACHE_IDRT_STAT_TIME_BASE_MASK GENMASK(15, 12)
 #define CXL_CACHE_IDRT_TARGETN_OFFSET(n) (0x10 + (2 * (n)))
 #define   CXL_CACHE_IDRT_TARGETN_VALID BIT(0)
 #define   CXL_CACHE_IDRT_TARGETN_PORTN GENMASK(15, 8)
@@ -179,10 +184,14 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
 #define   CXL_CACHE_IDD_CTRL_FWD_ID BIT(0)
 #define   CXL_CACHE_IDD_CTRL_ASGN_ID BIT(1)
 #define   CXL_CACHE_IDD_CTRL_TYPE2 BIT(2)
+#define   CXL_CACHE_IDD_CTRL_COMMIT BIT(3)
 #define   CXL_CACHE_IDD_CTRL_TYPE2_ID_MASK GENMASK(11, 8)
 #define   CXL_CACHE_IDD_CTRL_LOCAL_ID_MASK GENMASK(19, 16)
 #define CXL_CACHE_IDD_STAT_OFFSET 0x8
 #define   CXL_CACHE_IDD_STAT_COMMITTED BIT(0)
+#define   CXL_CACHE_IDD_STAT_ERR_COMMIT BIT(1)
+#define   CXL_CACHE_IDD_STAT_TIME_SCALE_MASK GENMASK(11, 8)
+#define   CXL_CACHE_IDD_STAT_TIME_BASE_MASK GENMASK(15, 12)
 #define CXL_CACHE_IDD_CAPABILITY_LENGTH 0xC
 
 /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
@@ -726,6 +735,7 @@ struct cxl_rcrb_info {
  * @coord: access coordinates (bandwidth and latency performance attributes)
  * @link_latency: calculated PCIe downstream latency
  * @gpf_dvsec: Cached GPF port DVSEC
+ * @nr_cachedevs: Number of CXL.cache devices with a cache id below this dport
  */
 struct cxl_dport {
 	struct device *dport_dev;
@@ -739,6 +749,7 @@ struct cxl_dport {
 	long link_latency;
 	int gpf_dvsec;
 	int snoop_id;
+	int nr_cachedevs;
 };
 
 /**
@@ -990,6 +1001,7 @@ int cxl_endpoint_map_cache_id_regs(struct cxl_port *endpoint);
 int cxl_endpoint_get_cache_id(struct cxl_port *endpoint, int *cid);
 int cxl_endpoint_allocate_cache_id(struct cxl_port *endpoint, int id);
 void cxl_endpoint_free_cache_id(struct cxl_port *endpoint, int id);
+int devm_cxl_endpoint_program_cache_id(struct cxl_port *endpoint, int id);
 
 /**
  * struct cxl_endpoint_dvsec_info - Cached DVSEC info
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index c1d1d28bee5c..4cecb731ec5b 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -27,6 +27,8 @@
  * PCIe topology.
  */
 
+static DEFINE_MUTEX(cache_id_lock);
+
 static void schedule_detach(void *cxlmd)
 {
 	schedule_cxl_memdev_detach(cxlmd);
@@ -99,30 +101,39 @@ static int cxl_mem_endpoint_port_probe(struct cxl_port *port)
 static void free_cache_id(void *data)
 {
 	struct cxl_cachedev *cxlcd = data;
-	int id = cxlcd->cxlds->cstate.cache_id;
+	struct cxl_cache_state *cstate = &cxlcd->cxlds->cstate;
 
-	cxl_endpoint_free_cache_id(cxlcd->endpoint, id);
+	cxl_endpoint_free_cache_id(cxlcd->endpoint, cstate->cache_id);
+	/* Invalidate the cached id so a stale value is never reused */
+	cstate->cache_id = CXL_CACHE_ID_NO_ID;
 }
 
 static int cxl_cache_endpoint_port_probe(struct cxl_port *port)
 {
 	struct cxl_cachedev *cxlcd = to_cxl_cachedev(port->uport_dev);
-	int rc, id;
+	int rc, orig, id;
 
 	rc = cxl_endpoint_map_cache_id_regs(port);
 	if (rc)
 		return rc;
 
+	/* Serialize cache id lookup + allocation across probing devices */
+	guard(mutex)(&cache_id_lock);
+	/* orig holds the firmware-programmed id, or CXL_CACHE_ID_NO_ID */
+	rc = cxl_endpoint_get_cache_id(port, &orig);
 	if (rc)
 		return rc;
 
-	rc = cxl_endpoint_allocate_cache_id(port, id);
-	if (rc < 0)
-		return rc;
+	id = cxl_endpoint_allocate_cache_id(port, orig);
+	if (id < 0)
+		return id;
 
 	cxlcd->cxlds->cstate.cache_id = id;
-	return devm_add_action_or_reset(&cxlcd->dev, free_cache_id, cxlcd);
+	rc = devm_add_action_or_reset(&cxlcd->dev, free_cache_id, cxlcd);
+	if (rc)
+		return rc;
+
+	/* Only program decoders/route tables when firmware has not */
+	if (orig == CXL_CACHE_ID_NO_ID)
+		return devm_cxl_endpoint_program_cache_id(port, id);
+
+	return 0;
 }
 
 static int cxl_endpoint_port_probe(struct cxl_port *port)
-- 
2.51.1


  parent reply	other threads:[~2025-11-11 21:43 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-11 21:40 [RFC v2 PATCH 00/17] Initial CXL.cache device support Ben Cheatham
2025-11-11 21:40 ` [PATCH 01/17] cxl/port: Arrange for always synchronous endpoint attach Ben Cheatham
2025-11-17 15:56   ` Jonathan Cameron
2025-11-11 21:40 ` [PATCH 02/17] cxl: Move struct cxl_dev_state definition Ben Cheatham
2025-11-11 21:40 ` [PATCH 03/17] cxl/core: Add function for getting CXL cache info Ben Cheatham
2025-12-17 16:09   ` Jonathan Cameron
2025-12-17 18:01     ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 04/17] cxl/core: Add CXL.cache device struct Ben Cheatham
2025-12-17 16:14   ` Jonathan Cameron
2025-11-11 21:40 ` [PATCH 05/17] cxl/cache: Add cxl_cache driver Ben Cheatham
2025-12-17 16:17   ` Jonathan Cameron
2025-12-17 18:01     ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 06/17] cxl: Replace cxl_mem_find_port() with cxl_dev_find_port() Ben Cheatham
2025-12-17 16:18   ` Jonathan Cameron
2025-12-17 18:01     ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 07/17] cxl: Change cxl_ep_load() to use struct device * parameter Ben Cheatham
2025-11-11 21:40 ` [PATCH 08/17] cxl/core: Update devm_cxl_enumerate_ports() Ben Cheatham
2025-11-11 21:40 ` [PATCH 09/17] cxl/port: Split endpoint port probe on device type Ben Cheatham
2025-11-11 21:40 ` [PATCH 10/17] cxl/cache, mem: Prevent RAS register mapping race Ben Cheatham
2025-12-17 16:23   ` Jonathan Cameron
2025-12-17 18:02     ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 11/17] cxl/core, port: Update devm_cxl_add_endpoint() Ben Cheatham
2025-11-11 21:40 ` [PATCH 12/17] cxl/core: Add CXL snoop filter setup and allocation Ben Cheatham
2025-12-17 16:35   ` Jonathan Cameron
2025-12-17 18:02     ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 13/17] cxl/core: Add cache id verification Ben Cheatham
2025-12-22 13:47   ` Jonathan Cameron
2026-01-05 21:16     ` Cheatham, Benjamin
2025-11-11 21:40 ` Ben Cheatham [this message]
2025-11-11 21:40 ` [PATCH 15/17] cxl/port: Bypass cache id for singleton cache devices Ben Cheatham
2025-11-11 21:40 ` [PATCH 16/17] cxl/core: Add cache device attributes Ben Cheatham
2025-12-17 16:12   ` Jonathan Cameron
2025-12-17 18:02     ` Cheatham, Benjamin
2025-11-11 21:40 ` [PATCH 17/17] cxl/core: Add cache device cache management attributes Ben Cheatham

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251111214032.8188-15-Benjamin.Cheatham@amd.com \
    --to=benjamin.cheatham@amd.com \
    --cc=linux-cxl@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox