Linux CXL
 help / color / mirror / Atom feed
* [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
@ 2024-02-01 21:47 ` Dave Jiang
  2024-02-01 21:47   ` [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev Dave Jiang
                     ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Dave Jiang @ 2024-02-01 21:47 UTC (permalink / raw)
  To: linux-cxl
  Cc: dan.j.williams, ira.weiny, vishal.l.verma, alison.schofield,
	Jonathan.Cameron, dave

In order to address the issue with being able to expose qos_class sysfs
attributes under 'ram' and 'pmem' sub-directories, the attributes must
be defined as static attributes rather than under driver->dev_groups.
To avoid implementing locking for accessing the 'struct cxl_dpa_perf'
lists, convert the list to a single 'struct cxl_dpa_perf' entry in
preparation for moving the attributes to static definitions.

While theoretically a partition may have multiple qos_class via CDAT, this
has not been encountered with testing on available hardware. The code is
simplified for now to not support the complex case until a use case is
needed to support that.

Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
v3:
- Add to commit log about simplification (Dan)
- Remove check for dev->driver (Dan)
- Remove check for invalid qos_class (Dan)
---
 drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
 drivers/cxl/core/mbox.c |  4 +-
 drivers/cxl/cxlmem.h    | 10 ++---
 drivers/cxl/mem.c       | 28 ++------------
 4 files changed, 33 insertions(+), 90 deletions(-)

diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 6fe11546889f..55b82dfd794b 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 	return 0;
 }
 
-static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
-			   struct list_head *list)
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+			      struct cxl_dpa_perf *dpa_perf)
 {
-	struct cxl_dpa_perf *dpa_perf;
-
-	dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
-	if (!dpa_perf)
-		return;
-
 	dpa_perf->dpa_range = dent->dpa_range;
 	dpa_perf->coord = dent->coord;
 	dpa_perf->qos_class = dent->qos_class;
-	list_add_tail(&dpa_perf->list, list);
 	dev_dbg(dev,
 		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
 		dent->dpa_range.start, dpa_perf->qos_class,
@@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
 		dent->coord.read_latency, dent->coord.write_latency);
 }
 
-static void free_perf_ents(void *data)
-{
-	struct cxl_memdev_state *mds = data;
-	struct cxl_dpa_perf *dpa_perf, *n;
-	LIST_HEAD(discard);
-
-	list_splice_tail_init(&mds->ram_perf_list, &discard);
-	list_splice_tail_init(&mds->pmem_perf_list, &discard);
-	list_for_each_entry_safe(dpa_perf, n, &discard, list) {
-		list_del(&dpa_perf->list);
-		kfree(dpa_perf);
-	}
-}
-
 static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
 				     struct xarray *dsmas_xa)
 {
@@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
 	xa_for_each(dsmas_xa, index, dent) {
 		if (resource_size(&cxlds->ram_res) &&
 		    range_contains(&ram_range, &dent->dpa_range))
-			add_perf_entry(dev, dent, &mds->ram_perf_list);
+			update_perf_entry(dev, dent, &mds->ram_perf);
 		else if (resource_size(&cxlds->pmem_res) &&
 			 range_contains(&pmem_range, &dent->dpa_range))
-			add_perf_entry(dev, dent, &mds->pmem_perf_list);
+			update_perf_entry(dev, dent, &mds->pmem_perf);
 		else
 			dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
 				dent->dpa_range.start);
 	}
-
-	devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
 }
 
 static int match_cxlrd_qos_class(struct device *dev, void *data)
@@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
 	return 0;
 }
 
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
+{
+	memset(dpa_perf, 0, sizeof(*dpa_perf));
+	dpa_perf->qos_class = CXL_QOS_CLASS_INVALID;
+}
+
 static void cxl_qos_match(struct cxl_port *root_port,
-			  struct list_head *work_list,
-			  struct list_head *discard_list)
+			  struct cxl_dpa_perf *dpa_perf)
 {
-	struct cxl_dpa_perf *dpa_perf, *n;
+	int rc;
 
-	list_for_each_entry_safe(dpa_perf, n, work_list, list) {
-		int rc;
+	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+		return;
 
-		if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
-			return;
-
-		rc = device_for_each_child(&root_port->dev,
-					   (void *)&dpa_perf->qos_class,
-					   match_cxlrd_qos_class);
-		if (!rc)
-			list_move_tail(&dpa_perf->list, discard_list);
-	}
+	rc = device_for_each_child(&root_port->dev,
+				   &dpa_perf->qos_class,
+				   match_cxlrd_qos_class);
+	if (!rc)
+		reset_dpa_perf(dpa_perf);
 }
 
 static int match_cxlrd_hb(struct device *dev, void *data)
@@ -334,23 +312,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
 	return 0;
 }
 
-static void discard_dpa_perf(struct list_head *list)
-{
-	struct cxl_dpa_perf *dpa_perf, *n;
-
-	list_for_each_entry_safe(dpa_perf, n, list, list) {
-		list_del(&dpa_perf->list);
-		kfree(dpa_perf);
-	}
-}
-DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
-
 static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
 {
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-	LIST_HEAD(__discard);
-	struct list_head *discard __free(dpa_perf) = &__discard;
 	struct cxl_port *root_port;
 	int rc;
 
@@ -363,16 +328,16 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
 	root_port = &cxl_root->port;
 
 	/* Check that the QTG IDs are all sane between end device and root decoders */
-	cxl_qos_match(root_port, &mds->ram_perf_list, discard);
-	cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+	cxl_qos_match(root_port, &mds->ram_perf);
+	cxl_qos_match(root_port, &mds->pmem_perf);
 
 	/* Check to make sure that the device's host bridge is under a root decoder */
 	rc = device_for_each_child(&root_port->dev,
 				   (void *)cxlmd->endpoint->host_bridge,
 				   match_cxlrd_hb);
 	if (!rc) {
-		list_splice_tail_init(&mds->ram_perf_list, discard);
-		list_splice_tail_init(&mds->pmem_perf_list, discard);
+		reset_dpa_perf(&mds->ram_perf);
+		reset_dpa_perf(&mds->pmem_perf);
 	}
 
 	return rc;
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 27166a411705..9adda4795eb7 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mds->cxlds.reg_map.host = dev;
 	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
 	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
-	INIT_LIST_HEAD(&mds->ram_perf_list);
-	INIT_LIST_HEAD(&mds->pmem_perf_list);
+	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
 
 	return mds;
 }
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5303d6942b88..20fb3b35e89e 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,13 +395,11 @@ enum cxl_devtype {
 
 /**
  * struct cxl_dpa_perf - DPA performance property entry
- * @list - list entry
  * @dpa_range - range for DPA address
  * @coord - QoS performance data (i.e. latency, bandwidth)
  * @qos_class - QoS Class cookies
  */
 struct cxl_dpa_perf {
-	struct list_head list;
 	struct range dpa_range;
 	struct access_coordinate coord;
 	int qos_class;
@@ -471,8 +469,8 @@ struct cxl_dev_state {
  * @security: security driver state info
  * @fw: firmware upload / activation state
  * @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf_list: performance data entries matched to RAM
- * @pmem_perf_list: performance data entries matched to PMEM
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
  *
  * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
@@ -494,8 +492,8 @@ struct cxl_memdev_state {
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
 
-	struct list_head ram_perf_list;
-	struct list_head pmem_perf_list;
+	struct cxl_dpa_perf ram_perf;
+	struct cxl_dpa_perf pmem_perf;
 
 	struct cxl_event_state event;
 	struct cxl_poison_state poison;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index c5c9d8e0d88d..547f5a145fc5 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -221,18 +221,8 @@ static ssize_t ram_qos_class_show(struct device *dev,
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-	struct cxl_dpa_perf *dpa_perf;
 
-	if (!dev->driver)
-		return -ENOENT;
-
-	if (list_empty(&mds->ram_perf_list))
-		return -ENOENT;
-
-	dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
-				    list);
-
-	return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
+	return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
 }
 
 static struct device_attribute dev_attr_ram_qos_class =
@@ -244,18 +234,8 @@ static ssize_t pmem_qos_class_show(struct device *dev,
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-	struct cxl_dpa_perf *dpa_perf;
 
-	if (!dev->driver)
-		return -ENOENT;
-
-	if (list_empty(&mds->pmem_perf_list))
-		return -ENOENT;
-
-	dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
-				    list);
-
-	return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
+	return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
 }
 
 static struct device_attribute dev_attr_pmem_qos_class =
@@ -273,11 +253,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
 			return 0;
 
 	if (a == &dev_attr_pmem_qos_class.attr)
-		if (list_empty(&mds->pmem_perf_list))
+		if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
 			return 0;
 
 	if (a == &dev_attr_ram_qos_class.attr)
-		if (list_empty(&mds->ram_perf_list))
+		if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
 			return 0;
 
 	return a->mode;
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev
  2024-02-01 21:47 ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Dave Jiang
@ 2024-02-01 21:47   ` Dave Jiang
  2024-02-05 11:40     ` Jonathan Cameron
  2024-02-01 21:47   ` [PATCH v3 3/3] cxl/test: Add support for qos_class checking Dave Jiang
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 14+ messages in thread
From: Dave Jiang @ 2024-02-01 21:47 UTC (permalink / raw)
  To: linux-cxl
  Cc: dan.j.williams, ira.weiny, vishal.l.verma, alison.schofield,
	Jonathan.Cameron, dave

Current implementation exports only to
/sys/bus/cxl/devices/.../memN/qos_class. With both ram and pmem exposed,
the second registered sysfs attribute is rejected as duplicate. It's not
possible to create qos_class under the dev_groups via the driver due to
the ram and pmem sysfs sub-directories already created by the device sysfs
groups. Move the ram and pmem qos_class to the device sysfs groups and add
a call to sysfs_update_group() after the perf data are validated so the
qos_class can be visible. The end result should be
/sys/bus/cxl/devices/.../memN/ram/qos_class and
/sys/bus/cxl/devices/.../memN/pmem/qos_class.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
v3:
- Updated based on previous patch changes
---
 drivers/cxl/core/cdat.c   |  1 +
 drivers/cxl/core/memdev.c | 71 +++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxl.h         |  2 ++
 drivers/cxl/mem.c         | 36 --------------------
 4 files changed, 74 insertions(+), 36 deletions(-)

diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 55b82dfd794b..5c93bf9d5253 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -382,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
 
 	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
 	cxl_qos_class_verify(cxlmd);
+	cxl_memdev_update_attribute_groups(cxlmd);
 }
 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
 
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index dae8802ecdb0..ed85096a33fb 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
 	NULL,
 };
 
+static ssize_t pmem_qos_class_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+	return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
 static struct attribute *cxl_memdev_pmem_attributes[] = {
 	&dev_attr_pmem_size.attr,
+	&dev_attr_pmem_qos_class.attr,
 	NULL,
 };
 
+static ssize_t ram_qos_class_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+	return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
 static struct attribute *cxl_memdev_ram_attributes[] = {
 	&dev_attr_ram_size.attr,
+	&dev_attr_ram_qos_class.attr,
 	NULL,
 };
 
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
 	.is_visible = cxl_memdev_visible,
 };
 
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+	if (a == &dev_attr_ram_qos_class.attr)
+		if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+			return 0;
+
+	return a->mode;
+}
+
 static struct attribute_group cxl_memdev_ram_attribute_group = {
 	.name = "ram",
 	.attrs = cxl_memdev_ram_attributes,
+	.is_visible = cxl_ram_visible,
 };
 
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+	if (a == &dev_attr_pmem_qos_class.attr)
+		if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+			return 0;
+
+	return a->mode;
+}
+
 static struct attribute_group cxl_memdev_pmem_attribute_group = {
 	.name = "pmem",
 	.attrs = cxl_memdev_pmem_attributes,
+	.is_visible = cxl_pmem_visible,
 };
 
 static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,21 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
 	NULL,
 };
 
+void cxl_memdev_update_attribute_groups(struct cxl_memdev *cxlmd)
+{
+	const struct attribute_group *grp = cxl_memdev_attribute_groups[0];
+
+	for (int i = 0; grp; i++) {
+		int rc = sysfs_update_group(&cxlmd->dev.kobj, grp);
+
+		if (rc)
+			dev_dbg(&cxlmd->dev,
+				"Unable to update memdev attribute group.\n");
+		grp = cxl_memdev_attribute_groups[i + 1];
+	}
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_attribute_groups, CXL);
+
 static const struct device_type cxl_memdev_type = {
 	.name = "cxl_memdev",
 	.release = cxl_memdev_release,
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6017c0c57b4..1f630b30d845 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 				      struct access_coordinate *coord);
 
+void cxl_memdev_update_attribute_groups(struct cxl_memdev *cxlmd);
+
 /*
  * Unit test builds overrides this to __weak, find the 'strong' version
  * of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 547f5a145fc5..0c79d9ce877c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -215,32 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev,
 }
 static DEVICE_ATTR_WO(trigger_poison_list);
 
-static ssize_t ram_qos_class_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-
-	return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
-}
-
-static struct device_attribute dev_attr_ram_qos_class =
-	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
-
-static ssize_t pmem_qos_class_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-
-	return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
-}
-
-static struct device_attribute dev_attr_pmem_qos_class =
-	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
-
 static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
 {
 	struct device *dev = kobj_to_dev(kobj);
@@ -252,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
 			      mds->poison.enabled_cmds))
 			return 0;
 
-	if (a == &dev_attr_pmem_qos_class.attr)
-		if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
-			return 0;
-
-	if (a == &dev_attr_ram_qos_class.attr)
-		if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
-			return 0;
-
 	return a->mode;
 }
 
 static struct attribute *cxl_mem_attrs[] = {
 	&dev_attr_trigger_poison_list.attr,
-	&dev_attr_ram_qos_class.attr,
-	&dev_attr_pmem_qos_class.attr,
 	NULL
 };
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v3 3/3] cxl/test: Add support for qos_class checking
  2024-02-01 21:47 ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Dave Jiang
  2024-02-01 21:47   ` [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev Dave Jiang
@ 2024-02-01 21:47   ` Dave Jiang
  2024-02-02  4:21   ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Wonjae Lee
  2024-02-05 11:32   ` Jonathan Cameron
  3 siblings, 0 replies; 14+ messages in thread
From: Dave Jiang @ 2024-02-01 21:47 UTC (permalink / raw)
  To: linux-cxl
  Cc: dan.j.williams, ira.weiny, vishal.l.verma, alison.schofield,
	Jonathan.Cameron, dave

Set a fake qos_class to a unique value in order to do simple testing of
qos_class for root decoders and mem devs via user cxl_test. A mock
function is added to set the fake qos_class values for memory device
and overrides cxl_endpoint_parse_cdat() in cxl driver code.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 tools/testing/cxl/Kbuild      |  1 +
 tools/testing/cxl/test/cxl.c  | 63 ++++++++++++++++++++++++++++++-----
 tools/testing/cxl/test/mock.c | 14 ++++++++
 tools/testing/cxl/test/mock.h |  1 +
 4 files changed, 70 insertions(+), 9 deletions(-)

diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index caff3834671f..030b388800f0 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -13,6 +13,7 @@ ldflags-y += --wrap=cxl_hdm_decode_init
 ldflags-y += --wrap=cxl_dvsec_rr_decode
 ldflags-y += --wrap=devm_cxl_add_rch_dport
 ldflags-y += --wrap=cxl_rcd_component_reg_phys
+ldflags-y += --wrap=cxl_endpoint_parse_cdat
 
 DRIVERS := ../../../drivers
 CXL_SRC := $(DRIVERS)/cxl
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index a3cdbb2be038..59dc28bfcb5e 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -15,6 +15,8 @@
 
 static int interleave_arithmetic;
 
+#define FAKE_QTG_ID	42
+
 #define NR_CXL_HOST_BRIDGES 2
 #define NR_CXL_SINGLE_HOST 1
 #define NR_CXL_RCH 1
@@ -209,7 +211,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
-			.qtg_id = 0,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 4UL,
 		},
 		.target = { 0 },
@@ -224,7 +226,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
-			.qtg_id = 1,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 8UL,
 		},
 		.target = { 0, 1, },
@@ -239,7 +241,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-			.qtg_id = 2,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 4UL,
 		},
 		.target = { 0 },
@@ -254,7 +256,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-			.qtg_id = 3,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 8UL,
 		},
 		.target = { 0, 1, },
@@ -269,7 +271,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-			.qtg_id = 4,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 4UL,
 		},
 		.target = { 2 },
@@ -284,7 +286,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
-			.qtg_id = 5,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M,
 		},
 		.target = { 3 },
@@ -301,7 +303,7 @@ static struct {
 			.granularity = 4,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-			.qtg_id = 0,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 8UL,
 		},
 		.target = { 0, },
@@ -317,7 +319,7 @@ static struct {
 			.granularity = 0,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-			.qtg_id = 1,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 8UL,
 		},
 		.target = { 0, 1, },
@@ -333,7 +335,7 @@ static struct {
 			.granularity = 0,
 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-			.qtg_id = 0,
+			.qtg_id = FAKE_QTG_ID,
 			.window_size = SZ_256M * 16UL,
 		},
 		.target = { 0, 1, 0, 1, },
@@ -976,6 +978,48 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
 	return 0;
 }
 
+/*
+ * Faking the cxl_dpa_perf for the memdev when appropriate.
+ */
+static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
+			   struct cxl_dpa_perf *dpa_perf)
+{
+	dpa_perf->qos_class = FAKE_QTG_ID;
+	dpa_perf->dpa_range = *range;
+	dpa_perf->coord.read_latency = 500;
+	dpa_perf->coord.write_latency = 500;
+	dpa_perf->coord.read_bandwidth = 1000;
+	dpa_perf->coord.write_bandwidth = 1000;
+}
+
+static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+	struct cxl_root *cxl_root __free(put_cxl_root) =
+		find_cxl_root(port);
+	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+	struct range pmem_range = {
+		.start = cxlds->pmem_res.start,
+		.end = cxlds->pmem_res.end,
+	};
+	struct range ram_range = {
+		.start = cxlds->ram_res.start,
+		.end = cxlds->ram_res.end,
+	};
+
+	if (!cxl_root)
+		return;
+
+	if (range_len(&ram_range))
+		dpa_perf_setup(port, &ram_range, &mds->ram_perf);
+
+	if (range_len(&pmem_range))
+		dpa_perf_setup(port, &pmem_range, &mds->pmem_perf);
+
+	cxl_memdev_update_attribute_groups(cxlmd);
+}
+
 static struct cxl_mock_ops cxl_mock_ops = {
 	.is_mock_adev = is_mock_adev,
 	.is_mock_bridge = is_mock_bridge,
@@ -989,6 +1033,7 @@ static struct cxl_mock_ops cxl_mock_ops = {
 	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
 	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
 	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
+	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
 	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
 };
 
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 1a61e68e3095..6f737941dc0e 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -285,6 +285,20 @@ resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
 }
 EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, CXL);
 
+void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+	int index;
+	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+
+	if (ops && ops->is_mock_dev(cxlmd->dev.parent))
+		ops->cxl_endpoint_parse_cdat(port);
+	else
+		cxl_endpoint_parse_cdat(port);
+	put_cxl_mock_ops(index);
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, CXL);
+
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(ACPI);
 MODULE_IMPORT_NS(CXL);
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index a94223750346..d1b0271d2822 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -25,6 +25,7 @@ struct cxl_mock_ops {
 	int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port);
 	int (*devm_cxl_enumerate_decoders)(
 		struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info);
+	void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
 };
 
 void register_cxl_mock_ops(struct cxl_mock_ops *ops);
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* RE: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-01 21:47 ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Dave Jiang
  2024-02-01 21:47   ` [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev Dave Jiang
  2024-02-01 21:47   ` [PATCH v3 3/3] cxl/test: Add support for qos_class checking Dave Jiang
@ 2024-02-02  4:21   ` Wonjae Lee
  2024-02-02  5:28     ` Dan Williams
  2024-02-02 15:38     ` Dave Jiang
  2024-02-05 11:32   ` Jonathan Cameron
  3 siblings, 2 replies; 14+ messages in thread
From: Wonjae Lee @ 2024-02-02  4:21 UTC (permalink / raw)
  To: Dave Jiang, linux-cxl@vger.kernel.org
  Cc: dan.j.williams@intel.com, ira.weiny@intel.com,
	vishal.l.verma@intel.com, alison.schofield@intel.com,
	Jonathan.Cameron@huawei.com, dave@stgolabs.net, KyungSan Kim,
	Hojin Nam

On Thu, Feb 01, 2024 at 02:47:29PM -0700, Dave Jiang wrote:
> In order to address the issue with being able to expose qos_class sysfs
> attributes under 'ram' and 'pmem' sub-directories, the attributes must
> be defined as static attributes rather than under driver->dev_groups.
> To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
> preparation to move the attributes to statically defined.
>
> While theoretically a partition may have multiple qos_class via CDAT, this
> has not been encountered with testing on available hardware. The code is
> simplified for now to not support the complex case until a use case is
> needed to support that.
>
> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
> Suggested-by: Dan Williams <dan.j.williams@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> ---
> v3:
> - Add to commit log about simplification (Dan)
> - Remove check for dev->driver (Dan)
> - Remove check for invalid qos_class (Dan)
> ---
>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
>  drivers/cxl/core/mbox.c |  4 +-
>  drivers/cxl/cxlmem.h    | 10 ++---
>  drivers/cxl/mem.c       | 28 ++------------
>  4 files changed, 33 insertions(+), 90 deletions(-)
>
> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> index 6fe11546889f..55b82dfd794b 100644
> --- a/drivers/cxl/core/cdat.c
> +++ b/drivers/cxl/core/cdat.c
> @@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
>   return 0;
>  }
>
> -static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
> -            struct list_head *list)
> +static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
> +               struct cxl_dpa_perf *dpa_perf)
>  {
> - struct cxl_dpa_perf *dpa_perf;
> -
> - dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
> - if (!dpa_perf)
> -     return;
> -
>   dpa_perf->dpa_range = dent->dpa_range;
>   dpa_perf->coord = dent->coord;
>   dpa_perf->qos_class = dent->qos_class;
> - list_add_tail(&dpa_perf->list, list);
>   dev_dbg(dev,
>       "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
>       dent->dpa_range.start, dpa_perf->qos_class,
> @@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
>       dent->coord.read_latency, dent->coord.write_latency);
>  }
>
> -static void free_perf_ents(void *data)
> -{
> - struct cxl_memdev_state *mds = data;
> - struct cxl_dpa_perf *dpa_perf, *n;
> - LIST_HEAD(discard);
> -
> - list_splice_tail_init(&mds->ram_perf_list, &discard);
> - list_splice_tail_init(&mds->pmem_perf_list, &discard);
> - list_for_each_entry_safe(dpa_perf, n, &discard, list) {
> -     list_del(&dpa_perf->list);
> -     kfree(dpa_perf);
> - }
> -}
> -
>  static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
>                    struct xarray *dsmas_xa)
>  {
> @@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
>   xa_for_each(dsmas_xa, index, dent) {
>       if (resource_size(&cxlds->ram_res) &&
>           range_contains(&ram_range, &dent->dpa_range))
> -         add_perf_entry(dev, dent, &mds->ram_perf_list);
> +         update_perf_entry(dev, dent, &mds->ram_perf);
>       else if (resource_size(&cxlds->pmem_res) &&
>            range_contains(&pmem_range, &dent->dpa_range))
> -         add_perf_entry(dev, dent, &mds->pmem_perf_list);
> +         update_perf_entry(dev, dent, &mds->pmem_perf);
>       else
>           dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
>               dent->dpa_range.start);
>   }
> -
> - devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
>  }
>
>  static int match_cxlrd_qos_class(struct device *dev, void *data)
> @@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
>   return 0;
>  }
>
> +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
> +{
> + memset(&dpa_perf, 0, sizeof(*dpa_perf));

Hello,

I think you meant dpa_perf instead of &dpa_perf, right?

diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 5c93bf9d5253..7091619f12a9 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -272,7 +272,7 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)

 static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
 {
-       memset(&dpa_perf, 0, sizeof(*dpa_perf));
+       memset(dpa_perf, 0, sizeof(*dpa_perf));
        dpa_perf->qos_class = CXL_QOS_CLASS_INVALID;
 }

Thanks,
Wonjae

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* RE: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-02  4:21   ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Wonjae Lee
@ 2024-02-02  5:28     ` Dan Williams
  2024-02-02 15:40       ` Dave Jiang
  2024-02-02 15:38     ` Dave Jiang
  1 sibling, 1 reply; 14+ messages in thread
From: Dan Williams @ 2024-02-02  5:28 UTC (permalink / raw)
  To: Wonjae Lee, Dave Jiang, linux-cxl@vger.kernel.org
  Cc: dan.j.williams@intel.com, ira.weiny@intel.com,
	vishal.l.verma@intel.com, alison.schofield@intel.com,
	Jonathan.Cameron@huawei.com, dave@stgolabs.net, KyungSan Kim,
	Hojin Nam

Wonjae Lee wrote:
> On Thu, Feb 01, 2024 at 02:47:29PM -0700, Dave Jiang wrote:
> > In order to address the issue with being able to expose qos_class sysfs
> > attributes under 'ram' and 'pmem' sub-directories, the attributes must
> > be defined as static attributes rather than under driver->dev_groups.
> > To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
> > lists, convert the list to a single 'struct cxl_dpa_perf' entry in
> > preparation to move the attributes to statically defined.
> >
> > While theoretically a partition may have multiple qos_class via CDAT, this
> > has not been encountered with testing on available hardware. The code is
> > simplified for now to not support the complex case until a use case is
> > needed to support that.
> >
> > Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
> > Suggested-by: Dan Williams <dan.j.williams@intel.com>
> > Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> > ---
> > v3:
> > - Add to commit log about simplification (Dan)
> > - Remove check for dev->driver (Dan)
> > - Remove check for invalid qos_class (Dan)
> > ---
> >  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
> >  drivers/cxl/core/mbox.c |  4 +-
> >  drivers/cxl/cxlmem.h    | 10 ++---
> >  drivers/cxl/mem.c       | 28 ++------------
> >  4 files changed, 33 insertions(+), 90 deletions(-)
> >
[..]
> > diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> > index 6fe11546889f..55b82dfd794b 100644
> > --- a/drivers/cxl/core/cdat.c
> > +++ b/drivers/cxl/core/cdat.c
> > @@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
> >   return 0;
> >  }
> >
> > +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
> > +{
> > + memset(&dpa_perf, 0, sizeof(*dpa_perf));
> 
> Hello,
> 
> I think you meant dpa_perf instead of &dpa_perf, right?
> 
> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> index 5c93bf9d5253..7091619f12a9 100644
> --- a/drivers/cxl/core/cdat.c
> +++ b/drivers/cxl/core/cdat.c
> @@ -272,7 +272,7 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
> 
>  static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>  {
> -       memset(&dpa_perf, 0, sizeof(*dpa_perf));
> +       memset(dpa_perf, 0, sizeof(*dpa_perf));

Good catch!

...or even better kill this function and just do:

    *dpa_perf = { 0 };

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-02  4:21   ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Wonjae Lee
  2024-02-02  5:28     ` Dan Williams
@ 2024-02-02 15:38     ` Dave Jiang
  1 sibling, 0 replies; 14+ messages in thread
From: Dave Jiang @ 2024-02-02 15:38 UTC (permalink / raw)
  To: wj28.lee, linux-cxl@vger.kernel.org
  Cc: dan.j.williams@intel.com, ira.weiny@intel.com,
	vishal.l.verma@intel.com, alison.schofield@intel.com,
	Jonathan.Cameron@huawei.com, dave@stgolabs.net, KyungSan Kim,
	Hojin Nam



On 2/1/24 21:21, Wonjae Lee wrote:
> On Thu, Feb 01, 2024 at 02:47:29PM -0700, Dave Jiang wrote:
>> In order to address the issue with being able to expose qos_class sysfs
>> attributes under 'ram' and 'pmem' sub-directories, the attributes must
>> be defined as static attributes rather than under driver->dev_groups.
>> To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
>> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
>> preparation to move the attributes to statically defined.
>>
>> While theoretically a partition may have multiple qos_class via CDAT, this
>> has not been encountered with testing on available hardware. The code is
>> simplified for now to not support the complex case until a use case is
>> needed to support that.
>>
>> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
>> Suggested-by: Dan Williams <dan.j.williams@intel.com>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> ---
>> v3:
>> - Add to commit log about simplification (Dan)
>> - Remove check for dev->driver (Dan)
>> - Remove check for invalid qos_class (Dan)
>> ---
>>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
>>  drivers/cxl/core/mbox.c |  4 +-
>>  drivers/cxl/cxlmem.h    | 10 ++---
>>  drivers/cxl/mem.c       | 28 ++------------
>>  4 files changed, 33 insertions(+), 90 deletions(-)
>>
>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>> index 6fe11546889f..55b82dfd794b 100644
>> --- a/drivers/cxl/core/cdat.c
>> +++ b/drivers/cxl/core/cdat.c
>> @@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
>>   return 0;
>>  }
>>
>> -static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
>> -            struct list_head *list)
>> +static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
>> +               struct cxl_dpa_perf *dpa_perf)
>>  {
>> - struct cxl_dpa_perf *dpa_perf;
>> -
>> - dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
>> - if (!dpa_perf)
>> -     return;
>> -
>>   dpa_perf->dpa_range = dent->dpa_range;
>>   dpa_perf->coord = dent->coord;
>>   dpa_perf->qos_class = dent->qos_class;
>> - list_add_tail(&dpa_perf->list, list);
>>   dev_dbg(dev,
>>       "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
>>       dent->dpa_range.start, dpa_perf->qos_class,
>> @@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
>>       dent->coord.read_latency, dent->coord.write_latency);
>>  }
>>
>> -static void free_perf_ents(void *data)
>> -{
>> - struct cxl_memdev_state *mds = data;
>> - struct cxl_dpa_perf *dpa_perf, *n;
>> - LIST_HEAD(discard);
>> -
>> - list_splice_tail_init(&mds->ram_perf_list, &discard);
>> - list_splice_tail_init(&mds->pmem_perf_list, &discard);
>> - list_for_each_entry_safe(dpa_perf, n, &discard, list) {
>> -     list_del(&dpa_perf->list);
>> -     kfree(dpa_perf);
>> - }
>> -}
>> -
>>  static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
>>                    struct xarray *dsmas_xa)
>>  {
>> @@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
>>   xa_for_each(dsmas_xa, index, dent) {
>>       if (resource_size(&cxlds->ram_res) &&
>>           range_contains(&ram_range, &dent->dpa_range))
>> -         add_perf_entry(dev, dent, &mds->ram_perf_list);
>> +         update_perf_entry(dev, dent, &mds->ram_perf);
>>       else if (resource_size(&cxlds->pmem_res) &&
>>            range_contains(&pmem_range, &dent->dpa_range))
>> -         add_perf_entry(dev, dent, &mds->pmem_perf_list);
>> +         update_perf_entry(dev, dent, &mds->pmem_perf);
>>       else
>>           dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
>>               dent->dpa_range.start);
>>   }
>> -
>> - devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
>>  }
>>
>>  static int match_cxlrd_qos_class(struct device *dev, void *data)
>> @@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
>>   return 0;
>>  }
>>
>> +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>> +{
>> + memset(&dpa_perf, 0, sizeof(*dpa_perf));
> 
> Hello,
> 
> I think you meant dpa_perf instead of &dpa_perf, right?

Yes thank you!

> 
> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> index 5c93bf9d5253..7091619f12a9 100644
> --- a/drivers/cxl/core/cdat.c
> +++ b/drivers/cxl/core/cdat.c
> @@ -272,7 +272,7 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
> 
>  static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>  {
> -       memset(&dpa_perf, 0, sizeof(*dpa_perf));
> +       memset(dpa_perf, 0, sizeof(*dpa_perf));
>         dpa_perf->qos_class = CXL_QOS_CLASS_INVALID;
>  }
> 
> Thanks,
> Wonjae

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-02  5:28     ` Dan Williams
@ 2024-02-02 15:40       ` Dave Jiang
  2024-02-02 15:51         ` Dave Jiang
  0 siblings, 1 reply; 14+ messages in thread
From: Dave Jiang @ 2024-02-02 15:40 UTC (permalink / raw)
  To: Dan Williams, Wonjae Lee, linux-cxl@vger.kernel.org
  Cc: ira.weiny@intel.com, vishal.l.verma@intel.com,
	alison.schofield@intel.com, Jonathan.Cameron@huawei.com,
	dave@stgolabs.net, KyungSan Kim, Hojin Nam



On 2/1/24 22:28, Dan Williams wrote:
> Wonjae Lee wrote:
>> On Thu, Feb 01, 2024 at 02:47:29PM -0700, Dave Jiang wrote:
>>> In order to address the issue with being able to expose qos_class sysfs
>>> attributes under 'ram' and 'pmem' sub-directories, the attributes must
>>> be defined as static attributes rather than under driver->dev_groups.
>>> To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
>>> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
>>> preparation to move the attributes to statically defined.
>>>
>>> While theoretically a partition may have multiple qos_class via CDAT, this
>>> has not been encountered with testing on available hardware. The code is
>>> simplified for now to not support the complex case until a use case is
>>> needed to support that.
>>>
>>> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
>>> Suggested-by: Dan Williams <dan.j.williams@intel.com>
>>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>>> ---
>>> v3:
>>> - Add to commit log about simplification (Dan)
>>> - Remove check for dev->driver (Dan)
>>> - Remove check for invalid qos_class (Dan)
>>> ---
>>>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
>>>  drivers/cxl/core/mbox.c |  4 +-
>>>  drivers/cxl/cxlmem.h    | 10 ++---
>>>  drivers/cxl/mem.c       | 28 ++------------
>>>  4 files changed, 33 insertions(+), 90 deletions(-)
>>>
> [..]
>>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>>> index 6fe11546889f..55b82dfd794b 100644
>>> --- a/drivers/cxl/core/cdat.c
>>> +++ b/drivers/cxl/core/cdat.c
>>> @@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
>>>   return 0;
>>>  }
>>>
>>> +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>>> +{
>>> + memset(&dpa_perf, 0, sizeof(*dpa_perf));
>>
>> Hello,
>>
>> I think you meant dpa_perf instead of &dpa_perf, right?
>>
>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>> index 5c93bf9d5253..7091619f12a9 100644
>> --- a/drivers/cxl/core/cdat.c
>> +++ b/drivers/cxl/core/cdat.c
>> @@ -272,7 +272,7 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
>>
>>  static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>>  {
>> -       memset(&dpa_perf, 0, sizeof(*dpa_perf));
>> +       memset(dpa_perf, 0, sizeof(*dpa_perf));
> 
> Good catch!
> 
> ...or even better kill this function and just do:
> 
>     *dpa_perf = { 0 };

We need to reinit the qos_class to -1 as well. 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-02 15:40       ` Dave Jiang
@ 2024-02-02 15:51         ` Dave Jiang
  2024-02-05 11:15           ` Jonathan Cameron
  2024-02-05 18:40           ` Dan Williams
  0 siblings, 2 replies; 14+ messages in thread
From: Dave Jiang @ 2024-02-02 15:51 UTC (permalink / raw)
  To: Dan Williams, Wonjae Lee, linux-cxl@vger.kernel.org
  Cc: ira.weiny@intel.com, vishal.l.verma@intel.com,
	alison.schofield@intel.com, Jonathan.Cameron@huawei.com,
	dave@stgolabs.net, KyungSan Kim, Hojin Nam



On 2/2/24 08:40, Dave Jiang wrote:
> 
> 
> On 2/1/24 22:28, Dan Williams wrote:
>> Wonjae Lee wrote:
>>> On Thu, Feb 01, 2024 at 02:47:29PM -0700, Dave Jiang wrote:
>>>> In order to address the issue with being able to expose qos_class sysfs
>>>> attributes under 'ram' and 'pmem' sub-directories, the attributes must
>>>> be defined as static attributes rather than under driver->dev_groups.
>>>> To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
>>>> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
>>>> preparation to move the attributes to statically defined.
>>>>
>>>> While theoretically a partition may have multiple qos_class via CDAT, this
>>>> has not been encountered with testing on available hardware. The code is
>>>> simplified for now to not support the complex case until a use case is
>>>> needed to support that.
>>>>
>>>> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
>>>> Suggested-by: Dan Williams <dan.j.williams@intel.com>
>>>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>>>> ---
>>>> v3:
>>>> - Add to commit log about simplification (Dan)
>>>> - Remove check for dev->driver (Dan)
>>>> - Remove check for invalid qos_class (Dan)
>>>> ---
>>>>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
>>>>  drivers/cxl/core/mbox.c |  4 +-
>>>>  drivers/cxl/cxlmem.h    | 10 ++---
>>>>  drivers/cxl/mem.c       | 28 ++------------
>>>>  4 files changed, 33 insertions(+), 90 deletions(-)
>>>>
>> [..]
>>>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>>>> index 6fe11546889f..55b82dfd794b 100644
>>>> --- a/drivers/cxl/core/cdat.c
>>>> +++ b/drivers/cxl/core/cdat.c
>>>> @@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
>>>>   return 0;
>>>>  }
>>>>
>>>> +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>>>> +{
>>>> + memset(&dpa_perf, 0, sizeof(*dpa_perf));
>>>
>>> Hello,
>>>
>>> I think you meant dpa_perf instead of &dpa_perf, right?
>>>
>>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>>> index 5c93bf9d5253..7091619f12a9 100644
>>> --- a/drivers/cxl/core/cdat.c
>>> +++ b/drivers/cxl/core/cdat.c
>>> @@ -272,7 +272,7 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
>>>
>>>  static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
>>>  {
>>> -       memset(&dpa_perf, 0, sizeof(*dpa_perf));
>>> +       memset(dpa_perf, 0, sizeof(*dpa_perf));
>>
>> Good catch!
>>
>> ...or even better kill this function and just do:
>>
>>     *dpa_perf = { 0 };
> 
> We need to reinit the qos_class to -1 as well. 
>


This should do it right? The rest should be zeroed. 

	*dpa_perf = (struct cxl_dpa_perf) {
		.qos_class = CXL_QOS_CLASS_INVALID,
	};


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-02 15:51         ` Dave Jiang
@ 2024-02-05 11:15           ` Jonathan Cameron
  2024-02-05 18:40           ` Dan Williams
  1 sibling, 0 replies; 14+ messages in thread
From: Jonathan Cameron @ 2024-02-05 11:15 UTC (permalink / raw)
  To: Dave Jiang
  Cc: Dan Williams, Wonjae Lee, linux-cxl@vger.kernel.org,
	ira.weiny@intel.com, vishal.l.verma@intel.com,
	alison.schofield@intel.com, dave@stgolabs.net, KyungSan Kim,
	Hojin Nam

On Fri, 2 Feb 2024 08:51:01 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> On 2/2/24 08:40, Dave Jiang wrote:
> > 
> > 
> > On 2/1/24 22:28, Dan Williams wrote:  
> >> Wonjae Lee wrote:  
> >>> On Thu, Feb 01, 2024 at 02:47:29PM -0700, Dave Jiang wrote:  
> >>>> In order to address the issue with being able to expose qos_class sysfs
> >>>> attributes under 'ram' and 'pmem' sub-directories, the attributes must
> >>>> be defined as static attributes rather than under driver->dev_groups.
> >>>> To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
> >>>> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
> >>>> preparation to move the attributes to statically defined.
> >>>>
> >>>> While theoretically a partition may have multiple qos_class via CDAT, this
> >>>> has not been encountered with testing on available hardware. The code is
> >>>> simplified for now to not support the complex case until a use case is
> >>>> needed to support that.
> >>>>
> >>>> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
> >>>> Suggested-by: Dan Williams <dan.j.williams@intel.com>
> >>>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> >>>> ---
> >>>> v3:
> >>>> - Add to commit log about simplification (Dan)
> >>>> - Remove check for dev->driver (Dan)
> >>>> - Remove check for invalid qos_class (Dan)
> >>>> ---
> >>>>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
> >>>>  drivers/cxl/core/mbox.c |  4 +-
> >>>>  drivers/cxl/cxlmem.h    | 10 ++---
> >>>>  drivers/cxl/mem.c       | 28 ++------------
> >>>>  4 files changed, 33 insertions(+), 90 deletions(-)
> >>>>  
> >> [..]  
> >>>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> >>>> index 6fe11546889f..55b82dfd794b 100644
> >>>> --- a/drivers/cxl/core/cdat.c
> >>>> +++ b/drivers/cxl/core/cdat.c
> >>>> @@ -293,24 +270,25 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
> >>>>   return 0;
> >>>>  }
> >>>>
> >>>> +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
> >>>> +{
> >>>> + memset(&dpa_perf, 0, sizeof(*dpa_perf));  
> >>>
> >>> Hello,
> >>>
> >>> I think you meant dpa_perf instead of &dpa_perf, right?
> >>>
> >>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> >>> index 5c93bf9d5253..7091619f12a9 100644
> >>> --- a/drivers/cxl/core/cdat.c
> >>> +++ b/drivers/cxl/core/cdat.c
> >>> @@ -272,7 +272,7 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
> >>>
> >>>  static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
> >>>  {
> >>> -       memset(&dpa_perf, 0, sizeof(*dpa_perf));
> >>> +       memset(dpa_perf, 0, sizeof(*dpa_perf));  
> >>
> >> Good catch!
> >>
> >> ...or even better kill this function and just do:
> >>
> >>     *dpa_perf = { 0 };  
> > 
> > We need to reinit the qos_class to -1 as well. 
> >  
> 
> 
> This should do it right? The rest should be zeroed. 
> 
> 	*dpa_perf = (struct cxl_dpa_perf) {
> 		.qos_class = CXL_QOS_CLASS_INVALID,
> 	};
> 

yes

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-01 21:47 ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Dave Jiang
                     ` (2 preceding siblings ...)
  2024-02-02  4:21   ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Wonjae Lee
@ 2024-02-05 11:32   ` Jonathan Cameron
  2024-02-05 18:05     ` Dave Jiang
  3 siblings, 1 reply; 14+ messages in thread
From: Jonathan Cameron @ 2024-02-05 11:32 UTC (permalink / raw)
  To: Dave Jiang
  Cc: linux-cxl, dan.j.williams, ira.weiny, vishal.l.verma,
	alison.schofield, dave

On Thu, 1 Feb 2024 14:47:29 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> In order to address the issue with being able to expose qos_class sysfs
> attributes under 'ram' and 'pmem' sub-directories, the attributes must
> be defined as static attributes rather than under driver->dev_groups.
> To avoid implementing locking for accessing the 'struct cxl_dpa_perf`
> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
> preparation to move the attributes to statically defined.
> 
> While theoretically a partition may have multiple qos_class via CDAT, this
> has not been encountered with testing on available hardware. The code is
> simplified for now to not support the complex case until a use case is
> needed to support that.
> 
> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
> Suggested-by: Dan Williams <dan.j.williams@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
A few comments inline.

Jonathan

> ---
> v3:
> - Add to commit log about simplification (Dan)
> - Remove check for dev->driver (Dan)
> - Remove check for invalid qos_class (Dan)
> ---
>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
>  drivers/cxl/core/mbox.c |  4 +-
>  drivers/cxl/cxlmem.h    | 10 ++---
>  drivers/cxl/mem.c       | 28 ++------------
>  4 files changed, 33 insertions(+), 90 deletions(-)
> 
> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> index 6fe11546889f..55b82dfd794b 100644
> --- a/drivers/cxl/core/cdat.c

>  static void cxl_qos_match(struct cxl_port *root_port,
> -			  struct list_head *work_list,
> -			  struct list_head *discard_list)
> +			  struct cxl_dpa_perf *dpa_perf)
>  {
> -	struct cxl_dpa_perf *dpa_perf, *n;
> +	int rc;
>  
> -	list_for_each_entry_safe(dpa_perf, n, work_list, list) {
> -		int rc;
> +	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
> +		return;
>  
> -		if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
> -			return;
> -
> -		rc = device_for_each_child(&root_port->dev,
> -					   (void *)&dpa_perf->qos_class,
> -					   match_cxlrd_qos_class);
> -		if (!rc)
> -			list_move_tail(&dpa_perf->list, discard_list);
> -	}
> +	rc = device_for_each_child(&root_port->dev,

Over aggressive wrap.

> +				   &dpa_perf->qos_class,
> +				   match_cxlrd_qos_class);
> +	if (!rc)
> +		reset_dpa_perf(dpa_perf);

I'm not particularly keen on a function that on failure to match resets
some internal state in one of its inputs.
Would prefer to see this return a bool then the caller decide to reset it.

>  }
>  
>  static int match_cxlrd_hb(struct device *dev, void *data)
> @@ -334,23 +312,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
>  	return 0;
>  }
>  

>  static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
>  {
>  	struct cxl_dev_state *cxlds = cxlmd->cxlds;
>  	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
> -	LIST_HEAD(__discard);
> -	struct list_head *discard __free(dpa_perf) = &__discard;
>  	struct cxl_port *root_port;
>  	int rc;
>  
> @@ -363,16 +328,16 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
>  	root_port = &cxl_root->port;
>  
>  	/* Check that the QTG IDs are all sane between end device and root decoders */
> -	cxl_qos_match(root_port, &mds->ram_perf_list, discard);
> -	cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
> +	cxl_qos_match(root_port, &mds->ram_perf);
> +	cxl_qos_match(root_port, &mds->pmem_perf);
>  
>  	/* Check to make sure that the device's host bridge is under a root decoder */
>  	rc = device_for_each_child(&root_port->dev,
>  				   (void *)cxlmd->endpoint->host_bridge,

Why is the explicit void * cast needed?  It's not removing const or anything like
that so usual C rules on it being fine to implicitly cast to void * should apply.


>  				   match_cxlrd_hb);
>  	if (!rc) {
> -		list_splice_tail_init(&mds->ram_perf_list, discard);
> -		list_splice_tail_init(&mds->pmem_perf_list, discard);
> +		reset_dpa_perf(&mds->ram_perf);
> +		reset_dpa_perf(&mds->pmem_perf);
>  	}
>  
>  	return rc;

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev
  2024-02-01 21:47   ` [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev Dave Jiang
@ 2024-02-05 11:40     ` Jonathan Cameron
  2024-02-05 18:23       ` Dave Jiang
  0 siblings, 1 reply; 14+ messages in thread
From: Jonathan Cameron @ 2024-02-05 11:40 UTC (permalink / raw)
  To: Dave Jiang
  Cc: linux-cxl, dan.j.williams, ira.weiny, vishal.l.verma,
	alison.schofield, dave

On Thu, 1 Feb 2024 14:47:30 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> Current implementation exports only to
> /sys/bus/cxl/devices/.../memN/qos_class. With both ram and pmem exposed,
> the second registered sysfs attribute is rejected as duplicate. It's not
> possible to create qos_class under the dev_groups via the driver due to
> the ram and pmem sysfs sub-directories already created by the device sysfs
> groups. Move the ram and pmem qos_class to the device sysfs groups and add
> a call to sysfs_update() after the perf data are validated so the
> qos_class can be visible. The end results should be
> /sys/bus/cxl/devices/.../memN/ram/qos_class and
> /sys/bus/cxl/devices/.../memN/pmem/qos_class.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Some comments inline.  Only thing I really care about though is a comment
in the code to say why sysfs_update_groups() is not appropriate.

> ---
> v3:
> - Updated based on pervious patch changes
> ---
>  drivers/cxl/core/cdat.c   |  1 +
>  drivers/cxl/core/memdev.c | 71 +++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxl.h         |  2 ++
>  drivers/cxl/mem.c         | 36 --------------------
>  4 files changed, 74 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> index 55b82dfd794b..5c93bf9d5253 100644
> --- a/drivers/cxl/core/cdat.c
> +++ b/drivers/cxl/core/cdat.c
> @@ -382,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
>  
>  	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
>  	cxl_qos_class_verify(cxlmd);
> +	cxl_memdev_update_attribute_groups(cxlmd);
>  }
>  EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
>  
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index dae8802ecdb0..ed85096a33fb 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
>  	NULL,
>  };
>  
> +static ssize_t pmem_qos_class_show(struct device *dev,
> +				   struct device_attribute *attr, char *buf)
> +{
> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
> +	struct cxl_dev_state *cxlds = cxlmd->cxlds;
> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
> +
> +	return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
> +}
> +
> +static struct device_attribute dev_attr_pmem_qos_class =
> +	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
> +
>  static struct attribute *cxl_memdev_pmem_attributes[] = {
>  	&dev_attr_pmem_size.attr,
> +	&dev_attr_pmem_qos_class.attr,
>  	NULL,
>  };
>  
> +static ssize_t ram_qos_class_show(struct device *dev,
> +				  struct device_attribute *attr, char *buf)
> +{
> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
> +	struct cxl_dev_state *cxlds = cxlmd->cxlds;
> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
> +
> +	return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
> +}
> +
> +static struct device_attribute dev_attr_ram_qos_class =
> +	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
> +
>  static struct attribute *cxl_memdev_ram_attributes[] = {
>  	&dev_attr_ram_size.attr,
> +	&dev_attr_ram_qos_class.attr,
>  	NULL,
>  };
>  
> @@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
>  	.is_visible = cxl_memdev_visible,
>  };
>  
> +static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
> +{
> +	struct device *dev = kobj_to_dev(kobj);
> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
> +
> +	if (a == &dev_attr_ram_qos_class.attr)
> +		if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
> +			return 0;
> +
> +	return a->mode;
> +}
> +
>  static struct attribute_group cxl_memdev_ram_attribute_group = {
>  	.name = "ram",
>  	.attrs = cxl_memdev_ram_attributes,
> +	.is_visible = cxl_ram_visible,
>  };
>  
> +static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
> +{
> +	struct device *dev = kobj_to_dev(kobj);
> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
> +
> +	if (a == &dev_attr_pmem_qos_class.attr)
> +		if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
> +			return 0;
> +
> +	return a->mode;
> +}
> +
>  static struct attribute_group cxl_memdev_pmem_attribute_group = {
>  	.name = "pmem",
>  	.attrs = cxl_memdev_pmem_attributes,
> +	.is_visible = cxl_pmem_visible,
>  };
>  
>  static umode_t cxl_memdev_security_visible(struct kobject *kobj,
> @@ -519,6 +575,21 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
>  	NULL,
>  };
>  
> +void cxl_memdev_update_attribute_groups(struct cxl_memdev *cxlmd)
> +{
> +	const struct attribute_group *grp = cxl_memdev_attribute_groups[0];
> +
> +	for (int i = 0; grp; i++) {
> +		int rc = sysfs_update_group(&cxlmd->dev.kobj, grp);
> +
> +		if (rc)
> +			dev_dbg(&cxlmd->dev,
> +				"Unable to update memdev attribute group.\n");
> +		grp = cxl_memdev_attribute_groups[i + 1];
We don't need i explicitly so maybe cleaner as something like.

	for (const struct attribute_group **grp = &cxl_memdev_attribute_groups[0];
	     grp; grp++)
		if (sysfs_update_group(&cxlmd->dev.kobj, *grp))
			dev_dbg(....)

Mind you I'm not that convinced, so fine if you want to stick with current code :)

I'd also like a comment on why sysfs_update_groups() is not appropriate here.

> +	}
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_attribute_groups, CXL);
> +
>  static const struct device_type cxl_memdev_type = {
>  	.name = "cxl_memdev",
>  	.release = cxl_memdev_release,

>  


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-05 11:32   ` Jonathan Cameron
@ 2024-02-05 18:05     ` Dave Jiang
  0 siblings, 0 replies; 14+ messages in thread
From: Dave Jiang @ 2024-02-05 18:05 UTC (permalink / raw)
  To: Jonathan Cameron
  Cc: linux-cxl, dan.j.williams, ira.weiny, vishal.l.verma,
	alison.schofield, dave



On 2/5/24 4:32 AM, Jonathan Cameron wrote:
> On Thu, 1 Feb 2024 14:47:29 -0700
> Dave Jiang <dave.jiang@intel.com> wrote:
> 
>> In order to address the issue with being able to expose qos_class sysfs
>> attributes under 'ram' and 'pmem' sub-directories, the attributes must
>> be defined as static attributes rather than under driver->dev_groups.
>> To avoid implementing locking for accessing the 'struct cxl_dpa_perf'
>> lists, convert the list to a single 'struct cxl_dpa_perf' entry in
>> preparation to move the attributes to statically defined.
>>
>> While theoretically a partition may have multiple qos_class via CDAT, this
>> has not been encountered with testing on available hardware. The code is
>> simplified for now to not support the complex case until a use case is
>> needed to support that.
>>
>> Link: https://lore.kernel.org/linux-cxl/65b200ba228f_2d43c29468@dwillia2-mobl3.amr.corp.intel.com.notmuch/
>> Suggested-by: Dan Williams <dan.j.williams@intel.com>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> A few comments inline.
> 
> Jonathan
> 
>> ---
>> v3:
>> - Add to commit log about simplification (Dan)
>> - Remove check for dev->driver (Dan)
>> - Remove check for invalid qos_class (Dan)
>> ---
>>  drivers/cxl/core/cdat.c | 81 ++++++++++++-----------------------------
>>  drivers/cxl/core/mbox.c |  4 +-
>>  drivers/cxl/cxlmem.h    | 10 ++---
>>  drivers/cxl/mem.c       | 28 ++------------
>>  4 files changed, 33 insertions(+), 90 deletions(-)
>>
>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>> index 6fe11546889f..55b82dfd794b 100644
>> --- a/drivers/cxl/core/cdat.c
> 
>>  static void cxl_qos_match(struct cxl_port *root_port,
>> -			  struct list_head *work_list,
>> -			  struct list_head *discard_list)
>> +			  struct cxl_dpa_perf *dpa_perf)
>>  {
>> -	struct cxl_dpa_perf *dpa_perf, *n;
>> +	int rc;
>>  
>> -	list_for_each_entry_safe(dpa_perf, n, work_list, list) {
>> -		int rc;
>> +	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
>> +		return;
>>  
>> -		if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
>> -			return;
>> -
>> -		rc = device_for_each_child(&root_port->dev,
>> -					   (void *)&dpa_perf->qos_class,
>> -					   match_cxlrd_qos_class);
>> -		if (!rc)
>> -			list_move_tail(&dpa_perf->list, discard_list);
>> -	}
>> +	rc = device_for_each_child(&root_port->dev,
> 
> Over aggressive wrap.

Will fix

> 
>> +				   &dpa_perf->qos_class,
>> +				   match_cxlrd_qos_class);
>> +	if (!rc)
>> +		reset_dpa_perf(dpa_perf);
> 
> I'm not particularly keen on a function that on failure to match resets
> some internal state in one of its inputs.
> Would prefer to see this return a bool then the caller decide to reset it.

Ok I'll change.
> 
>>  }
>>  
>>  static int match_cxlrd_hb(struct device *dev, void *data)
>> @@ -334,23 +312,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
>>  	return 0;
>>  }
>>  
> 
>>  static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
>>  {
>>  	struct cxl_dev_state *cxlds = cxlmd->cxlds;
>>  	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
>> -	LIST_HEAD(__discard);
>> -	struct list_head *discard __free(dpa_perf) = &__discard;
>>  	struct cxl_port *root_port;
>>  	int rc;
>>  
>> @@ -363,16 +328,16 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
>>  	root_port = &cxl_root->port;
>>  
>>  	/* Check that the QTG IDs are all sane between end device and root decoders */
>> -	cxl_qos_match(root_port, &mds->ram_perf_list, discard);
>> -	cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
>> +	cxl_qos_match(root_port, &mds->ram_perf);
>> +	cxl_qos_match(root_port, &mds->pmem_perf);
>>  
>>  	/* Check to make sure that the device's host bridge is under a root decoder */
>>  	rc = device_for_each_child(&root_port->dev,
>>  				   (void *)cxlmd->endpoint->host_bridge,
> 
> Why is the explicit void * cast needed?  It's not removing const or anything like
> that so usual c rules on it being fine to implicitly cast to void * should apply.

I'll fix that in a different patch.

> 
> 
>>  				   match_cxlrd_hb);
>>  	if (!rc) {
>> -		list_splice_tail_init(&mds->ram_perf_list, discard);
>> -		list_splice_tail_init(&mds->pmem_perf_list, discard);
>> +		reset_dpa_perf(&mds->ram_perf);
>> +		reset_dpa_perf(&mds->pmem_perf);
>>  	}
>>  
>>  	return rc;

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev
  2024-02-05 11:40     ` Jonathan Cameron
@ 2024-02-05 18:23       ` Dave Jiang
  0 siblings, 0 replies; 14+ messages in thread
From: Dave Jiang @ 2024-02-05 18:23 UTC (permalink / raw)
  To: Jonathan Cameron
  Cc: linux-cxl, dan.j.williams, ira.weiny, vishal.l.verma,
	alison.schofield, dave



On 2/5/24 4:40 AM, Jonathan Cameron wrote:
> On Thu, 1 Feb 2024 14:47:30 -0700
> Dave Jiang <dave.jiang@intel.com> wrote:
> 
>> Current implementation exports only to
>> /sys/bus/cxl/devices/.../memN/qos_class. With both ram and pmem exposed,
>> the second registered sysfs attribute is rejected as duplicate. It's not
>> possible to create qos_class under the dev_groups via the driver due to
>> the ram and pmem sysfs sub-directories already created by the device sysfs
>> groups. Move the ram and pmem qos_class to the device sysfs groups and add
>> a call to sysfs_update() after the perf data are validated so the
>> qos_class can be visible. The end results should be
>> /sys/bus/cxl/devices/.../memN/ram/qos_class and
>> /sys/bus/cxl/devices/.../memN/pmem/qos_class.
>>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Some comments inline.  Only thing I really care about though is a comment
> in the code to say why sysfs_update_groups() is not appropriate.

Didn't realize that helper existed. I'll move over to using that. 
DJ

> 
>> ---
>> v3:
>> - Updated based on pervious patch changes
>> ---
>>  drivers/cxl/core/cdat.c   |  1 +
>>  drivers/cxl/core/memdev.c | 71 +++++++++++++++++++++++++++++++++++++++
>>  drivers/cxl/cxl.h         |  2 ++
>>  drivers/cxl/mem.c         | 36 --------------------
>>  4 files changed, 74 insertions(+), 36 deletions(-)
>>
>> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
>> index 55b82dfd794b..5c93bf9d5253 100644
>> --- a/drivers/cxl/core/cdat.c
>> +++ b/drivers/cxl/core/cdat.c
>> @@ -382,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
>>  
>>  	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
>>  	cxl_qos_class_verify(cxlmd);
>> +	cxl_memdev_update_attribute_groups(cxlmd);
>>  }
>>  EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
>>  
>> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
>> index dae8802ecdb0..ed85096a33fb 100644
>> --- a/drivers/cxl/core/memdev.c
>> +++ b/drivers/cxl/core/memdev.c
>> @@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
>>  	NULL,
>>  };
>>  
>> +static ssize_t pmem_qos_class_show(struct device *dev,
>> +				   struct device_attribute *attr, char *buf)
>> +{
>> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>> +	struct cxl_dev_state *cxlds = cxlmd->cxlds;
>> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
>> +
>> +	return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
>> +}
>> +
>> +static struct device_attribute dev_attr_pmem_qos_class =
>> +	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
>> +
>>  static struct attribute *cxl_memdev_pmem_attributes[] = {
>>  	&dev_attr_pmem_size.attr,
>> +	&dev_attr_pmem_qos_class.attr,
>>  	NULL,
>>  };
>>  
>> +static ssize_t ram_qos_class_show(struct device *dev,
>> +				  struct device_attribute *attr, char *buf)
>> +{
>> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>> +	struct cxl_dev_state *cxlds = cxlmd->cxlds;
>> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
>> +
>> +	return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
>> +}
>> +
>> +static struct device_attribute dev_attr_ram_qos_class =
>> +	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
>> +
>>  static struct attribute *cxl_memdev_ram_attributes[] = {
>>  	&dev_attr_ram_size.attr,
>> +	&dev_attr_ram_qos_class.attr,
>>  	NULL,
>>  };
>>  
>> @@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
>>  	.is_visible = cxl_memdev_visible,
>>  };
>>  
>> +static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
>> +{
>> +	struct device *dev = kobj_to_dev(kobj);
>> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
>> +
>> +	if (a == &dev_attr_ram_qos_class.attr)
>> +		if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
>> +			return 0;
>> +
>> +	return a->mode;
>> +}
>> +
>>  static struct attribute_group cxl_memdev_ram_attribute_group = {
>>  	.name = "ram",
>>  	.attrs = cxl_memdev_ram_attributes,
>> +	.is_visible = cxl_ram_visible,
>>  };
>>  
>> +static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
>> +{
>> +	struct device *dev = kobj_to_dev(kobj);
>> +	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>> +	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
>> +
>> +	if (a == &dev_attr_pmem_qos_class.attr)
>> +		if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
>> +			return 0;
>> +
>> +	return a->mode;
>> +}
>> +
>>  static struct attribute_group cxl_memdev_pmem_attribute_group = {
>>  	.name = "pmem",
>>  	.attrs = cxl_memdev_pmem_attributes,
>> +	.is_visible = cxl_pmem_visible,
>>  };
>>  
>>  static umode_t cxl_memdev_security_visible(struct kobject *kobj,
>> @@ -519,6 +575,21 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
>>  	NULL,
>>  };
>>  
>> +void cxl_memdev_update_attribute_groups(struct cxl_memdev *cxlmd)
>> +{
>> +	const struct attribute_group *grp = cxl_memdev_attribute_groups[0];
>> +
>> +	for (int i = 0; grp; i++) {
>> +		int rc = sysfs_update_group(&cxlmd->dev.kobj, grp);
>> +
>> +		if (rc)
>> +			dev_dbg(&cxlmd->dev,
>> +				"Unable to update memdev attribute group.\n");
>> +		grp = cxl_memdev_attribute_groups[i + 1];
> We don't need i explicitly so maybe cleaner as something like.
> 
> 	for (const struct attribute_group **grp = &cxl_memdev_attribute_groups[0];
> 	     grp; grp++)
> 		if (sysfs_update_group(&cxlmd->dev.kobj, *grp))
> 			dev_dbg(....)
> 
> Mind you I'm not that convinced, so fine if you want to stick with current code :)
> 
> I'd also like a comment on why sysfs_update_groups() is not appropriate here.
> 
>> +	}
>> +}
>> +EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_attribute_groups, CXL);
>> +
>>  static const struct device_type cxl_memdev_type = {
>>  	.name = "cxl_memdev",
>>  	.release = cxl_memdev_release,
> 
>>  
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf'
  2024-02-02 15:51         ` Dave Jiang
  2024-02-05 11:15           ` Jonathan Cameron
@ 2024-02-05 18:40           ` Dan Williams
  1 sibling, 0 replies; 14+ messages in thread
From: Dan Williams @ 2024-02-05 18:40 UTC (permalink / raw)
  To: Dave Jiang, Dan Williams, Wonjae Lee, linux-cxl@vger.kernel.org
  Cc: ira.weiny@intel.com, vishal.l.verma@intel.com,
	alison.schofield@intel.com, Jonathan.Cameron@huawei.com,
	dave@stgolabs.net, KyungSan Kim, Hojin Nam

Dave Jiang wrote:
> >>
> >> Good catch!
> >>
> >> ...or even better kill this function and just do:
> >>
> >>     *dpa_perf = { 0 };
> > 
> > We need to reinit the qos_class to -1 as well. 
> >
> 
> 
> This should do it right? The rest should be zeroed. 
> 
> 	*dpa_perf = (struct cxl_dpa_perf) {
> 		.qos_class = CXL_QOS_CLASS_INVALID,
> 	};
> 

Yup, was about to send the same.

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2024-02-05 18:41 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
     [not found] <CGME20240201214822epcas2p3062089e1281f483fb26eea3c80a71475@epcms2p3>
2024-02-01 21:47 ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Dave Jiang
2024-02-01 21:47   ` [PATCH v3 2/3] cxl: Fix sysfs export of qos_class for memdev Dave Jiang
2024-02-05 11:40     ` Jonathan Cameron
2024-02-05 18:23       ` Dave Jiang
2024-02-01 21:47   ` [PATCH v3 3/3] cxl/test: Add support for qos_class checking Dave Jiang
2024-02-02  4:21   ` [PATCH v3 1/3] cxl: Change 'struct cxl_memdev_state' *_perf_list to single 'struct cxl_dpa_perf' Wonjae Lee
2024-02-02  5:28     ` Dan Williams
2024-02-02 15:40       ` Dave Jiang
2024-02-02 15:51         ` Dave Jiang
2024-02-05 11:15           ` Jonathan Cameron
2024-02-05 18:40           ` Dan Williams
2024-02-02 15:38     ` Dave Jiang
2024-02-05 11:32   ` Jonathan Cameron
2024-02-05 18:05     ` Dave Jiang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox