public inbox for linux-cxl@vger.kernel.org
 help / color / mirror / Atom feed
From: Dan Williams <dan.j.williams@intel.com>
To: dave.jiang@intel.com
Cc: patches@lists.linux.dev, linux-cxl@vger.kernel.org,
	alison.schofield@intel.com,
	Smita.KoralahalliChannabasappa@amd.com
Subject: [PATCH 6/9] dax/hmem: Fix singleton confusion between dax_hmem_work and hmem devices
Date: Thu, 26 Mar 2026 22:28:18 -0700	[thread overview]
Message-ID: <20260327052821.440749-7-dan.j.williams@intel.com> (raw)
In-Reply-To: <20260327052821.440749-1-dan.j.williams@intel.com>

dax_hmem (ab)uses a platform device to allow for a module to autoload in
the presence of "Soft Reserved" resources. The dax_hmem driver had no
dependencies on the "hmem_platform" device being a singleton until the
recent "dax_hmem vs dax_cxl" takeover solution.

Replace the layering violation — dax_hmem_work assuming that there will
never be more than one "hmem_platform" device associated with a global
work item — with a dax_hmem-local workqueue that can theoretically
support any number of hmem_platform devices.

Fix up the reference counting to only pin the device while it is live in
the queue.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dax/bus.h         |  15 +++++-
 drivers/dax/hmem/device.c |  28 ++++++----
 drivers/dax/hmem/hmem.c   | 108 +++++++++++++++++++-------------------
 3 files changed, 85 insertions(+), 66 deletions(-)

diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index ebbfe2d6da14..7b1a83f1ce1f 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -3,7 +3,9 @@
 #ifndef __DAX_BUS_H__
 #define __DAX_BUS_H__
 #include <linux/device.h>
+#include <linux/platform_device.h>
 #include <linux/range.h>
+#include <linux/workqueue.h>
 
 struct dev_dax;
 struct resource;
@@ -49,8 +51,19 @@ void dax_driver_unregister(struct dax_device_driver *dax_drv);
 void kill_dev_dax(struct dev_dax *dev_dax);
 bool static_dev_dax(struct dev_dax *dev_dax);
 
+struct hmem_platform_device {
+	struct platform_device pdev;
+	struct work_struct work;
+	bool did_probe;
+};
+
+static inline struct hmem_platform_device *
+to_hmem_platform_device(struct platform_device *pdev)
+{
+	return container_of(pdev, struct hmem_platform_device, pdev);
+}
+
 #if IS_ENABLED(CONFIG_DEV_DAX_HMEM)
-extern bool dax_hmem_initial_probe;
 void dax_hmem_flush_work(void);
 #else
 static inline void dax_hmem_flush_work(void) { }
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index 675d56276d78..d70359b4307b 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -4,13 +4,11 @@
 #include <linux/module.h>
 #include <linux/dax.h>
 #include <linux/mm.h>
+#include "../bus.h"
 
 static bool nohmem;
 module_param_named(disable, nohmem, bool, 0444);
 
-bool dax_hmem_initial_probe;
-EXPORT_SYMBOL_FOR_MODULES(dax_hmem_initial_probe, "dax_hmem");
-
 static bool platform_initialized;
 static DEFINE_MUTEX(hmem_resource_lock);
 static struct resource hmem_active = {
@@ -36,9 +34,21 @@ int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
 }
 EXPORT_SYMBOL_GPL(walk_hmem_resources);
 
+static void hmem_work(struct work_struct *work)
+{
+	/* place holder until dax_hmem driver attaches */
+}
+
+static struct hmem_platform_device hmem_platform = {
+	.pdev = {
+		.name = "hmem_platform",
+		.id = 0,
+	},
+	.work = __WORK_INITIALIZER(hmem_platform.work, hmem_work),
+};
+
 static void __hmem_register_resource(int target_nid, struct resource *res)
 {
-	struct platform_device *pdev;
 	struct resource *new;
 	int rc;
 
@@ -54,17 +64,13 @@ static void __hmem_register_resource(int target_nid, struct resource *res)
 	if (platform_initialized)
 		return;
 
-	pdev = platform_device_alloc("hmem_platform", 0);
-	if (!pdev) {
+	rc = platform_device_register(&hmem_platform.pdev);
+	if (rc) {
 		pr_err_once("failed to register device-dax hmem_platform device\n");
 		return;
 	}
 
-	rc = platform_device_add(pdev);
-	if (rc)
-		platform_device_put(pdev);
-	else
-		platform_initialized = true;
+	platform_initialized = true;
 }
 
 void hmem_register_resource(int target_nid, struct resource *res)
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index dd3d7f93baee..e1dae83dae8d 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -59,20 +59,11 @@ static void release_hmem(void *pdev)
 	platform_device_unregister(pdev);
 }
 
-struct dax_defer_work {
-	struct platform_device *pdev;
-	struct work_struct work;
-};
-
-static void process_defer_work(struct work_struct *w);
-
-static struct dax_defer_work dax_hmem_work = {
-	.work = __WORK_INITIALIZER(dax_hmem_work.work, process_defer_work),
-};
+static struct workqueue_struct *dax_hmem_wq;
 
 void dax_hmem_flush_work(void)
 {
-	flush_work(&dax_hmem_work.work);
+	flush_workqueue(dax_hmem_wq);
 }
 EXPORT_SYMBOL_FOR_MODULES(dax_hmem_flush_work, "dax_cxl");
 
@@ -134,24 +125,6 @@ static int __hmem_register_device(struct device *host, int target_nid,
 	return rc;
 }
 
-static int hmem_register_device(struct device *host, int target_nid,
-				const struct resource *res)
-{
-	if (IS_ENABLED(CONFIG_DEV_DAX_CXL) &&
-	    region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
-			      IORES_DESC_CXL) != REGION_DISJOINT) {
-		if (!dax_hmem_initial_probe) {
-			dev_dbg(host, "await CXL initial probe: %pr\n", res);
-			queue_work(system_long_wq, &dax_hmem_work.work);
-			return 0;
-		}
-		dev_dbg(host, "deferring range to CXL: %pr\n", res);
-		return 0;
-	}
-
-	return __hmem_register_device(host, target_nid, res);
-}
-
 static int hmem_register_cxl_device(struct device *host, int target_nid,
 				    const struct resource *res)
 {
@@ -170,35 +143,55 @@ static int hmem_register_cxl_device(struct device *host, int target_nid,
 
 static void process_defer_work(struct work_struct *w)
 {
-	struct dax_defer_work *work = container_of(w, typeof(*work), work);
-	struct platform_device *pdev;
-
-	if (!work->pdev)
-		return;
-
-	pdev = work->pdev;
+	struct hmem_platform_device *hpdev = container_of(w, typeof(*hpdev), work);
+	struct device *dev = &hpdev->pdev.dev;
 
 	/* Relies on cxl_acpi and cxl_pci having had a chance to load */
 	wait_for_device_probe();
 
-	guard(device)(&pdev->dev);
-	if (!pdev->dev.driver)
-		return;
+	guard(device)(dev);
+	if (!dev->driver)
+		goto out;
 
-	if (!dax_hmem_initial_probe) {
-		dax_hmem_initial_probe = true;
-		walk_hmem_resources(&pdev->dev, hmem_register_cxl_device);
+	if (!hpdev->did_probe) {
+		hpdev->did_probe = true;
+		walk_hmem_resources(dev, hmem_register_cxl_device);
 	}
+out:
+	put_device(dev);
+}
+
+static int hmem_register_device(struct device *host, int target_nid,
+				const struct resource *res)
+{
+	struct platform_device *pdev = to_platform_device(host);
+	struct hmem_platform_device *hpdev = to_hmem_platform_device(pdev);
+
+	if (IS_ENABLED(CONFIG_DEV_DAX_CXL) &&
+	    region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
+			      IORES_DESC_CXL) != REGION_DISJOINT) {
+		if (!hpdev->did_probe) {
+			dev_dbg(host, "await CXL initial probe: %pr\n", res);
+			hpdev->work.func = process_defer_work;
+			get_device(host);
+			if (!queue_work(dax_hmem_wq, &hpdev->work))
+				put_device(host);
+			return 0;
+		}
+		dev_dbg(host, "deferring range to CXL: %pr\n", res);
+		return 0;
+	}
+
+	return __hmem_register_device(host, target_nid, res);
 }
 
 static int dax_hmem_platform_probe(struct platform_device *pdev)
 {
-	if (work_pending(&dax_hmem_work.work))
-		return -EBUSY;
+	struct hmem_platform_device *hpdev = to_hmem_platform_device(pdev);
 
-	if (!dax_hmem_work.pdev)
-		dax_hmem_work.pdev =
-			to_platform_device(get_device(&pdev->dev));
+	/* queue is only flushed on module unload, fail rebind with pending work */
+	if (work_pending(&hpdev->work))
+		return -EBUSY;
 
 	return walk_hmem_resources(&pdev->dev, hmem_register_device);
 }
@@ -224,26 +217,33 @@ static __init int dax_hmem_init(void)
 		request_module("cxl_pci");
 	}
 
+	dax_hmem_wq = alloc_ordered_workqueue("dax_hmem_wq", 0);
+	if (!dax_hmem_wq)
+		return -ENOMEM;
+
 	rc = platform_driver_register(&dax_hmem_platform_driver);
 	if (rc)
-		return rc;
+		goto err_platform_driver;
 
 	rc = platform_driver_register(&dax_hmem_driver);
 	if (rc)
-		platform_driver_unregister(&dax_hmem_platform_driver);
+		goto err_driver;
+
+	return 0;
+
+err_driver:
+	platform_driver_unregister(&dax_hmem_platform_driver);
+err_platform_driver:
+	destroy_workqueue(dax_hmem_wq);
 
 	return rc;
 }
 
 static __exit void dax_hmem_exit(void)
 {
-	if (dax_hmem_work.pdev) {
-		flush_work(&dax_hmem_work.work);
-		put_device(&dax_hmem_work.pdev->dev);
-	}
-
 	platform_driver_unregister(&dax_hmem_driver);
 	platform_driver_unregister(&dax_hmem_platform_driver);
+	destroy_workqueue(dax_hmem_wq);
 }
 
 module_init(dax_hmem_init);
-- 
2.53.0


  parent reply	other threads:[~2026-03-27  5:27 UTC|newest]

Thread overview: 46+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-27  5:28 [PATCH 0/9] dax/hmem: Add tests for the dax_hmem takeover capability Dan Williams
2026-03-27  5:28 ` [PATCH 1/9] cxl/region: Fix use-after-free from auto assembly failure Dan Williams
2026-03-27 16:28   ` Dave Jiang
2026-03-27 19:20   ` Alison Schofield
2026-03-27 21:54     ` Dan Williams
2026-03-27 22:37       ` Alison Schofield
2026-03-27 23:43   ` Alison Schofield
2026-03-30 20:24   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 2/9] dax/cxl: Fix HMEM dependencies Dan Williams
2026-03-27 16:29   ` Dave Jiang
2026-03-27 23:44   ` Alison Schofield
2026-03-30 21:10   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 3/9] cxl/region: Limit visibility of cxl_region_contains_resource() Dan Williams
2026-03-27 16:39   ` Dave Jiang
2026-03-27 23:45   ` Alison Schofield
2026-03-30 22:19   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 4/9] cxl/region: Constify cxl_region_resource_contains() Dan Williams
2026-03-27 16:40   ` Dave Jiang
2026-03-27 23:45   ` Alison Schofield
2026-03-30 22:22   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 5/9] dax/hmem: Reduce visibility of dax_cxl coordination symbols Dan Williams
2026-03-27 16:46   ` Dave Jiang
2026-03-27 23:46   ` Alison Schofield
2026-03-30 22:26   ` Ira Weiny
2026-03-27  5:28 ` Dan Williams [this message]
2026-03-27 17:06   ` [PATCH 6/9] dax/hmem: Fix singleton confusion between dax_hmem_work and hmem devices Dave Jiang
2026-03-27 23:46   ` Alison Schofield
2026-03-31 17:32   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 7/9] dax/hmem: Parent dax_hmem devices Dan Williams
2026-03-27 17:07   ` Dave Jiang
2026-03-27 23:47   ` Alison Schofield
2026-03-31 17:42   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 8/9] tools/testing/cxl: Simulate auto-assembly failure Dan Williams
2026-03-27 17:08   ` Dave Jiang
2026-03-27 23:48   ` Alison Schofield
2026-03-31 17:43   ` Ira Weiny
2026-03-27  5:28 ` [PATCH 9/9] tools/testing/cxl: Test dax_hmem takeover of CXL regions Dan Williams
2026-03-27 17:10   ` Dave Jiang
2026-03-27 23:58   ` Alison Schofield
2026-03-28  3:20     ` Dan Williams
2026-03-31 17:57   ` Ira Weiny
2026-03-31 18:13   ` Alison Schofield
2026-03-27 23:42 ` [PATCH 0/9] dax/hmem: Add tests for the dax_hmem takeover capability Alison Schofield
2026-03-30 21:12 ` Koralahalli Channabasappa, Smita
2026-03-30 21:17   ` Dave Jiang
2026-03-31 21:57 ` Dave Jiang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260327052821.440749-7-dan.j.williams@intel.com \
    --to=dan.j.williams@intel.com \
    --cc=Smita.KoralahalliChannabasappa@amd.com \
    --cc=alison.schofield@intel.com \
    --cc=dave.jiang@intel.com \
    --cc=linux-cxl@vger.kernel.org \
    --cc=patches@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox