From: <smadhavan@nvidia.com>
To: <dave@stgolabs.net>, <jonathan.cameron@huawei.com>,
<dave.jiang@intel.com>, <alison.schofield@intel.com>,
<vishal.l.verma@intel.com>, <ira.weiny@intel.com>,
<dan.j.williams@intel.com>, <bhelgaas@google.com>,
<ming.li@zohomail.com>, <rrichter@amd.com>,
<Smita.KoralahalliChannabasappa@amd.com>,
<huaisheng.ye@intel.com>, <linux-cxl@vger.kernel.org>,
<linux-pci@vger.kernel.org>
Cc: <smadhavan@nvidia.com>, <vaslot@nvidia.com>, <vsethi@nvidia.com>,
<sdonthineni@nvidia.com>, <vidyas@nvidia.com>, <mochs@nvidia.com>,
<jsequeira@nvidia.com>
Subject: [PATCH v4 07/10] cxl: add host cache flush and multi-function reset
Date: Tue, 20 Jan 2026 22:26:07 +0000 [thread overview]
Message-ID: <20260120222610.2227109-8-smadhavan@nvidia.com> (raw)
In-Reply-To: <20260120222610.2227109-1-smadhavan@nvidia.com>
From: Srirangan Madhavan <smadhavan@nvidia.com>
Flush host CPU caches for mapped HDM ranges after teardown, and prepare
sibling Type 2 functions on multi-function devices. Host cache
maintenance uses wbinvd_on_all_cpus() on x86; on arm64 it performs a
VA-based point-of-coherency clean+invalidate over a memremap() mapping,
driven on each CPU via on_each_cpu(), matching the required ordering
before reset.
Signed-off-by: Srirangan Madhavan <smadhavan@nvidia.com>
---
drivers/cxl/pci.c | 150 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 148 insertions(+), 2 deletions(-)
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index e4134162e82a..f9cc452ccb8a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -11,6 +11,10 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
+#include <linux/align.h>
+#include <linux/cache.h>
+#include <linux/cacheflush.h>
+#include <linux/smp.h>
#include <cxl/mailbox.h>
#include <cxl/pci.h>
#include "cxlmem.h"
@@ -1085,6 +1089,71 @@ bool cxl_is_type2_device(struct pci_dev *pdev)
return cxlds->type == CXL_DEVTYPE_DEVMEM;
}
+#ifdef CONFIG_ARM64
+/* Context handed to each CPU for the VA-based cache maintenance IPI. */
+struct cxl_cache_flush_ctx {
+	void *va;	/* kernel VA of the memremap()ed HDM range */
+	size_t len;	/* length in bytes, cache-line aligned by caller */
+};
+
+/*
+ * Clean+invalidate the mapped range to the Point of Coherency on the
+ * local CPU, then DSB ISH to order completion of the maintenance
+ * before the subsequent device reset.
+ *
+ * NOTE(review): dcache_clean_inval_poc() issues DC CIVAC, which is
+ * broadcast within the Inner Shareable domain, so running this on
+ * every CPU via on_each_cpu() may be redundant -- confirm before
+ * relaxing.
+ */
+static void cxl_flush_by_va_local(void *info)
+{
+	struct cxl_cache_flush_ctx *ctx = info;
+
+	dcache_clean_inval_poc((unsigned long)ctx->va,
+			       (unsigned long)ctx->va + ctx->len);
+	asm volatile("dsb ish" ::: "memory");
+}
+#endif
+
+/*
+ * device_for_each_child() callback: flush host CPU caches for the HDM
+ * range backing @dev's region, if @dev is an endpoint decoder with an
+ * active region.  Always returns 0 so iteration visits every child.
+ */
+static int cxl_region_flush_host_cpu_caches(struct device *dev, void *data)
+{
+	struct cxl_endpoint_decoder *cxled;
+	struct cxl_region *cxlr;
+	struct resource *res;
+
+	/*
+	 * Validate before converting: @dev may be any child of the
+	 * endpoint, and to_cxl_endpoint_decoder() must only be applied
+	 * to actual endpoint decoders.
+	 */
+	if (!is_endpoint_decoder(dev))
+		return 0;
+
+	cxled = to_cxl_endpoint_decoder(dev);
+	cxlr = cxled->cxld.region;
+	if (!cxlr || !cxlr->params.res)
+		return 0;
+
+	res = cxlr->params.res;
+
+#ifdef CONFIG_X86
+	/*
+	 * wbinvd flushes and invalidates all caches, so one invocation
+	 * per reset would suffice.  A function-local static latch is
+	 * wrong here: it would suppress the flush on every reset after
+	 * the first for the lifetime of the kernel.  Flush
+	 * unconditionally and accept the (reset-path-only) cost of one
+	 * wbinvd per mapped decoder.
+	 */
+	wbinvd_on_all_cpus();
+#elif defined(CONFIG_ARM64)
+	void *va;
+	size_t len, line_size = L1_CACHE_BYTES;
+	phys_addr_t start, end, aligned_start, aligned_end;
+	struct cxl_cache_flush_ctx flush_ctx;
+
+	start = res->start;
+	end = res->end;
+
+	/* Round the range out to whole cache lines before mapping. */
+	aligned_start = ALIGN_DOWN(start, line_size);
+	aligned_end = ALIGN(end + 1, line_size);
+	len = aligned_end - aligned_start;
+
+	va = memremap(aligned_start, len, MEMREMAP_WB);
+	if (!va) {
+		dev_warn(dev, "failed to map region for cache flush\n");
+		return 0;
+	}
+
+	flush_ctx.va = va;
+	flush_ctx.len = len;
+	on_each_cpu(cxl_flush_by_va_local, &flush_ctx, 1);
+
+	memunmap(va);
+#endif
+	return 0;
+}
+
static int cxl_check_region_driver_bound(struct device *dev, void *data)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
@@ -1245,6 +1314,9 @@ static int cxl_reset_prepare_memdev(struct pci_dev *pdev)
return rc;
}
+ device_for_each_child(&endpoint->dev, NULL,
+ cxl_region_flush_host_cpu_caches);
+
/* Keep cxl_region_rwsem held, released by cleanup function */
return 0;
}
@@ -1259,12 +1331,79 @@ static void cxl_reset_cleanup_memdev(struct pci_dev *pdev)
up_write(&cxl_region_rwsem);
}
+/*
+ * Prepare every sibling Type 2 function on the same (non-ARI) device
+ * for reset.  On failure, unwind the functions already prepared.
+ *
+ * Returns 0 on success or the first cxl_reset_prepare_memdev() error.
+ */
+static int cxl_reset_prepare_all_functions(struct pci_dev *pdev)
+{
+	struct pci_dev *func_dev;
+	unsigned int devfn;
+	int func, rc;
+	/* At most 8 functions per non-ARI device (PCI_FUNC is 3 bits). */
+	struct pci_dev *prepared_funcs[8] = { NULL };
+	int prepared_count = 0;
+
+	for (func = 0; func < 8; func++) {
+		devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), func);
+
+		/* The function under reset is prepared by the caller. */
+		if (devfn == pdev->devfn)
+			continue;
+
+		func_dev = pci_get_slot(pdev->bus, devfn);
+		if (!func_dev)
+			continue;
+
+		if (!cxl_is_type2_device(func_dev)) {
+			pci_dev_put(func_dev);
+			continue;
+		}
+
+		rc = cxl_reset_prepare_memdev(func_dev);
+		if (rc) {
+			pci_dev_put(func_dev);
+			goto cleanup_funcs;
+		}
+
+		prepared_funcs[prepared_count++] = func_dev;
+	}
+
+	/*
+	 * Drop the references taken above (the original leaked them on
+	 * the success path).  cxl_reset_cleanup_all_functions()
+	 * re-acquires the siblings via pci_get_slot() at cleanup time,
+	 * so holding them across the reset is unnecessary.
+	 */
+	for (func = 0; func < prepared_count; func++)
+		pci_dev_put(prepared_funcs[func]);
+
+	return 0;
+
+cleanup_funcs:
+	/* Every entry below prepared_count is non-NULL by construction. */
+	for (func = 0; func < prepared_count; func++) {
+		cxl_reset_cleanup_memdev(prepared_funcs[func]);
+		pci_dev_put(prepared_funcs[func]);
+	}
+	return rc;
+}
+
+/*
+ * Release the region locks taken by cxl_reset_prepare_all_functions()
+ * on every sibling Type 2 function of @pdev.
+ */
+static void cxl_reset_cleanup_all_functions(struct pci_dev *pdev)
+{
+	unsigned int slot = PCI_SLOT(pdev->devfn);
+	int fn;
+
+	for (fn = 0; fn < 8; fn++) {
+		struct pci_dev *sibling;
+		unsigned int devfn = PCI_DEVFN(slot, fn);
+
+		/* The function under reset is cleaned up by the caller. */
+		if (devfn == pdev->devfn)
+			continue;
+
+		sibling = pci_get_slot(pdev->bus, devfn);
+		if (!sibling)
+			continue;
+
+		if (cxl_is_type2_device(sibling))
+			cxl_reset_cleanup_memdev(sibling);
+
+		pci_dev_put(sibling);
+	}
+}
+
/**
* cxl_reset_prepare_device - Prepare CXL device for reset
* @pdev: PCI device being reset
*
* CXL-reset-specific preparation. Validates memory is offline, flushes
- * device caches, and tears down regions.
+ * device caches, and tears down regions for device and siblings.
*
* Returns: 0 on success, -EBUSY if memory online, negative on error
*/
@@ -1283,6 +1422,12 @@ int cxl_reset_prepare_device(struct pci_dev *pdev)
return rc;
}
+ rc = cxl_reset_prepare_all_functions(pdev);
+ if (rc) {
+ cxl_reset_cleanup_memdev(pdev);
+ return rc;
+ }
+
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_reset_prepare_device, "CXL");
@@ -1291,10 +1436,11 @@ EXPORT_SYMBOL_NS_GPL(cxl_reset_prepare_device, "CXL");
* cxl_reset_cleanup_device - Cleanup after CXL reset
* @pdev: PCI device that was reset
*
- * Releases region locks held during reset.
+ * Releases region locks for device and all sibling functions.
*/
void cxl_reset_cleanup_device(struct pci_dev *pdev)
{
+ cxl_reset_cleanup_all_functions(pdev);
cxl_reset_cleanup_memdev(pdev);
}
EXPORT_SYMBOL_NS_GPL(cxl_reset_cleanup_device, "CXL");
--
2.34.1
next prev parent reply other threads:[~2026-01-20 22:27 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-20 22:26 [PATCH v4 0/10] CXL Reset support for Type 2 devices smadhavan
2026-01-20 22:26 ` [PATCH v4 01/10] cxl: move DVSEC defines to cxl pci header smadhavan
2026-01-21 10:31 ` Jonathan Cameron
2026-01-20 22:26 ` [PATCH v4 02/10] PCI: switch CXL port DVSEC defines smadhavan
2026-01-21 10:34 ` Jonathan Cameron
2026-01-20 22:26 ` [PATCH v4 03/10] cxl: add type 2 helper and reset DVSEC bits smadhavan
2026-01-20 23:27 ` Dave Jiang
2026-01-21 10:45 ` Jonathan Cameron
2026-01-20 22:26 ` [PATCH v4 04/10] PCI: add CXL reset method smadhavan
2026-01-21 0:08 ` Dave Jiang
2026-01-21 10:57 ` Jonathan Cameron
2026-01-23 13:54 ` kernel test robot
2026-01-20 22:26 ` [PATCH v4 05/10] cxl: add reset prepare and region teardown smadhavan
2026-01-21 11:09 ` Jonathan Cameron
2026-01-21 21:25 ` Dave Jiang
2026-01-20 22:26 ` [PATCH v4 06/10] PCI: wire CXL reset prepare/cleanup smadhavan
2026-01-21 22:13 ` Dave Jiang
2026-01-22 2:17 ` Srirangan Madhavan
2026-01-22 15:11 ` Dave Jiang
2026-01-24 7:54 ` kernel test robot
2026-01-20 22:26 ` smadhavan [this message]
2026-01-21 11:20 ` [PATCH v4 07/10] cxl: add host cache flush and multi-function reset Jonathan Cameron
2026-01-21 20:27 ` Davidlohr Bueso
2026-01-22 9:53 ` Jonathan Cameron
2026-01-21 22:19 ` Vikram Sethi
2026-01-22 9:40 ` Souvik Chakravarty
[not found] ` <PH7PR12MB9175CDFC163843BB497073CEBD96A@PH7PR12MB9175.namprd12.prod.outlook.com>
2026-01-22 10:31 ` Jonathan Cameron
2026-01-22 19:24 ` Vikram Sethi
2026-01-23 13:13 ` Jonathan Cameron
2026-01-21 23:59 ` Dave Jiang
2026-01-20 22:26 ` [PATCH v4 08/10] cxl: add DVSEC config save/restore smadhavan
2026-01-21 11:31 ` Jonathan Cameron
2026-01-20 22:26 ` [PATCH v4 09/10] PCI: save/restore CXL config around reset smadhavan
2026-01-21 22:32 ` Dave Jiang
2026-01-22 10:01 ` Lukas Wunner
2026-01-22 10:47 ` Jonathan Cameron
2026-01-26 22:34 ` Alex Williamson
2026-03-12 18:24 ` Jonathan Cameron
2026-01-20 22:26 ` [PATCH v4 10/10] cxl: add HDM decoder and IDE save/restore smadhavan
2026-01-21 11:42 ` Jonathan Cameron
2026-01-22 15:09 ` Dave Jiang
2026-01-21 1:19 ` [PATCH v4 0/10] CXL Reset support for Type 2 devices Alison Schofield
2026-01-22 0:00 ` Bjorn Helgaas
2026-01-27 16:33 ` Alex Williamson
2026-01-27 17:02 ` dan.j.williams
2026-01-27 18:07 ` Vikram Sethi
2026-01-28 3:42 ` dan.j.williams
2026-01-28 12:36 ` Jonathan Cameron
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260120222610.2227109-8-smadhavan@nvidia.com \
--to=smadhavan@nvidia.com \
--cc=Smita.KoralahalliChannabasappa@amd.com \
--cc=alison.schofield@intel.com \
--cc=bhelgaas@google.com \
--cc=dan.j.williams@intel.com \
--cc=dave.jiang@intel.com \
--cc=dave@stgolabs.net \
--cc=huaisheng.ye@intel.com \
--cc=ira.weiny@intel.com \
--cc=jonathan.cameron@huawei.com \
--cc=jsequeira@nvidia.com \
--cc=linux-cxl@vger.kernel.org \
--cc=linux-pci@vger.kernel.org \
--cc=ming.li@zohomail.com \
--cc=mochs@nvidia.com \
--cc=rrichter@amd.com \
--cc=sdonthineni@nvidia.com \
--cc=vaslot@nvidia.com \
--cc=vidyas@nvidia.com \
--cc=vishal.l.verma@intel.com \
--cc=vsethi@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox