From: <smadhavan@nvidia.com>
To: <dave@stgolabs.net>, <jonathan.cameron@huawei.com>,
<dave.jiang@intel.com>, <alison.schofield@intel.com>,
<vishal.l.verma@intel.com>, <ira.weiny@intel.com>,
<dan.j.williams@intel.com>, <bhelgaas@google.com>,
<ming.li@zohomail.com>, <rrichter@amd.com>,
<Smita.KoralahalliChannabasappa@amd.com>,
<huaisheng.ye@intel.com>, <linux-cxl@vger.kernel.org>,
<linux-pci@vger.kernel.org>
Cc: <smadhavan@nvidia.com>, <vaslot@nvidia.com>, <vsethi@nvidia.com>,
<sdonthineni@nvidia.com>, <vidyas@nvidia.com>, <mochs@nvidia.com>,
<jsequeira@nvidia.com>
Subject: [PATCH v3 7/10] cxl: add host cache flush and multi-function reset
Date: Fri, 16 Jan 2026 01:41:43 +0000 [thread overview]
Message-ID: <20260116014146.2149236-8-smadhavan@nvidia.com> (raw)
In-Reply-To: <20260116014146.2149236-1-smadhavan@nvidia.com>
From: Srirangan Madhavan <smadhavan@nvidia.com>
Flush host CPU caches for mapped HDM ranges after region teardown, and
prepare sibling Type 2 functions on multi-function devices. The host
cache maintenance uses wbinvd_on_all_cpus() on x86, and a VA-based
Point-of-Coherency clean+invalidate on arm64 via memremap() and
on_each_cpu(), so that dirty host cache lines covering device-backed
memory are written back before the device is reset.
Signed-off-by: Srirangan Madhavan <smadhavan@nvidia.com>
---
drivers/cxl/pci.c | 150 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 148 insertions(+), 2 deletions(-)
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 8da69c2125af..5d2bb4431de3 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -11,6 +11,10 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
+#include <linux/align.h>
+#include <linux/cache.h>
+#include <linux/cacheflush.h>
+#include <linux/smp.h>
#include <cxl/mailbox.h>
#include <cxl/pci.h>
#include "cxlmem.h"
@@ -1092,6 +1096,71 @@ bool cxl_is_type2_device(struct pci_dev *pdev)
return cxlds->type == CXL_DEVTYPE_DEVMEM;
}
+#ifdef CONFIG_ARM64
+/* Context handed to each CPU for the IPI-driven cache maintenance pass. */
+struct cxl_cache_flush_ctx {
+ void *va;
+ size_t len;
+};
+
+/*
+ * Per-CPU callback: clean+invalidate the mapped HDM range to the Point
+ * of Coherency by virtual address.
+ *
+ * NOTE(review): dcache_clean_inval_poc() already ends with a DSB on
+ * arm64, so the trailing "dsb ish" is presumably redundant -- confirm
+ * against arch/arm64/mm/cache.S before relying on it.
+ */
+static void cxl_flush_by_va_local(void *info)
+{
+ struct cxl_cache_flush_ctx *ctx = info;
+
+ dcache_clean_inval_poc((unsigned long)ctx->va,
+ (unsigned long)ctx->va + ctx->len);
+ asm volatile("dsb ish" ::: "memory");
+}
+#endif
+
+/*
+ * Flush host CPU caches covering the HDM range backing @dev's region,
+ * if @dev is an endpoint decoder with a committed region.  Called via
+ * device_for_each_child() during reset preparation; always returns 0
+ * so iteration visits every child.
+ */
+static int cxl_region_flush_host_cpu_caches(struct device *dev, void *data)
+{
+	struct cxl_endpoint_decoder *cxled;
+	struct cxl_region *cxlr;
+
+	/* Validate the device type before downcasting and dereferencing. */
+	if (!is_endpoint_decoder(dev))
+		return 0;
+
+	cxled = to_cxl_endpoint_decoder(dev);
+	cxlr = cxled->cxld.region;
+	if (!cxlr || !cxlr->params.res)
+		return 0;
+
+#ifdef CONFIG_X86
+	/*
+	 * wbinvd flushes the entire cache hierarchy, so running it once
+	 * per decoder is redundant but harmless.  Deliberately not latched
+	 * in a function-local static: that would suppress the flush on
+	 * every reset after the first one.
+	 */
+	wbinvd_on_all_cpus();
+#elif defined(CONFIG_ARM64)
+	struct resource *res = cxlr->params.res;
+	size_t len, line_size = L1_CACHE_BYTES;
+	phys_addr_t aligned_start, aligned_end;
+	struct cxl_cache_flush_ctx flush_ctx;
+	void *va;
+
+	/* Round the region out to whole cache lines before mapping. */
+	aligned_start = ALIGN_DOWN(res->start, line_size);
+	aligned_end = ALIGN(res->end + 1, line_size);
+	len = aligned_end - aligned_start;
+
+	va = memremap(aligned_start, len, MEMREMAP_WB);
+	if (!va) {
+		dev_warn(dev, "failed to map region for cache flush\n");
+		return 0;
+	}
+
+	flush_ctx.va = va;
+	flush_ctx.len = len;
+	/* Clean+invalidate by VA on every online CPU; wait for completion. */
+	on_each_cpu(cxl_flush_by_va_local, &flush_ctx, 1);
+
+	memunmap(va);
+#endif
+	return 0;
+}
+
static int cxl_check_region_driver_bound(struct device *dev, void *data)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
@@ -1252,6 +1321,9 @@ static int cxl_reset_prepare_memdev(struct pci_dev *pdev)
return rc;
}
+ device_for_each_child(&endpoint->dev, NULL,
+ cxl_region_flush_host_cpu_caches);
+
/* Keep cxl_region_rwsem held, released by cleanup function */
return 0;
}
@@ -1266,12 +1338,79 @@ static void cxl_reset_cleanup_memdev(struct pci_dev *pdev)
up_write(&cxl_region_rwsem);
}
+/*
+ * Prepare every sibling Type 2 function in @pdev's slot for reset.
+ * @pdev itself is prepared by the caller and skipped here.
+ *
+ * On failure, siblings already prepared are unwound.  On success, the
+ * pci_get_slot() references taken here are dropped before returning:
+ * cxl_reset_cleanup_all_functions() re-acquires each sibling itself,
+ * so retaining them would leak one refcount per sibling per reset.
+ *
+ * Returns: 0 on success, negative error code from prepare on failure.
+ */
+static int cxl_reset_prepare_all_functions(struct pci_dev *pdev)
+{
+	struct pci_dev *prepared_funcs[8] = { NULL };
+	struct pci_dev *func_dev;
+	int prepared_count = 0;
+	unsigned int devfn;
+	int func, rc;
+
+	for (func = 0; func < 8; func++) {
+		devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), func);
+
+		/* The triggering function is handled by the caller. */
+		if (devfn == pdev->devfn)
+			continue;
+
+		func_dev = pci_get_slot(pdev->bus, devfn);
+		if (!func_dev)
+			continue;
+
+		if (!cxl_is_type2_device(func_dev)) {
+			pci_dev_put(func_dev);
+			continue;
+		}
+
+		rc = cxl_reset_prepare_memdev(func_dev);
+		if (rc) {
+			pci_dev_put(func_dev);
+			goto cleanup_funcs;
+		}
+
+		/* Hold the reference until all siblings are prepared. */
+		prepared_funcs[prepared_count++] = func_dev;
+	}
+
+	/* Success: release the references taken above (see header comment). */
+	for (func = 0; func < prepared_count; func++)
+		pci_dev_put(prepared_funcs[func]);
+
+	return 0;
+
+cleanup_funcs:
+	/* Unwind siblings already prepared; slots < prepared_count are non-NULL. */
+	for (func = 0; func < prepared_count; func++) {
+		cxl_reset_cleanup_memdev(prepared_funcs[func]);
+		pci_dev_put(prepared_funcs[func]);
+	}
+	return rc;
+}
+
+/*
+ * Release per-memdev reset state for every sibling Type 2 function in
+ * @pdev's slot.  Each sibling is looked up afresh with pci_get_slot(),
+ * so the device reference is taken and dropped entirely within this
+ * function.
+ */
+static void cxl_reset_cleanup_all_functions(struct pci_dev *pdev)
+{
+ struct pci_dev *func_dev;
+ unsigned int devfn;
+ int func;
+
+ for (func = 0; func < 8; func++) {
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), func);
+
+ /* Skip the function being reset; the caller cleans it up. */
+ if (devfn == pdev->devfn)
+ continue;
+
+ func_dev = pci_get_slot(pdev->bus, devfn);
+ if (!func_dev)
+ continue;
+
+ /* Only Type 2 siblings were prepared, so only they need cleanup. */
+ if (cxl_is_type2_device(func_dev))
+ cxl_reset_cleanup_memdev(func_dev);
+
+ pci_dev_put(func_dev);
+ }
+}
+
/**
* cxl_reset_prepare_device - Prepare CXL device for reset
* @pdev: PCI device being reset
*
* CXL-reset-specific preparation. Validates memory is offline, flushes
- * device caches, and tears down regions.
+ * device caches, and tears down regions for device and siblings.
*
* Returns: 0 on success, -EBUSY if memory online, negative on error
*/
@@ -1290,6 +1429,12 @@ int cxl_reset_prepare_device(struct pci_dev *pdev)
return rc;
}
+ rc = cxl_reset_prepare_all_functions(pdev);
+ if (rc) {
+ cxl_reset_cleanup_memdev(pdev);
+ return rc;
+ }
+
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_reset_prepare_device, "CXL");
@@ -1298,10 +1443,11 @@ EXPORT_SYMBOL_NS_GPL(cxl_reset_prepare_device, "CXL");
* cxl_reset_cleanup_device - Cleanup after CXL reset
* @pdev: PCI device that was reset
*
- * Releases region locks held during reset.
+ * Releases region locks for device and all sibling functions.
*/
void cxl_reset_cleanup_device(struct pci_dev *pdev)
{
+ cxl_reset_cleanup_all_functions(pdev);
cxl_reset_cleanup_memdev(pdev);
}
EXPORT_SYMBOL_NS_GPL(cxl_reset_cleanup_device, "CXL");
--
2.34.1
next prev parent reply other threads:[~2026-01-16 1:42 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-16 1:41 [PATCH v3 0/10] CXL reset support for Type 2 devices smadhavan
2026-01-16 1:41 ` [PATCH v3 1/10] cxl: move DVSEC defines to cxl pci header smadhavan
2026-01-16 1:41 ` [PATCH v3 2/10] PCI: switch CXL port DVSEC defines smadhavan
2026-01-16 1:41 ` [PATCH v3 3/10] cxl: add type 2 helper and reset DVSEC bits smadhavan
2026-01-16 1:41 ` [PATCH v3 4/10] PCI: add CXL reset method smadhavan
2026-01-17 13:56 ` kernel test robot
2026-01-17 14:28 ` kernel test robot
2026-01-16 1:41 ` [PATCH v3 5/10] cxl: add reset prepare and region teardown smadhavan
2026-01-16 1:41 ` [PATCH v3 6/10] PCI: wire CXL reset prepare/cleanup smadhavan
2026-01-16 1:41 ` smadhavan [this message]
2026-01-16 1:41 ` [PATCH v3 8/10] cxl: add DVSEC config save/restore smadhavan
2026-01-16 1:41 ` [PATCH v3 9/10] PCI: save/restore CXL config around reset smadhavan
2026-01-16 1:41 ` [PATCH v3 10/10] cxl: add HDM decoder and IDE save/restore smadhavan
2026-01-18 22:29 ` [PATCH v3 0/10] CXL reset support for Type 2 devices Alison Schofield
2026-01-20 22:33 ` Srirangan Madhavan
[not found] ` <CY5PR12MB6226EE35D88E6F4442572D1CC389A@CY5PR12MB6226.namprd12.prod.outlook.com>
2026-01-21 0:30 ` Alison Schofield
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260116014146.2149236-8-smadhavan@nvidia.com \
--to=smadhavan@nvidia.com \
--cc=Smita.KoralahalliChannabasappa@amd.com \
--cc=alison.schofield@intel.com \
--cc=bhelgaas@google.com \
--cc=dan.j.williams@intel.com \
--cc=dave.jiang@intel.com \
--cc=dave@stgolabs.net \
--cc=huaisheng.ye@intel.com \
--cc=ira.weiny@intel.com \
--cc=jonathan.cameron@huawei.com \
--cc=jsequeira@nvidia.com \
--cc=linux-cxl@vger.kernel.org \
--cc=linux-pci@vger.kernel.org \
--cc=ming.li@zohomail.com \
--cc=mochs@nvidia.com \
--cc=rrichter@amd.com \
--cc=sdonthineni@nvidia.com \
--cc=vaslot@nvidia.com \
--cc=vidyas@nvidia.com \
--cc=vishal.l.verma@intel.com \
--cc=vsethi@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox