* [PATCH 1/4 v2] cxl/core: Change match_*_by_range() calling convention
2025-01-14 20:32 [PATCH 0/4 v2] cxl/core: Enable Region creation on x86 with Low Mem Hole Fabio M. De Francesco
@ 2025-01-14 20:32 ` Fabio M. De Francesco
2025-01-14 20:32 ` [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86 Fabio M. De Francesco
` (2 subsequent siblings)
3 siblings, 0 replies; 12+ messages in thread
From: Fabio M. De Francesco @ 2025-01-14 20:32 UTC (permalink / raw)
To: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams
Cc: Robert Richter, ming.li, linux-kernel, linux-cxl,
Fabio M. De Francesco
Replace struct range parameter with struct cxl_endpoint_decoder of
which range is a member in the match_*_by_range() functions.
This is in preparation for expanding these helpers to perform arch
specific region matching that requires a cxl_endpoint_decoder.
No functional changes.
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
---
drivers/cxl/core/region.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index b98b1ccffd1ca..9d2c31f5caf26 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1735,23 +1735,27 @@ static struct cxl_port *next_port(struct cxl_port *port)
static int match_switch_decoder_by_range(struct device *dev, void *data)
{
+ struct cxl_endpoint_decoder *cxled = data;
struct cxl_switch_decoder *cxlsd;
- struct range *r1, *r2 = data;
+ struct range *r1, *r2;
if (!is_switch_decoder(dev))
return 0;
cxlsd = to_cxl_switch_decoder(dev);
r1 = &cxlsd->cxld.hpa_range;
+ r2 = &cxled->cxld.hpa_range;
if (is_root_decoder(dev))
return range_contains(r1, r2);
return (r1->start == r2->start && r1->end == r2->end);
}
-static int find_pos_and_ways(struct cxl_port *port, struct range *range,
+static int find_pos_and_ways(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
int *pos, int *ways)
{
+ struct range *range = &cxled->cxld.hpa_range;
struct cxl_switch_decoder *cxlsd;
struct cxl_port *parent;
struct device *dev;
@@ -1761,7 +1765,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
if (!parent)
return rc;
- dev = device_find_child(&parent->dev, range,
+ dev = device_find_child(&parent->dev, cxled,
match_switch_decoder_by_range);
if (!dev) {
dev_err(port->uport_dev,
@@ -1841,7 +1845,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
if (is_cxl_root(iter))
break;
- rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
+ rc = find_pos_and_ways(iter, cxled, &parent_pos, &parent_ways);
if (rc)
return rc;
@@ -3189,22 +3193,26 @@ static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
static int match_root_decoder_by_range(struct device *dev, void *data)
{
- struct range *r1, *r2 = data;
+ struct cxl_endpoint_decoder *cxled = data;
struct cxl_root_decoder *cxlrd;
+ struct range *r1, *r2;
if (!is_root_decoder(dev))
return 0;
cxlrd = to_cxl_root_decoder(dev);
r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ r2 = &cxled->cxld.hpa_range;
+
return range_contains(r1, r2);
}
static int match_region_by_range(struct device *dev, void *data)
{
+ struct cxl_endpoint_decoder *cxled = data;
+ struct range *r = &cxled->cxld.hpa_range;
struct cxl_region_params *p;
struct cxl_region *cxlr;
- struct range *r = data;
int rc = 0;
if (!is_cxl_region(dev))
@@ -3308,7 +3316,6 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct range *hpa = &cxled->cxld.hpa_range;
struct cxl_decoder *cxld = &cxled->cxld;
struct device *cxlrd_dev, *region_dev;
struct cxl_root_decoder *cxlrd;
@@ -3317,7 +3324,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
bool attach = false;
int rc;
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
+ cxlrd_dev = device_find_child(&root->dev, cxled,
match_root_decoder_by_range);
if (!cxlrd_dev) {
dev_err(cxlmd->dev.parent,
@@ -3334,7 +3341,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
* one does the construction and the others add to that.
*/
mutex_lock(&cxlrd->range_lock);
- region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, cxled,
match_region_by_range);
if (!region_dev) {
cxlr = construct_region(cxlrd, cxled);
--
2.47.1
^ permalink raw reply related [flat|nested] 12+ messages in thread* [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-01-14 20:32 [PATCH 0/4 v2] cxl/core: Enable Region creation on x86 with Low Mem Hole Fabio M. De Francesco
2025-01-14 20:32 ` [PATCH 1/4 v2] cxl/core: Change match_*_by_range() calling convention Fabio M. De Francesco
@ 2025-01-14 20:32 ` Fabio M. De Francesco
2025-01-15 2:23 ` Gregory Price
2025-01-14 20:32 ` [PATCH 3/4 v2] cxl/core: Enable Region creation on x86 with Low Memory Hole Fabio M. De Francesco
2025-01-14 20:32 ` [PATCH 4/4 v2] cxl/test: Simulate an x86 Low Memory Hole for tests Fabio M. De Francesco
3 siblings, 1 reply; 12+ messages in thread
From: Fabio M. De Francesco @ 2025-01-14 20:32 UTC (permalink / raw)
To: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams
Cc: Robert Richter, ming.li, linux-kernel, linux-cxl,
Fabio M. De Francesco
In x86 with Low memory Hole, the BIOS may publish CFMWS that describe
SPA ranges which are subsets of the corresponding CXL Endpoint Decoders
HPA's because the CFMWS never intersects LMH's while EP Decoders HPA's
ranges are always guaranteed to align to the NIW * 256M rule.
In order to construct Regions and attach Decoders, the driver needs to
match Root Decoders and Regions with Endpoint Decoders, but it fails and
the entire process returns errors because it doesn't expect to deal with
SPA range lengths smaller than corresponding HPA's.
Introduce functions that indirectly detect x86 LMH's by comparing SPA's
with corresponding HPA's. They will be used in the process of Regions
creation and Endpoint attachments to prevent driver failures in a few
steps of the above-mentioned process.
The helpers return true when HPA/SPA misalignments are detected under
specific conditions: both the SPA and HPA ranges must start at
LMH_CFMWS_RANGE_START (which on x86 with LMH's is 0x0), the SPA range
size must be less than the HPA's and less than 4G, and the HPA size must
be aligned to the NIW * 256M rule.
Also introduce a function to adjust the range end of the Regions to be
created on x86 with LMH's.
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
---
drivers/cxl/core/lmh.c | 55 ++++++++++++++++++++++++++++++++++++++++++
drivers/cxl/core/lmh.h | 28 +++++++++++++++++++++
2 files changed, 83 insertions(+)
create mode 100644 drivers/cxl/core/lmh.c
create mode 100644 drivers/cxl/core/lmh.h
diff --git a/drivers/cxl/core/lmh.c b/drivers/cxl/core/lmh.c
new file mode 100644
index 0000000000000..232ebea0a8364
--- /dev/null
+++ b/drivers/cxl/core/lmh.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/range.h>
+#include "lmh.h"
+
+/* Start of CFMWS ranges that end before x86 Low Memory Holes */
+#define LMH_CFMWS_RANGE_START 0x0ULL
+
+/*
+ * Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
+ *
+ * On x86, CFMWS ranges never intersect memory holes while endpoint decoders
+ * HPA range sizes are always guaranteed aligned to NIW * 256MB; therefore,
+ * the given endpoint decoder HPA range size is always expected aligned and
+ * also larger than that of the matching root decoder. If there are LMH's,
+ * the root decoder range end is always less than SZ_4G.
+ */
+bool arch_match_spa(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct range *r1, *r2;
+ int niw;
+
+ r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ r2 = &cxled->cxld.hpa_range;
+ niw = cxled->cxld.interleave_ways;
+
+ if (r1->start == LMH_CFMWS_RANGE_START && r1->start == r2->start &&
+ r1->end < (LMH_CFMWS_RANGE_START + SZ_4G) && r1->end < r2->end &&
+ IS_ALIGNED(range_len(r2), niw * SZ_256M))
+ return true;
+
+ return false;
+}
+
+/* Similar to arch_match_spa(), it matches regions and decoders */
+bool arch_match_region(struct cxl_region_params *p, struct cxl_decoder *cxld)
+{
+ struct range *r = &cxld->hpa_range;
+ struct resource *res = p->res;
+ int niw = cxld->interleave_ways;
+
+ if (res->start == LMH_CFMWS_RANGE_START && res->start == r->start &&
+ res->end < (LMH_CFMWS_RANGE_START + SZ_4G) && res->end < r->end &&
+ IS_ALIGNED(range_len(r), niw * SZ_256M))
+ return true;
+
+ return false;
+}
+
+void arch_adjust_region_resource(struct resource *res,
+ struct cxl_root_decoder *cxlrd)
+{
+ res->end = cxlrd->res->end;
+}
diff --git a/drivers/cxl/core/lmh.h b/drivers/cxl/core/lmh.h
new file mode 100644
index 0000000000000..ec8907145afe8
--- /dev/null
+++ b/drivers/cxl/core/lmh.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "cxl.h"
+
+#ifdef CONFIG_CXL_ARCH_LOW_MEMORY_HOLE
+bool arch_match_spa(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled);
+bool arch_match_region(struct cxl_region_params *p, struct cxl_decoder *cxld);
+void arch_adjust_region_resource(struct resource *res,
+ struct cxl_root_decoder *cxlrd);
+#else
+static bool arch_match_spa(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
+{
+ return false;
+}
+
+static bool arch_match_region(struct cxl_region_params *p,
+ struct cxl_decoder *cxld)
+{
+ return false;
+}
+
+static void arch_adjust_region_resource(struct resource *res,
+ struct cxl_root_decoder *cxlrd)
+{
+}
+#endif /* CONFIG_CXL_ARCH_LOW_MEMORY_HOLE */
--
2.47.1
^ permalink raw reply related [flat|nested] 12+ messages in thread* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-01-14 20:32 ` [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86 Fabio M. De Francesco
@ 2025-01-15 2:23 ` Gregory Price
2025-01-21 20:35 ` Fabio M. De Francesco
2025-04-01 20:32 ` Dan Williams
0 siblings, 2 replies; 12+ messages in thread
From: Gregory Price @ 2025-01-15 2:23 UTC (permalink / raw)
To: Fabio M. De Francesco
Cc: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams, Robert Richter, ming.li,
linux-kernel, linux-cxl
On Tue, Jan 14, 2025 at 09:32:54PM +0100, Fabio M. De Francesco wrote:
> +/*
> + * Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
> + *
> + * On x86, CFMWS ranges never intersect memory holes while endpoint decoders
> + * HPA range sizes are always guaranteed aligned to NIW * 256MB; therefore,
> + * the given endpoint decoder HPA range size is always expected aligned and
> + * also larger than that of the matching root decoder. If there are LMH's,
> + * the root decoder range end is always less than SZ_4G.
> + */
Is there any reason to limit this memory-hole handling to only low
memory holes? I have observed systems where the following memory
hole situation occurs:
(example, not exact sizes)
CFMW1: [ 0xc0000000 - 0xdfffffff ] 512MB range
Reserved [ 0xe0000000 - 0xffffffff ] 512MB range
CFMW2: [ 0x100000000 - 0x15fffffff ] 1.5GB range
2 CXL Memory Devices w/ 1GB capacity each (again, an example).
Note that 1 device has its capacity split across the hole, but
if the devices are interleaved then both devices have their capacity
split across the hole.
It seems with some mild modification, this patch set could be
re-used to handle this memory hole scenario as well
(w/ addr translation - Robert's patch set)
Is there a reason not to handle more than just LMH's in this set?
(I may try to hack this up on my test system and report back.)
~Gregory
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-01-15 2:23 ` Gregory Price
@ 2025-01-21 20:35 ` Fabio M. De Francesco
2025-04-01 20:32 ` Dan Williams
1 sibling, 0 replies; 12+ messages in thread
From: Fabio M. De Francesco @ 2025-01-21 20:35 UTC (permalink / raw)
To: Gregory Price
Cc: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams, Robert Richter, ming.li,
linux-kernel, linux-cxl
On Wednesday, January 15, 2025 3:23:36 AM GMT+1 Gregory Price wrote:
> On Tue, Jan 14, 2025 at 09:32:54PM +0100, Fabio M. De Francesco wrote:
> > +/*
> > + * Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
> > + *
> > + * On x86, CFMWS ranges never intersect memory holes while endpoint decoders
> > + * HPA range sizes are always guaranteed aligned to NIW * 256MB; therefore,
> > + * the given endpoint decoder HPA range size is always expected aligned and
> > + * also larger than that of the matching root decoder. If there are LMH's,
> > + * the root decoder range end is always less than SZ_4G.
> > + */
>
> Is there any reason to limit this memory-hole handling to only low
> memory holes?
>
No I don't see any special reasons to limit this to only low memory holes.
It's just that I didn't know about others.
>
> I have observed systems where the following memory
> hole situation occurs:
>
> (example, not exact sizes)
> CFMW1: [ 0xc0000000 - 0xdfffffff ] 512MB range
> Reserved [ 0xe0000000 - 0xffffffff ] 512MB range
> CFMW2: [ 0x100000000 - 0x15fffffff ] 1.5GB range
>
> 2 CXL Memory Devices w/ 1GB capacity each (again, an example).
>
> Note that 1 device has its capacity split across the hole, but
> if the devices are interleaved then both devices have their capacity
> split across the hole.
>
> It seems with some mild modification, this patch set could be
> re-used to handle this memory hole scenario as well
> (w/ addr translation - Robert's patch set)
>
> Is there a reason not to handle more than just LMH's in this set?
>
> (I may try to hack this up on my test system and report back.)
>
I'd appreciate it if you want to report back.
Thank you,
Fabio
>
> ~Gregory
>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-01-15 2:23 ` Gregory Price
2025-01-21 20:35 ` Fabio M. De Francesco
@ 2025-04-01 20:32 ` Dan Williams
2025-04-01 21:40 ` Gregory Price
1 sibling, 1 reply; 12+ messages in thread
From: Dan Williams @ 2025-04-01 20:32 UTC (permalink / raw)
To: Gregory Price, Fabio M. De Francesco
Cc: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams, Robert Richter, ming.li,
linux-kernel, linux-cxl
Gregory Price wrote:
> On Tue, Jan 14, 2025 at 09:32:54PM +0100, Fabio M. De Francesco wrote:
> > +/*
> > + * Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
> > + *
> > + * On x86, CFMWS ranges never intersect memory holes while endpoint decoders
> > + * HPA range sizes are always guaranteed aligned to NIW * 256MB; therefore,
> > + * the given endpoint decoder HPA range size is always expected aligned and
> > + * also larger than that of the matching root decoder. If there are LMH's,
> > + * the root decoder range end is always less than SZ_4G.
> > + */
>
> Is there any reason to limit this memory-hole handling to only low
> memory holes? I have observed systems where the following memory
> hole situation occurs:
>
> (example, not exact sizes)
> CFMW1: [ 0xc0000000 - 0xdfffffff ] 512MB range
> Reserved [ 0xe0000000 - 0xffffffff ] 512MB range
> CFMW2: [ 0x100000000 - 0x15fffffff ] 1.5GB range
>
> 2 CXL Memory Devices w/ 1GB capacity each (again, an example).
>
> Note that 1 device has its capacity split across the hole, but
> if the devices are interleaved then both devices have their capacity
> split across the hole.
>
> It seems with some mild modification, this patch set could be
> re-used to handle this memory hole scenario as well
> (w/ addr translation - Robert's patch set)
>
> Is there a reason not to handle more than just LMH's in this set?
This discussion was referenced recently on an IM and I wanted to share
my response to it:
The rules for when to apply this memory hole quirk are explicit and
suitable to add to the CXL specification. I want the same standard for
any other quirk and ideally some proof-of-work to get that quirk
recognized by the specification. Otherwise, I worry that generalizing
for all the possible ways that platform BIOS tries to be clever means we
end up with something that has no rules.
The spec is there to allow software to delineate valid configurations vs
mistakes, and this slow drip of "Linux does not understand this platform
configuration" is a spec gap.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-04-01 20:32 ` Dan Williams
@ 2025-04-01 21:40 ` Gregory Price
2025-04-01 23:34 ` Dan Williams
0 siblings, 1 reply; 12+ messages in thread
From: Gregory Price @ 2025-04-01 21:40 UTC (permalink / raw)
To: Dan Williams
Cc: Fabio M. De Francesco, Davidlohr Bueso, Jonathan Cameron,
Dave Jiang, Alison Schofield, Vishal Verma, Ira Weiny,
Robert Richter, ming.li, linux-kernel, linux-cxl
On Tue, Apr 01, 2025 at 01:32:33PM -0700, Dan Williams wrote:
> Gregory Price wrote:
> > Is there a reason not to handle more than just LMH's in this set?
>
> This discussion was referenced recently on an IM and I wanted to share
> my response to it:
>
> The rules for when to apply this memory hole quirk are explicit and
> suitable to add to the CXL specification. I want the same standard for
> any other quirk and ideally some proof-of-work to get that quirk
> recognized by the specification. Otherwise, I worry that generalizing
> for all the possible ways that platform BIOS tries to be clever means we
> end up with something that has no rules.
>
> The spec is there to allow software to delineate valid configurations vs
> mistakes, and this slow drip of "Linux does not understand this platform
> configuration" is a spec gap.
Note: I've since come around to understand the whole ecosystem a bit
better since i wrote this response. I don't know that it's needed.
Some of the explanation of this patch series is a bit confusing. It
justifies itself by saying CFMWS don't intersect memory holes and that
endpoint decoders have to be 256MB aligned.
/*
* Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
*
* On x86, CFMWS ranges never intersect memory holes while endpoint decoders
* HPA range sizes are always guaranteed aligned to NIW * 256MB; therefore,
* the given endpoint decoder HPA range size is always expected aligned and
* also larger than that of the matching root decoder. If there are LMH's,
* the root decoder range end is always less than SZ_4G.
*/
But per the spec, CFMWS is also required to be aligned to 256MB.
Shouldn't the platform work around memory holes to generate multiple
CFMWS for the entire capacity, and then use multiple endpoint decoders
(1 per CFMWS) to map the capacity accordingly?
(Also, I still don't understand the oracle value of <4GB address range.
It seems like if this is some quirk of SPA vs HPA alignment, then it
can hold for *all* occurrences, not just stuff below 4GB)
~Gregory
^ permalink raw reply [flat|nested] 12+ messages in thread* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-04-01 21:40 ` Gregory Price
@ 2025-04-01 23:34 ` Dan Williams
2025-04-02 5:05 ` Gregory Price
0 siblings, 1 reply; 12+ messages in thread
From: Dan Williams @ 2025-04-01 23:34 UTC (permalink / raw)
To: Gregory Price, Dan Williams
Cc: Fabio M. De Francesco, Davidlohr Bueso, Jonathan Cameron,
Dave Jiang, Alison Schofield, Vishal Verma, Ira Weiny,
Robert Richter, ming.li, linux-kernel, linux-cxl
Gregory Price wrote:
> On Tue, Apr 01, 2025 at 01:32:33PM -0700, Dan Williams wrote:
> > Gregory Price wrote:
> > > Is there a reason not to handle more than just LMH's in this set?
> >
> > This discussion was referenced recently on an IM and I wanted to share
> > my response to it:
> >
> > The rules for when to apply this memory hole quirk are explicit and
> > suitable to add to the CXL specification. I want the same standard for
> > any other quirk and ideally some proof-of-work to get that quirk
> > recognized by the specification. Otherwise, I worry that generalizing
> > for all the possible ways that platform BIOS tries to be clever means we
> > end up with something that has no rules.
> >
> > The spec is there to allow software to delineate valid configurations vs
> > mistakes, and this slow drip of "Linux does not understand this platform
> > configuration" is a spec gap.
>
> Note: I've since come around to understand the whole ecosystem a bit
> better since i wrote this response.
Yes, I should have acknowledged shifts in understanding since this
thread went quiet. Fabio was about to spin this set again to add more
"generalization" and I wanted to clarify my current thinking that
generalization is the opposite of what should happen here.
> I don't know that it's needed.
Referring to spec changes? I think they are, see below
> Some of the explanation of this patch series is a bit confusing. It
> justifies itself by saying CFMWS don't intersect memory holes and that
> endpoint decoders have to be 256MB aligned.
>
> /*
> * Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
> *
> * On x86, CFMWS ranges never intersect memory holes while endpoint decoders
> * HPA range sizes are always guaranteed aligned to NIW * 256MB; therefore,
> * the given endpoint decoder HPA range size is always expected aligned and
> * also larger than that of the matching root decoder. If there are LMH's,
> * the root decoder range end is always less than SZ_4G.
> */
>
> But per the spec, CFMWS is also aligned to be aligned to 256MB.
Right, something has to give, i.e. "spec meet reality". Hardware
endpoint decoders must be aligned, that is a shipping expectation, and
endpoints are not in a position to know or care about host platform
constraints. In contrast, the CFMWS definition runs into a practical
problem meeting the same expectation given competing host physical
memory map constraints.
The platforms with this condition want to support CXL mapped starting at
zero *and* the typical/historical PCI MMIO space in low memory (for
those PCI devices that do not support 64-bit addressing). If the CFMWS
blindly followed the 256MB*NIW constraint the CXL window would overlap
the MMIO space. So the choices are:
1/ Give up on mapping CXL starting at zero when 256MB * NIW does not fit
2/ Give up on maintaining historical availabilty of and compatibility
with 32-bit only PCI devices (PCI configuration regression)
3/ Trim CFMWS to match the reality that the platform will always route
memory cycles in that PCI MMIO range to PCI MMIO and never to CXL.
4/ Define some new protocol for when CFMWS is explicitly countermanded
by other platform resource descriptors, and not a BIOS bug.
The platform in question chose option 3.
> Shouldn't the platform work around memory holes to generate multiple
> CFMWS for the entire capacity, and then use multiple endpoint decoders
> (1 per CFMWS) to map the capacity accordingly?
Per above, the maths do not work out to be able to support that relative
to a CXL region with problematic NIW.
> (Also, I still don't understand the oracle value of <4GB address range.
> It seems like if this is some quirk of SPA vs HPA alignment, then it
> can hold for *all* ocurrances, not just stuff below 4GB)
The goal is to get platform vendors to define the rules so that an OS
has a reasonable expectation to know what is a valid vs invalid
configuration. A hole above 4GB has no reason to exist, there is no
resource conflict like PCI MMIO that explains why typical spec
expectation can not be met.
So I want the subsystem to have an explicit set of platform quirks
ideally backed up by updated spec language. That allows us to validate
that the Linux implementation is correct by some objective source of
truth, encourage platform vendors to update that source of truth when
they create new corner cases, or even better, be more mindful to not
create new corner cases.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-04-01 23:34 ` Dan Williams
@ 2025-04-02 5:05 ` Gregory Price
2025-04-02 14:20 ` Ira Weiny
0 siblings, 1 reply; 12+ messages in thread
From: Gregory Price @ 2025-04-02 5:05 UTC (permalink / raw)
To: Dan Williams
Cc: Fabio M. De Francesco, Davidlohr Bueso, Jonathan Cameron,
Dave Jiang, Alison Schofield, Vishal Verma, Ira Weiny,
Robert Richter, ming.li, linux-kernel, linux-cxl
On Tue, Apr 01, 2025 at 04:34:44PM -0700, Dan Williams wrote:
> The platforms with this condition want to support CXL mapped starting at
> zero *and* the typical/historical PCI MMIO space in low memory (for
> those PCI devices that do not support 64-bit addressing). If the CFMWS
> blindly followed the 256MB*NIW constraint the CXL window would overlap
> the MMIO space. So the choices are:
>
If I'm understanding everything correctly, then I think this is intended
to work only when EFI_MEMORY_SP is *not* set for these particular CXL
devices - so it comes up as memory in early boot and we're just trying
to wire up all the bits to let the driver manage it accordingly?
If that's the case, then ignore me, i'm just bikeshedding at this point.
I can't imagine a system where the memory at 0x0-4GB is intended to come
up as EFI_MEMORY_SP, so I hope no one ever tries this :D
> > (Also, I still don't understand the oracle value of <4GB address range.
> > It seems like if this is some quirk of SPA vs HPA alignment, then it
> > can hold for *all* ocurrances, not just stuff below 4GB)
>
> The goal is to get platform vendors to define the rules so that an OS
> has a reasonable expectation to know what is a valid vs invalid
> configuration. A hole above 4GB has no reason to exist, there is no
> resource conflict like PCI MMIO that explains why typical spec
> expectation can not be met.
>
> So I want the subsystem to have an explicit set of platform quirks
> ideally backed up by updated spec language. That allows us to validate
> that the Linux implementation is correct by some objective source of
> truth, encourage platform vendors to update that source of truth when
> they create new corner cases, or even better, be more mindful to not
> create new corner cases.
I follow, seems reasonable.
~Gregory
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86
2025-04-02 5:05 ` Gregory Price
@ 2025-04-02 14:20 ` Ira Weiny
0 siblings, 0 replies; 12+ messages in thread
From: Ira Weiny @ 2025-04-02 14:20 UTC (permalink / raw)
To: Gregory Price, Dan Williams
Cc: Fabio M. De Francesco, Davidlohr Bueso, Jonathan Cameron,
Dave Jiang, Alison Schofield, Vishal Verma, Ira Weiny,
Robert Richter, ming.li, linux-kernel, linux-cxl
Gregory Price wrote:
> On Tue, Apr 01, 2025 at 04:34:44PM -0700, Dan Williams wrote:
> > The platforms with this condition want to support CXL mapped starting at
> > zero *and* the typical/historical PCI MMIO space in low memory (for
> > those PCI devices that do not support 64-bit addressing). If the CFMWS
> > blindly followed the 256MB*NIW constraint the CXL window would overlap
> > the MMIO space. So the choices are:
> >
>
> If I'm understanding everything correctly, then I think this is intended
> to work only when EFI_MEMORY_SP is *not* set for these particular CXL
> devices - so it comes up as memory in early boot and we're just trying
> to wire up all the bits to let the driver manage it accordingly?
That is how I understand things. But I'm just jumping in just to review
the patches so I could be wrong...
Ira
>
> If that's the case, then ignore me, i'm just bikeshedding at this point.
>
> I can't imagine a system where the memory at 0x0-4GB is intended to come
> up as EFI_MEMORY_SP, so I hope no one ever tries this :D
>
> > > (Also, I still don't understand the oracle value of <4GB address range.
> > > It seems like if this is some quirk of SPA vs HPA alignment, then it
> > > can hold for *all* ocurrances, not just stuff below 4GB)
> >
> > The goal is to get platform vendors to define the rules so that an OS
> > has a reasonable expectation to know what is a valid vs invalid
> > configuration. A hole above 4GB has no reason to exist, there is no
> > resource conflict like PCI MMIO that explains why typical spec
> > expectation can not be met.
> >
> > So I want the subsystem to have an explicit set of platform quirks
> > ideally backed up by updated spec language. That allows us to validate
> > that the Linux implementation is correct by some objective source of
> > truth, encourage platform vendors to update that source of truth when
> > they create new corner cases, or even better, be more mindful to not
> > create new corner cases.
>
> I follow, seems reasonable.
>
> ~Gregory
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 3/4 v2] cxl/core: Enable Region creation on x86 with Low Memory Hole
2025-01-14 20:32 [PATCH 0/4 v2] cxl/core: Enable Region creation on x86 with Low Mem Hole Fabio M. De Francesco
2025-01-14 20:32 ` [PATCH 1/4 v2] cxl/core: Change match_*_by_range() calling convention Fabio M. De Francesco
2025-01-14 20:32 ` [PATCH 2/4 v2] cxl/core: Add helpers to detect Low memory Holes on x86 Fabio M. De Francesco
@ 2025-01-14 20:32 ` Fabio M. De Francesco
2025-01-14 20:32 ` [PATCH 4/4 v2] cxl/test: Simulate an x86 Low Memory Hole for tests Fabio M. De Francesco
3 siblings, 0 replies; 12+ messages in thread
From: Fabio M. De Francesco @ 2025-01-14 20:32 UTC (permalink / raw)
To: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams
Cc: Robert Richter, ming.li, linux-kernel, linux-cxl,
Fabio M. De Francesco
The CXL Fixed Memory Window Structure (CFMWS) describes zero or more Host
Physical Address (HPA) windows that are associated with each CXL Host
Bridge. Each window represents a contiguous HPA that may be interleaved
with one or more targets (CXL v3.1 - 9.18.1.3).
The Low Memory Hole (LMH) of x86 is a range of addresses of physical low
memory to which systems cannot send transactions. In some cases the size
of that hole is not compatible with the CXL hardware decoder constraint
that the size is always aligned to 256M * Interleave Ways.
On those systems, BIOS publishes CFMWS which communicate the active System
Physical Address (SPA) ranges that map to a subset of the Host Physical
Address (HPA) ranges. The SPA range trims out the hole, and capacity in
the endpoint is lost with no SPA to map to CXL HPA in that hole.
In the early stages of CXL Regions construction and attach on platforms
with Low Memory Holes, cxl_add_to_region() fails and returns an error
because it can't find any CXL Window that matches a given CXL Endpoint
Decoder.
Detect a Low Memory Hole by comparing Root Decoders and Endpoint Decoders
ranges with the use of arch_match_{spa,region}() helpers.
Match Root Decoders and CXL Regions with corresponding CXL Endpoint
Decoders. Currently a Low Memory Holes would prevent the matching functions
to return true.
Construct CXL Regions with HPA range's end adjusted to the matching SPA.
Allow the attach target process to complete by allowing Regions to not
comply with alignment constraints (i.e., alignment to NIW * 256M rule).
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
---
drivers/cxl/Kconfig | 5 ++++
drivers/cxl/core/Makefile | 1 +
drivers/cxl/core/region.c | 56 ++++++++++++++++++++++++++++++++-------
tools/testing/cxl/Kbuild | 1 +
4 files changed, 54 insertions(+), 9 deletions(-)
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 876469e23f7a7..07b87f217e590 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -128,6 +128,11 @@ config CXL_REGION
If unsure say 'y'
+config CXL_ARCH_LOW_MEMORY_HOLE
+ def_bool y
+ depends on CXL_REGION
+ depends on X86
+
config CXL_REGION_INVALIDATION_TEST
bool "CXL: Region Cache Management Bypass (TEST)"
depends on CXL_REGION
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9259bcc6773c8..6e80215e8444f 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -15,4 +15,5 @@ cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
cxl_core-$(CONFIG_TRACING) += trace.o
+cxl_core-$(CONFIG_CXL_ARCH_LOW_MEMORY_HOLE) += lmh.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 9d2c31f5caf26..b25e48da17d53 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -13,6 +13,7 @@
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
+#include "lmh.h"
/**
* DOC: cxl core region
@@ -836,8 +837,12 @@ static int match_auto_decoder(struct device *dev, void *data)
cxld = to_cxl_decoder(dev);
r = &cxld->hpa_range;
- if (p->res && p->res->start == r->start && p->res->end == r->end)
- return 1;
+ if (p->res) {
+ if (p->res->start == r->start && p->res->end == r->end)
+ return 1;
+ if (arch_match_region(p, cxld))
+ return 1;
+ }
return 0;
}
@@ -1425,7 +1430,8 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (cxld->interleave_ways != iw ||
cxld->interleave_granularity != ig ||
cxld->hpa_range.start != p->res->start ||
- cxld->hpa_range.end != p->res->end ||
+ (cxld->hpa_range.end != p->res->end &&
+ !arch_match_region(p, cxld)) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1737,6 +1743,7 @@ static int match_switch_decoder_by_range(struct device *dev, void *data)
{
struct cxl_endpoint_decoder *cxled = data;
struct cxl_switch_decoder *cxlsd;
+ struct cxl_root_decoder *cxlrd;
struct range *r1, *r2;
if (!is_switch_decoder(dev))
@@ -1746,8 +1753,13 @@ static int match_switch_decoder_by_range(struct device *dev, void *data)
r1 = &cxlsd->cxld.hpa_range;
r2 = &cxled->cxld.hpa_range;
- if (is_root_decoder(dev))
- return range_contains(r1, r2);
+ if (is_root_decoder(dev)) {
+ if (range_contains(r1, r2))
+ return 1;
+ cxlrd = to_cxl_root_decoder(dev);
+ if (arch_match_spa(cxlrd, cxled))
+ return 1;
+ }
return (r1->start == r2->start && r1->end == r2->end);
}
@@ -1954,7 +1966,8 @@ static int cxl_region_attach(struct cxl_region *cxlr,
}
if (resource_size(cxled->dpa_res) * p->interleave_ways !=
- resource_size(p->res)) {
+ resource_size(p->res) &&
+ !arch_match_spa(cxlrd, cxled)) {
dev_dbg(&cxlr->dev,
"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
@@ -3204,7 +3217,12 @@ static int match_root_decoder_by_range(struct device *dev, void *data)
r1 = &cxlrd->cxlsd.cxld.hpa_range;
r2 = &cxled->cxld.hpa_range;
- return range_contains(r1, r2);
+ if (range_contains(r1, r2))
+ return true;
+ if (arch_match_spa(cxlrd, cxled))
+ return true;
+
+ return false;
}
static int match_region_by_range(struct device *dev, void *data)
@@ -3222,8 +3240,12 @@ static int match_region_by_range(struct device *dev, void *data)
p = &cxlr->params;
down_read(&cxl_region_rwsem);
- if (p->res && p->res->start == r->start && p->res->end == r->end)
- rc = 1;
+ if (p->res) {
+ if (p->res->start == r->start && p->res->end == r->end)
+ rc = 1;
+ if (arch_match_region(p, &cxled->cxld))
+ rc = 1;
+ }
up_read(&cxl_region_rwsem);
return rc;
@@ -3275,6 +3297,22 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
dev_name(&cxlr->dev));
+
+ /*
+ * Trim the HPA retrieved from hardware to fit the SPA mapped by the
+ * platform
+ */
+ if (arch_match_spa(cxlrd, cxled)) {
+ dev_dbg(cxlmd->dev.parent, "(LMH) Resource (%s: %pr)\n",
+ dev_name(&cxled->cxld.dev), res);
+
+ arch_adjust_region_resource(res, cxlrd);
+
+ dev_dbg(cxlmd->dev.parent,
+ "(LMH) has been adjusted (%s: %pr)\n",
+ dev_name(&cxled->cxld.dev), res);
+ }
+
rc = insert_resource(cxlrd->res, res);
if (rc) {
/*
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index b1256fee3567f..fe9c4480f7583 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -62,6 +62,7 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o
cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-y += $(CXL_CORE_SRC)/cdat.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
+cxl_core-$(CONFIG_CXL_ARCH_LOW_MEMORY_HOLE) += $(CXL_CORE_SRC)/lmh.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
--
2.47.1
^ permalink raw reply related [flat|nested] 12+ messages in thread* [PATCH 4/4 v2] cxl/test: Simulate an x86 Low Memory Hole for tests
2025-01-14 20:32 [PATCH 0/4 v2] cxl/core: Enable Region creation on x86 with Low Mem Hole Fabio M. De Francesco
` (2 preceding siblings ...)
2025-01-14 20:32 ` [PATCH 3/4 v2] cxl/core: Enable Region creation on x86 with Low Memory Hole Fabio M. De Francesco
@ 2025-01-14 20:32 ` Fabio M. De Francesco
3 siblings, 0 replies; 12+ messages in thread
From: Fabio M. De Francesco @ 2025-01-14 20:32 UTC (permalink / raw)
To: Davidlohr Bueso, Jonathan Cameron, Dave Jiang, Alison Schofield,
Vishal Verma, Ira Weiny, Dan Williams
Cc: Robert Richter, ming.li, linux-kernel, linux-cxl,
Fabio M. De Francesco
Simulate an x86 Low Memory Hole for the CXL tests by changing the first
mock CFMWS range size to 768MB and the CXL Endpoint Decoder HPA range sizes
to 1GB.
Since the auto-created region of cxl-test uses mock_cfmws[0], whose range
base address is typically different from the one published by the BIOS on
real hardware, the driver would fail to create and attach CXL Regions if
it was run on the mock environment created by cxl-tests.
Therefore, save the mock_cfmws[0] range base_hpa and reuse it to match CXL
Root Decoders and Regions with Endpoint Decoders when the driver is run on
mock devices.
Since the auto-created region of cxl-test uses mock_cfmws[0], the
LMH path in the CXL Driver will be exercised every time the cxl-test
module is loaded. Executing the unit test cxl-topology.sh confirms that the
region is created successfully with an LMH.
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
---
drivers/cxl/core/lmh.c | 32 +++++++++++++++++++++++++---
drivers/cxl/core/lmh.h | 2 ++
tools/testing/cxl/cxl_core_exports.c | 2 ++
tools/testing/cxl/test/cxl.c | 10 +++++++++
4 files changed, 43 insertions(+), 3 deletions(-)
diff --git a/drivers/cxl/core/lmh.c b/drivers/cxl/core/lmh.c
index 232ebea0a8364..b981aeea805d6 100644
--- a/drivers/cxl/core/lmh.c
+++ b/drivers/cxl/core/lmh.c
@@ -1,11 +1,28 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/range.h>
+#include <linux/pci.h>
+
#include "lmh.h"
/* Start of CFMWS range that end before x86 Low Memory Holes */
#define LMH_CFMWS_RANGE_START 0x0ULL
+static u64 mock_cfmws0_range_start = ULLONG_MAX;
+
+void set_mock_cfmws0_range_start(u64 start)
+{
+ mock_cfmws0_range_start = start;
+}
+
+static u64 get_cfmws_range_start(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return LMH_CFMWS_RANGE_START;
+
+ return mock_cfmws0_range_start;
+}
+
/*
* Match CXL Root and Endpoint Decoders by comparing SPA and HPA ranges.
*
@@ -18,9 +35,14 @@
bool arch_match_spa(struct cxl_root_decoder *cxlrd,
struct cxl_endpoint_decoder *cxled)
{
+ u64 cfmws_range_start;
struct range *r1, *r2;
int niw;
+ cfmws_range_start = get_cfmws_range_start(&cxled->cxld.dev);
+ if (cfmws_range_start == ULLONG_MAX)
+ return false;
+
r1 = &cxlrd->cxlsd.cxld.hpa_range;
r2 = &cxled->cxld.hpa_range;
niw = cxled->cxld.interleave_ways;
@@ -36,13 +58,17 @@ bool arch_match_spa(struct cxl_root_decoder *cxlrd,
/* Similar to arch_match_spa(), it matches regions and decoders */
bool arch_match_region(struct cxl_region_params *p, struct cxl_decoder *cxld)
{
+ u64 cfmws_range_start;
struct range *r = &cxld->hpa_range;
struct resource *res = p->res;
int niw = cxld->interleave_ways;
- if (res->start == LMH_CFMWS_RANGE_START && res->start == r->start &&
- res->end < (LMH_CFMWS_RANGE_START + SZ_4G) && res->end < r->end &&
- IS_ALIGNED(range_len(r), niw * SZ_256M))
+ cfmws_range_start = get_cfmws_range_start(&cxld->dev);
+ if (cfmws_range_start == ULLONG_MAX)
+ return false;
+
+ if (res->start == cfmws_range_start && res->start == r->start &&
+ res->end < r->end && IS_ALIGNED(range_len(r), niw * SZ_256M))
return true;
return false;
diff --git a/drivers/cxl/core/lmh.h b/drivers/cxl/core/lmh.h
index ec8907145afe8..d804108fbb41a 100644
--- a/drivers/cxl/core/lmh.h
+++ b/drivers/cxl/core/lmh.h
@@ -2,6 +2,8 @@
#include "cxl.h"
+void set_mock_cfmws0_range_start(u64 start);
+
#ifdef CONFIG_CXL_ARCH_LOW_MEMORY_HOLE
bool arch_match_spa(struct cxl_root_decoder *cxlrd,
struct cxl_endpoint_decoder *cxled);
diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
index f088792a8925f..7b20f9fcf0d75 100644
--- a/tools/testing/cxl/cxl_core_exports.c
+++ b/tools/testing/cxl/cxl_core_exports.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include "cxl.h"
+#include "lmh.h"
/* Exporting of cxl_core symbols that are only used by cxl_test */
EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, "CXL");
+EXPORT_SYMBOL_NS_GPL(set_mock_cfmws0_range_start, "CXL");
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index d0337c11f9ee6..6bd305e778687 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>
+#include <core/lmh.h>
#include "../watermark.h"
#include "mock.h"
@@ -212,7 +213,11 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = FAKE_QTG_ID,
+#if defined(CONFIG_CXL_ARCH_LOW_MEMORY_HOLE)
+ .window_size = SZ_256M * 3UL,
+#else
.window_size = SZ_256M * 4UL,
+#endif
},
.target = { 0 },
},
@@ -454,6 +459,7 @@ static int populate_cedt(void)
return -ENOMEM;
window->base_hpa = res->range.start;
}
+ set_mock_cfmws0_range_start(mock_cfmws[0]->base_hpa);
return 0;
}
@@ -744,7 +750,11 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
struct cxl_endpoint_decoder *cxled;
struct cxl_switch_decoder *cxlsd;
struct cxl_port *port, *iter;
+#if defined(CONFIG_CXL_ARCH_LOW_MEMORY_HOLE)
+ const int size = SZ_1G;
+#else
const int size = SZ_512M;
+#endif
struct cxl_memdev *cxlmd;
struct cxl_dport *dport;
struct device *dev;
--
2.47.1
^ permalink raw reply related [flat|nested] 12+ messages in thread