From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: nifan.cxl@gmail.com, qemu-devel@nongnu.org,
linux-cxl@vger.kernel.org, gregory.price@memverge.com,
ira.weiny@intel.com, dan.j.williams@intel.com,
a.manzanares@samsung.com, dave@stgolabs.net,
nmtadam.samsung@gmail.com, jim.harris@samsung.com,
Jorgen.Hansen@wdc.com, wj28.lee@gmail.com, armbru@redhat.com,
Fan Ni <fan.ni@samsung.com>
Subject: Re: [PATCH v8 08/14] hw/mem/cxl_type3: Add host backend and address space handling for DC regions
Date: Mon, 3 Jun 2024 11:04:06 -0400
Message-ID: <20240603110327-mutt-send-email-mst@kernel.org>
In-Reply-To: <20240603132759.00005fbf@Huawei.com>
On Mon, Jun 03, 2024 at 01:27:59PM +0100, Jonathan Cameron wrote:
> On Thu, 23 May 2024 10:44:48 -0700
> nifan.cxl@gmail.com wrote:
>
> > From: Fan Ni <fan.ni@samsung.com>
> >
> > Add a (file- or memory-backed) host backend for DCD. All dynamic capacity
> > regions share a single host backend, which must be large enough to cover
> > them. Set up an address space for the DC regions to support read/write
> > operations to dynamic capacity for DCD.
> >
> > With this change, the following support is added:
> > 1. Add a new type3 device property, "volatile-dc-memdev", pointing to the
> > host memory backend for dynamic capacity. Currently, all DC regions share
> > one host backend;
> > 2. Add an address space for dynamic capacity for read/write support;
> > 3. Create CDAT entries for each dynamic capacity region.
> >
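For anyone who wants to poke at this, a minimal sketch of the relevant
command-line pieces (the backend ids, sizes and root-port name below are
invented for illustration, and the usual CXL host-bridge / root-port /
fixed-memory-window setup is omitted):

    -object memory-backend-ram,id=vmem0,size=2G \
    -object memory-backend-ram,id=dcmem0,size=4G \
    -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,volatile-dc-memdev=dcmem0,num-dc-regions=2,id=cxl-dcd0

A 4G dc backend split over num-dc-regions=2 keeps each region a multiple of
256 MiB, which the sizing checks further down require.
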
> > Reviewed-by: Gregory Price <gregory.price@memverge.com>
> > Signed-off-by: Fan Ni <fan.ni@samsung.com>
>
> > dvsec = (uint8_t *)&(CXLDVSECDevice){
> > @@ -579,11 +622,28 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
> > {
> > int i;
> > uint64_t region_base = 0;
> > - uint64_t region_len = 2 * GiB;
> > - uint64_t decode_len = 2 * GiB;
> > + uint64_t region_len;
> > + uint64_t decode_len;
> > uint64_t blk_size = 2 * MiB;
> > CXLDCRegion *region;
> > MemoryRegion *mr;
> > + uint64_t dc_size;
> > +
> > + mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
> > + dc_size = memory_region_size(mr);
> > + region_len = DIV_ROUND_UP(dc_size, ct3d->dc.num_regions);
> > +
> > + if (dc_size % (ct3d->dc.num_regions * CXL_CAPACITY_MULTIPLIER) != 0) {
> > + error_setg(errp, "backend size is not multiple of region len: 0x%lx",
>
> Just seen a build error for this in mst's gitlab.
> Needs to be the messy PRIx64 form (not tested), e.g.
>
>     error_setg(errp, "backend size is not multiple of region len: 0x%" PRIx64,
>                region_len);
>
> Michael, do you want a new version, or are you happy to fix this up?
>
> Thanks,
>
> Jonathan
I did this fixup. If nothing else happens I'll keep it; if more
issues creep up I will drop it. Thanks!
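
For reference, the portable form of that call is along these lines (a sketch
based on the discussion above, not copied from any tree):

    /*
     * 0x%lx only works where uint64_t is unsigned long; PRIx64 from
     * <inttypes.h> picks the right length modifier on every host.
     */
    error_setg(errp,
               "backend size is not multiple of region len: 0x%" PRIx64,
               region_len);
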
> > + region_len);
> > + return false;
> > + }
> > + if (region_len % CXL_CAPACITY_MULTIPLIER != 0) {
> > + error_setg(errp, "DC region size is unaligned to 0x%lx",
> > + CXL_CAPACITY_MULTIPLIER);
> > + return false;
> > + }
> > + decode_len = region_len;
> >
> > if (ct3d->hostvmem) {
> > mr = host_memory_backend_get_memory(ct3d->hostvmem);
> > @@ -610,6 +670,7 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
> > /* dsmad_handle set when creating CDAT table entries */
> > .flags = 0,
> > };
> > + ct3d->dc.total_capacity += region->len;
> > }
> >
> > return true;
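
To put numbers on the new sizing rules (values invented for illustration):
with num-dc-regions=2 and a 4 GiB dc backend, region_len =
DIV_ROUND_UP(4 GiB, 2) = 2 GiB, 4 GiB % (2 * 256 MiB) == 0 and
2 GiB % 256 MiB == 0, so both checks pass. A 1 GiB backend split across 3
regions would trip the first error, since 1 GiB is not a multiple of
3 * CXL_CAPACITY_MULTIPLIER (768 MiB).
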
> > @@ -619,7 +680,8 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
> > {
> > DeviceState *ds = DEVICE(ct3d);
> >
> > - if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
> > + if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem
> > + && !ct3d->dc.num_regions) {
> > error_setg(errp, "at least one memdev property must be set");
> > return false;
> > } else if (ct3d->hostmem && ct3d->hostpmem) {
> > @@ -683,7 +745,37 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
> > g_free(p_name);
> > }
> >
> > + ct3d->dc.total_capacity = 0;
> > if (ct3d->dc.num_regions > 0) {
> > + MemoryRegion *dc_mr;
> > + char *dc_name;
> > +
> > + if (!ct3d->dc.host_dc) {
> > + error_setg(errp, "dynamic capacity must have a backing device");
> > + return false;
> > + }
> > +
> > + dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
> > + if (!dc_mr) {
> > + error_setg(errp, "dynamic capacity must have a backing device");
> > + return false;
> > + }
> > +
> > + /*
> > + * Set DC regions as volatile for now, non-volatile support can
> > + * be added in the future if needed.
> > + */
> > + memory_region_set_nonvolatile(dc_mr, false);
> > + memory_region_set_enabled(dc_mr, true);
> > + host_memory_backend_set_mapped(ct3d->dc.host_dc, true);
> > + if (ds->id) {
> > + dc_name = g_strdup_printf("cxl-dcd-dpa-dc-space:%s", ds->id);
> > + } else {
> > + dc_name = g_strdup("cxl-dcd-dpa-dc-space");
> > + }
> > + address_space_init(&ct3d->dc.host_dc_as, dc_mr, dc_name);
> > + g_free(dc_name);
> > +
> > if (!cxl_create_dc_regions(ct3d, errp)) {
> > error_append_hint(errp, "setup DC regions failed");
> > return false;
> > @@ -779,6 +871,9 @@ err_release_cdat:
> > err_free_special_ops:
> > g_free(regs->special_ops);
> > err_address_space_free:
> > + if (ct3d->dc.host_dc) {
> > + address_space_destroy(&ct3d->dc.host_dc_as);
> > + }
> > if (ct3d->hostpmem) {
> > address_space_destroy(&ct3d->hostpmem_as);
> > }
> > @@ -797,6 +892,9 @@ static void ct3_exit(PCIDevice *pci_dev)
> > pcie_aer_exit(pci_dev);
> > cxl_doe_cdat_release(cxl_cstate);
> > g_free(regs->special_ops);
> > + if (ct3d->dc.host_dc) {
> > + address_space_destroy(&ct3d->dc.host_dc_as);
> > + }
> > if (ct3d->hostpmem) {
> > address_space_destroy(&ct3d->hostpmem_as);
> > }
> > @@ -875,16 +973,23 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
> > AddressSpace **as,
> > uint64_t *dpa_offset)
> > {
> > - MemoryRegion *vmr = NULL, *pmr = NULL;
> > + MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
> > + uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
> >
> > if (ct3d->hostvmem) {
> > vmr = host_memory_backend_get_memory(ct3d->hostvmem);
> > + vmr_size = memory_region_size(vmr);
> > }
> > if (ct3d->hostpmem) {
> > pmr = host_memory_backend_get_memory(ct3d->hostpmem);
> > + pmr_size = memory_region_size(pmr);
> > + }
> > + if (ct3d->dc.host_dc) {
> > + dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
> > + dc_size = memory_region_size(dc_mr);
> > }
> >
> > - if (!vmr && !pmr) {
> > + if (!vmr && !pmr && !dc_mr) {
> > return -ENODEV;
> > }
> >
> > @@ -892,19 +997,18 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
> > return -EINVAL;
> > }
> >
> > - if (*dpa_offset > ct3d->cxl_dstate.static_mem_size) {
> > + if (*dpa_offset >= vmr_size + pmr_size + dc_size) {
> > return -EINVAL;
> > }
> >
> > - if (vmr) {
> > - if (*dpa_offset < memory_region_size(vmr)) {
> > - *as = &ct3d->hostvmem_as;
> > - } else {
> > - *as = &ct3d->hostpmem_as;
> > - *dpa_offset -= memory_region_size(vmr);
> > - }
> > - } else {
> > + if (*dpa_offset < vmr_size) {
> > + *as = &ct3d->hostvmem_as;
> > + } else if (*dpa_offset < vmr_size + pmr_size) {
> > *as = &ct3d->hostpmem_as;
> > + *dpa_offset -= vmr_size;
> > + } else {
> > + *as = &ct3d->dc.host_dc_as;
> > + *dpa_offset -= (vmr_size + pmr_size);
> > }
> >
> > return 0;
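
A quick worked example of the updated routing (sizes invented): with a 1 GiB
volatile backend, a 1 GiB persistent backend and a 2 GiB dc backend, a DPA
offset of 0x90000000 (2.25 GiB) passes the vmr_size + pmr_size + dc_size
bound check, skips the first two branches, and is serviced from
&ct3d->dc.host_dc_as at offset 0x10000000 (0x90000000 - 0x80000000).
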
> > @@ -986,6 +1090,8 @@ static Property ct3_props[] = {
> > DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
> > DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
> > DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0),
> > + DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc,
> > + TYPE_MEMORY_BACKEND, HostMemoryBackend *),
> > DEFINE_PROP_END_OF_LIST(),
> > };
> >
> > @@ -1052,33 +1158,39 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
> >
> > static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
> > {
> > - MemoryRegion *vmr = NULL, *pmr = NULL;
> > + MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
> > AddressSpace *as;
> > + uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
> >
> > if (ct3d->hostvmem) {
> > vmr = host_memory_backend_get_memory(ct3d->hostvmem);
> > + vmr_size = memory_region_size(vmr);
> > }
> > if (ct3d->hostpmem) {
> > pmr = host_memory_backend_get_memory(ct3d->hostpmem);
> > + pmr_size = memory_region_size(pmr);
> > }
> > + if (ct3d->dc.host_dc) {
> > + dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
> > + dc_size = memory_region_size(dc_mr);
> > + }
> >
> > - if (!vmr && !pmr) {
> > + if (!vmr && !pmr && !dc_mr) {
> > return false;
> > }
> >
> > - if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.static_mem_size) {
> > + if (dpa_offset + CXL_CACHE_LINE_SIZE > vmr_size + pmr_size + dc_size) {
> > return false;
> > }
> >
> > - if (vmr) {
> > - if (dpa_offset < memory_region_size(vmr)) {
> > - as = &ct3d->hostvmem_as;
> > - } else {
> > - as = &ct3d->hostpmem_as;
> > - dpa_offset -= memory_region_size(vmr);
> > - }
> > - } else {
> > + if (dpa_offset < vmr_size) {
> > + as = &ct3d->hostvmem_as;
> > + } else if (dpa_offset < vmr_size + pmr_size) {
> > as = &ct3d->hostpmem_as;
> > + dpa_offset -= vmr_size;
> > + } else {
> > + as = &ct3d->dc.host_dc_as;
> > + dpa_offset -= (vmr_size + pmr_size);
> > }
> >
> > address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, &data,
> > diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
> > index f7f56b44e3..c2c3df0d2a 100644
> > --- a/include/hw/cxl/cxl_device.h
> > +++ b/include/hw/cxl/cxl_device.h
> > @@ -467,6 +467,14 @@ struct CXLType3Dev {
> > uint64_t poison_list_overflow_ts;
> >
> > struct dynamic_capacity {
> > + HostMemoryBackend *host_dc;
> > + AddressSpace host_dc_as;
> > + /*
> > + * total_capacity is equivalent to the dynamic capacity
> > + * memory region size.
> > + */
> > + uint64_t total_capacity; /* 256M aligned */
> > +
> > uint8_t num_regions; /* 0-8 regions */
> > CXLDCRegion regions[DCD_MAX_NUM_REGION];
> > } dc;