From: Zhi Wang <zhiw@nvidia.com>
To: <alejandro.lucero-palau@amd.com>
Cc: <linux-cxl@vger.kernel.org>, <netdev@vger.kernel.org>,
<dan.j.williams@intel.com>, <martin.habets@xilinx.com>,
<edward.cree@amd.com>, <davem@davemloft.net>, <kuba@kernel.org>,
<pabeni@redhat.com>, <edumazet@google.com>,
<richard.hughes@amd.com>, Alejandro Lucero <alucerop@amd.com>,
<targupta@nvidia.com>, <zhiwang@kernel.org>
Subject: Re: [PATCH v2 12/15] cxl: allow region creation by type2 drivers
Date: Thu, 22 Aug 2024 16:12:26 +0300
Message-ID: <20240822161226.00001736.zhiw@nvidia.com>
In-Reply-To: <20240715172835.24757-13-alejandro.lucero-palau@amd.com>
On Mon, 15 Jul 2024 18:28:32 +0100
<alejandro.lucero-palau@amd.com> wrote:
> From: Alejandro Lucero <alucerop@amd.com>
>
> Creating a CXL region requires userspace intervention through the cxl
> sysfs files. Type2 support should allow accelerator drivers to create
> such cxl region from kernel code.
>
> Adding that functionality and integrating it with current support for
> memory expanders.
>
> Based on
> https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m84598b534cc5664f5bb31521ba6e41c7bc213758
> Signed-off-by: Alejandro Lucero <alucerop@amd.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  drivers/cxl/core/region.c          | 265 ++++++++++++++++++++++-------
>  drivers/cxl/cxl.h                  |   1 +
>  drivers/cxl/cxlmem.h               |   4 +-
>  drivers/net/ethernet/sfc/efx_cxl.c |  15 +-
>  include/linux/cxl_accel_mem.h      |   5 +
>  5 files changed, 231 insertions(+), 59 deletions(-)
>
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 5cc71b8868bc..697c8df83a4b 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -479,22 +479,14 @@ static ssize_t interleave_ways_show(struct device *dev,
>  static const struct attribute_group *get_cxl_region_target_group(void);
> -static ssize_t interleave_ways_store(struct device *dev,
> - struct device_attribute *attr,
> - const char *buf, size_t len)
> +static int set_interleave_ways(struct cxl_region *cxlr, int val)
> {
> -	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
> +	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
>  	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
> -	struct cxl_region *cxlr = to_cxl_region(dev);
> struct cxl_region_params *p = &cxlr->params;
> - unsigned int val, save;
> - int rc;
> + int save, rc;
> u8 iw;
>
> - rc = kstrtouint(buf, 0, &val);
> - if (rc)
> - return rc;
> -
> rc = ways_to_eiw(val, &iw);
> if (rc)
> return rc;
> @@ -509,25 +501,42 @@ static ssize_t interleave_ways_store(struct device *dev,
>  		return -EINVAL;
>  	}
>
> - rc = down_write_killable(&cxl_region_rwsem);
> - if (rc)
> - return rc;
> - if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
> - rc = -EBUSY;
> - goto out;
> - }
> + lockdep_assert_held_write(&cxl_region_rwsem);
> + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
> + return -EBUSY;
>
> save = p->interleave_ways;
> p->interleave_ways = val;
>  	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
>  	if (rc)
>  		p->interleave_ways = save;
> -out:
> +
> + return rc;
> +}
> +
> +static ssize_t interleave_ways_store(struct device *dev,
> + struct device_attribute *attr,
> + const char *buf, size_t len)
> +{
> + struct cxl_region *cxlr = to_cxl_region(dev);
> + unsigned int val;
> + int rc;
> +
> + rc = kstrtouint(buf, 0, &val);
> + if (rc)
> + return rc;
> +
> + rc = down_write_killable(&cxl_region_rwsem);
> + if (rc)
> + return rc;
> +
> + rc = set_interleave_ways(cxlr, val);
> up_write(&cxl_region_rwsem);
> if (rc)
> return rc;
> return len;
> }
> +
> static DEVICE_ATTR_RW(interleave_ways);
>
> static ssize_t interleave_granularity_show(struct device *dev,
> @@ -547,21 +556,14 @@ static ssize_t interleave_granularity_show(struct device *dev,
>  	return rc;
>  }
>
> -static ssize_t interleave_granularity_store(struct device *dev,
> -					    struct device_attribute *attr,
> -					    const char *buf, size_t len)
> +static int set_interleave_granularity(struct cxl_region *cxlr, int val)
>  {
> -	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
> +	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
>  	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
> - struct cxl_region *cxlr = to_cxl_region(dev);
> struct cxl_region_params *p = &cxlr->params;
> - int rc, val;
> + int rc;
> u16 ig;
>
> - rc = kstrtoint(buf, 0, &val);
> - if (rc)
> - return rc;
> -
> rc = granularity_to_eig(val, &ig);
> if (rc)
> return rc;
> @@ -577,21 +579,36 @@ static ssize_t interleave_granularity_store(struct device *dev,
>  	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
>  		return -EINVAL;
> + lockdep_assert_held_write(&cxl_region_rwsem);
> + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
> + return -EBUSY;
> +
> + p->interleave_granularity = val;
> + return 0;
> +}
> +
> +static ssize_t interleave_granularity_store(struct device *dev,
> +					    struct device_attribute *attr,
> +					    const char *buf, size_t len)
> +{
> + struct cxl_region *cxlr = to_cxl_region(dev);
> + int rc, val;
> +
> + rc = kstrtoint(buf, 0, &val);
> + if (rc)
> + return rc;
> +
> rc = down_write_killable(&cxl_region_rwsem);
> if (rc)
> return rc;
> - if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
> - rc = -EBUSY;
> - goto out;
> - }
>
> - p->interleave_granularity = val;
> -out:
> + rc = set_interleave_granularity(cxlr, val);
> up_write(&cxl_region_rwsem);
> if (rc)
> return rc;
> return len;
> }
> +
> static DEVICE_ATTR_RW(interleave_granularity);
>
>  static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
> @@ -2193,7 +2210,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
>  	return 0;
>  }
>
> -static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
> +int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
> {
> struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
> struct cxl_region *cxlr = cxled->cxld.region;
> @@ -2252,6 +2269,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
>  	put_device(&cxlr->dev);
>  	return rc;
>  }
> +EXPORT_SYMBOL_NS_GPL(cxl_region_detach, CXL);
>
> void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
> {
> @@ -2746,6 +2764,14 @@ cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
>  	return to_cxl_region(region_dev);
>  }
>
> +static void drop_region(struct cxl_region *cxlr)
> +{
> +	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
> +	struct cxl_port *port = cxlrd_to_port(cxlrd);
> +
> +	devm_release_action(port->uport_dev, unregister_region, cxlr);
> +}
> +
> static ssize_t delete_region_store(struct device *dev,
> struct device_attribute *attr,
> const char *buf, size_t len)
> @@ -3353,17 +3379,18 @@ static int match_region_by_range(struct device *dev, void *data)
>  	return rc;
>  }
>
> -/* Establish an empty region covering the given HPA range */
> -static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
> -					   struct cxl_endpoint_decoder *cxled)
> +static void construct_region_end(void)
> +{
> +	up_write(&cxl_region_rwsem);
> +}
> +
> +static struct cxl_region *construct_region_begin(struct cxl_root_decoder *cxlrd,
> +						 struct cxl_endpoint_decoder *cxled)
>  {
> struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
> - struct cxl_port *port = cxlrd_to_port(cxlrd);
> - struct range *hpa = &cxled->cxld.hpa_range;
> struct cxl_region_params *p;
> struct cxl_region *cxlr;
> - struct resource *res;
> - int rc;
> + int err = 0;
>
> do {
> cxlr = __create_region(cxlrd, cxled->mode,
> @@ -3372,8 +3399,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
>  	} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
>  	if (IS_ERR(cxlr)) {
> -		dev_err(cxlmd->dev.parent,
> -			"%s:%s: %s failed assign region: %ld\n",
> +		dev_err(cxlmd->dev.parent,"%s:%s: %s failed assign region: %ld\n",
>  			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
> __func__, PTR_ERR(cxlr));
> return cxlr;
> @@ -3383,23 +3409,47 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
>  	p = &cxlr->params;
>  	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
>  		dev_err(cxlmd->dev.parent,
> -			"%s:%s: %s autodiscovery interrupted\n",
> +			"%s:%s: %s region setup interrupted\n",
>  			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
>  			__func__);
> - rc = -EBUSY;
> - goto err;
> + err = -EBUSY;
> + }
> +
> + if (err) {
> + construct_region_end();
> + drop_region(cxlr);
> + return ERR_PTR(err);
> }
> + return cxlr;
> +}
> +
> +
> +/* Establish an empty region covering the given HPA range */
> +static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
> +					   struct cxl_endpoint_decoder *cxled)
> +{
> + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
> + struct range *hpa = &cxled->cxld.hpa_range;
> + struct cxl_region_params *p;
> + struct cxl_region *cxlr;
> + struct resource *res;
> + int rc;
> +
> + cxlr = construct_region_begin(cxlrd, cxled);
> + if (IS_ERR(cxlr))
> + return cxlr;
>
> set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
>
> res = kmalloc(sizeof(*res), GFP_KERNEL);
> if (!res) {
> rc = -ENOMEM;
> - goto err;
> + goto out;
> }
>
>  	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
>  				    dev_name(&cxlr->dev));
> +
> rc = insert_resource(cxlrd->res, res);
> if (rc) {
> /*
> @@ -3412,6 +3462,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
>  			 __func__, dev_name(&cxlr->dev));
>  	}
>
> + p = &cxlr->params;
> p->res = res;
> p->interleave_ways = cxled->cxld.interleave_ways;
>  	p->interleave_granularity = cxled->cxld.interleave_granularity;
> @@ -3419,24 +3470,124 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
>  	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
>  	if (rc)
> - goto err;
> + goto out;
>
>  	dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
> -		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
> -		dev_name(&cxlr->dev), p->res, p->interleave_ways,
> -		p->interleave_granularity);
> +		dev_name(&cxlmd->dev),
> +		dev_name(&cxled->cxld.dev), __func__,
> +		dev_name(&cxlr->dev), p->res,
> +		p->interleave_ways,
> +		p->interleave_granularity);
>
> /* ...to match put_device() in cxl_add_to_region() */
> get_device(&cxlr->dev);
> up_write(&cxl_region_rwsem);
> +out:
> + construct_region_end();
> + if (rc) {
> + drop_region(cxlr);
> + return ERR_PTR(rc);
> + }
> + return cxlr;
> +}
> +
> +static struct cxl_region *
> +__construct_new_region(struct cxl_root_decoder *cxlrd,
> + struct cxl_endpoint_decoder **cxled, int ways)
> +{
> + struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
> + struct cxl_region_params *p;
> + resource_size_t size = 0;
> + struct cxl_region *cxlr;
> + int rc, i;
> +
> +	/* If interleaving is not supported, why does ways need to be at least 1? */
> + if (ways < 1)
> + return ERR_PTR(-EINVAL);
> +
> + cxlr = construct_region_begin(cxlrd, cxled[0]);
> + if (IS_ERR(cxlr))
> + return cxlr;
> +
> + rc = set_interleave_ways(cxlr, ways);
> + if (rc)
> + goto out;
> +
> +	rc = set_interleave_granularity(cxlr, cxld->interleave_granularity);
> + if (rc)
> + goto out;
> +
> + down_read(&cxl_dpa_rwsem);
> + for (i = 0; i < ways; i++) {
> + if (!cxled[i]->dpa_res)
> + break;
> + size += resource_size(cxled[i]->dpa_res);
> + }
> + up_read(&cxl_dpa_rwsem);
> +
> + if (i < ways)
> + goto out;
> +
> + rc = alloc_hpa(cxlr, size);
> + if (rc)
> + goto out;
> +
> + down_read(&cxl_dpa_rwsem);
> + for (i = 0; i < ways; i++) {
> + rc = cxl_region_attach(cxlr, cxled[i], i);
> + if (rc)
> + break;
> + }
> + up_read(&cxl_dpa_rwsem);
> +
> + if (rc)
> + goto out;
> +
> + rc = cxl_region_decode_commit(cxlr);
> + if (rc)
> + goto out;
>
> + p = &cxlr->params;
> + p->state = CXL_CONFIG_COMMIT;
> +out:
> + construct_region_end();
> + if (rc) {
> + drop_region(cxlr);
> + return ERR_PTR(rc);
> + }
> return cxlr;
> +}
>
> -err:
> - up_write(&cxl_region_rwsem);
> -	devm_release_action(port->uport_dev, unregister_region, cxlr);
> - return ERR_PTR(rc);
> +/**
> + * cxl_create_region - Establish a region given an array of endpoint decoders
> + * @cxlrd: root decoder to allocate HPA
> + * @cxled: array of endpoint decoders with reserved DPA capacity
> + * @ways: size of @cxled array
> + *
> + * Returns a fully formed region in the commit state and attached to the
> + * cxl_region driver.
> + */
> +struct cxl_region *cxl_create_region(struct cxl_root_decoder *cxlrd,
> +				     struct cxl_endpoint_decoder **cxled,
> +				     int ways)
> +{
> + struct cxl_region *cxlr;
> +
> + mutex_lock(&cxlrd->range_lock);
> + cxlr = __construct_new_region(cxlrd, cxled, ways);
> + mutex_unlock(&cxlrd->range_lock);
> +
> + if (IS_ERR(cxlr))
> + return cxlr;
> +
> + if (device_attach(&cxlr->dev) <= 0) {
> + dev_err(&cxlr->dev, "failed to create region\n");
> + drop_region(cxlr);
> + return ERR_PTR(-ENODEV);
> + }
> + return cxlr;
> }
> +EXPORT_SYMBOL_NS_GPL(cxl_create_region, CXL);
>
>  int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
>  {
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index d3fdd2c1e066..1bf3b74ff959 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -905,6 +905,7 @@ void cxl_coordinates_combine(struct access_coordinate *out,
> bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
>
> +int cxl_region_detach(struct cxl_endpoint_decoder *cxled);
> /*
>   * Unit test builds overrides this to __weak, find the 'strong' version
> * of these symbols in tools/testing/cxl/.
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index a0e0795ec064..377bb3cd2d47 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -881,5 +881,7 @@ struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
>  					      int interleave_ways,
> unsigned long flags,
> resource_size_t *max);
> -
> +struct cxl_region *cxl_create_region(struct cxl_root_decoder *cxlrd,
> +				     struct cxl_endpoint_decoder **cxled,
> + int ways);
> #endif /* __CXL_MEM_H__ */
> diff --git a/drivers/net/ethernet/sfc/efx_cxl.c b/drivers/net/ethernet/sfc/efx_cxl.c
> index b5626d724b52..4012e3faa298 100644
> --- a/drivers/net/ethernet/sfc/efx_cxl.c
> +++ b/drivers/net/ethernet/sfc/efx_cxl.c
> @@ -92,8 +92,18 @@ void efx_cxl_init(struct efx_nic *efx)
>
>  	cxl->cxled = cxl_request_dpa(cxl->endpoint, true,
>  				     EFX_CTPIO_BUFFER_SIZE, EFX_CTPIO_BUFFER_SIZE);
> - if (IS_ERR(cxl->cxled))
> + if (IS_ERR(cxl->cxled)) {
> pci_info(pci_dev, "CXL accel request DPA failed");
> + return;
> + }
> +
> +	cxl->efx_region = cxl_create_region(cxl->cxlrd, &cxl->cxled, 1);
> + if (!cxl->efx_region) {
cxl_create_region() returns an ERR_PTR() on failure rather than NULL, so this check should be:

	if (IS_ERR(cxl->efx_region)) {
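I.e., something along these lines (untested sketch, keeping the rest of the error path exactly as in the patch):

	cxl->efx_region = cxl_create_region(cxl->cxlrd, &cxl->cxled, 1);
	if (IS_ERR(cxl->efx_region)) {
		/* cxl_create_region() hands back ERR_PTR(), not NULL */
		pci_info(pci_dev, "CXL accel create region failed");
		cxl_dpa_free(cxl->cxled);
		return;
	}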
> + pci_info(pci_dev, "CXL accel create region failed");
> + cxl_dpa_free(cxl->cxled);
> + return;
> + }
> +
> out:
> cxl_release_endpoint(cxl->cxlmd, cxl->endpoint);
> }
> @@ -102,6 +112,9 @@ void efx_cxl_exit(struct efx_nic *efx)
> {
> struct efx_cxl *cxl = efx->cxl;
>
> + if (cxl->efx_region)
> + cxl_region_detach(cxl->cxled);
> +
> if (cxl->cxled)
> cxl_dpa_free(cxl->cxled);
>
> diff --git a/include/linux/cxl_accel_mem.h b/include/linux/cxl_accel_mem.h
> index d4ecb5bb4fc8..a5f9ffc24509 100644
> --- a/include/linux/cxl_accel_mem.h
> +++ b/include/linux/cxl_accel_mem.h
> @@ -48,4 +48,9 @@ struct cxl_endpoint_decoder *cxl_request_dpa(struct cxl_port *endpoint,
>  					     resource_size_t min,
> resource_size_t max);
> int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
> +struct cxl_region *cxl_create_region(struct cxl_root_decoder *cxlrd,
> +				     struct cxl_endpoint_decoder **cxled,
> + int ways);
> +
> +int cxl_region_detach(struct cxl_endpoint_decoder *cxled);
> #endif