From: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
To: Dan Williams <dan.j.williams@intel.com>
Cc: <linux-cxl@vger.kernel.org>, Robert Richter <rrichter@amd.com>,
<alison.schofield@intel.com>, <terry.bowman@amd.com>,
<bhelgaas@google.com>, <dave.jiang@intel.com>,
<nvdimm@lists.linux.dev>
Subject: Re: [PATCH v6 02/12] cxl/region: Drop redundant pmem region release handling
Date: Fri, 2 Dec 2022 15:43:55 +0000 [thread overview]
Message-ID: <20221202154355.000058df@Huawei.com> (raw)
In-Reply-To: <166993041215.1882361.6321535567798911286.stgit@dwillia2-xfh.jf.intel.com>
On Thu, 01 Dec 2022 13:33:32 -0800
Dan Williams <dan.j.williams@intel.com> wrote:
> Now that a cxl_nvdimm object can only experience ->remove() via an
> unregistration event (because the cxl_nvdimm bind attributes are
> suppressed), additional cleanups are possible.
>
> It is already the case that the removal of a cxl_memdev object triggers
> ->remove() on any associated region. With that mechanism in place there
> is no need for the cxl_nvdimm removal to trigger the same. Just rely on
> cxl_region_detach() to tear down the whole cxl_pmem_region.
>
> Tested-by: Robert Richter <rrichter@amd.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Seems logical. There was a bunch of stuff left in some of this where I didn't
follow why it was still there, but that's an artifact of how the series is built
up, which is fair enough. FWIW
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> ---
> drivers/cxl/core/pmem.c | 2 -
> drivers/cxl/cxl.h | 1 -
> drivers/cxl/pmem.c | 90 -----------------------------------------------
> 3 files changed, 93 deletions(-)
>
> diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
> index 36aa5070d902..1d12a8206444 100644
> --- a/drivers/cxl/core/pmem.c
> +++ b/drivers/cxl/core/pmem.c
> @@ -188,7 +188,6 @@ static void cxl_nvdimm_release(struct device *dev)
> {
> struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
>
> - xa_destroy(&cxl_nvd->pmem_regions);
> kfree(cxl_nvd);
> }
>
> @@ -231,7 +230,6 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
>
> dev = &cxl_nvd->dev;
> cxl_nvd->cxlmd = cxlmd;
> - xa_init(&cxl_nvd->pmem_regions);
> device_initialize(dev);
> lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
> device_set_pm_not_required(dev);
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 7d07127eade3..4ac7938eaf6c 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -424,7 +424,6 @@ struct cxl_nvdimm {
> struct device dev;
> struct cxl_memdev *cxlmd;
> struct cxl_nvdimm_bridge *bridge;
> - struct xarray pmem_regions;
> };
>
> struct cxl_pmem_region_mapping {
> diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
> index 946e171e7d4a..652f00fc68ca 100644
> --- a/drivers/cxl/pmem.c
> +++ b/drivers/cxl/pmem.c
> @@ -27,26 +27,7 @@ static void clear_exclusive(void *cxlds)
>
> static void unregister_nvdimm(void *nvdimm)
> {
> - struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
> - struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
> - struct cxl_pmem_region *cxlr_pmem;
> - unsigned long index;
> -
> - device_lock(&cxl_nvb->dev);
> - dev_set_drvdata(&cxl_nvd->dev, NULL);
> - xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
> - get_device(&cxlr_pmem->dev);
> - device_unlock(&cxl_nvb->dev);
> -
> - device_release_driver(&cxlr_pmem->dev);
> - put_device(&cxlr_pmem->dev);
> -
> - device_lock(&cxl_nvb->dev);
> - }
> - device_unlock(&cxl_nvb->dev);
> -
> nvdimm_delete(nvdimm);
> - cxl_nvd->bridge = NULL;
> }
>
> static int cxl_nvdimm_probe(struct device *dev)
> @@ -243,21 +224,6 @@ static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
> return 0;
> }
>
> -static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
> -{
> - struct cxl_pmem_region *cxlr_pmem;
> -
> - if (!is_cxl_pmem_region(dev))
> - return 0;
> -
> - cxlr_pmem = to_cxl_pmem_region(dev);
> - if (cxlr_pmem->bridge != cxl_nvb)
> - return 0;
> -
> - device_release_driver(dev);
> - return 0;
> -}
> -
> static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
> struct nvdimm_bus *nvdimm_bus)
> {
> @@ -269,8 +235,6 @@ static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
> * nvdimm_bus_unregister() rips the nvdimm objects out from
> * underneath them.
> */
> - bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
> - cxl_pmem_region_release_driver);
> bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
> cxl_nvdimm_release_driver);
> nvdimm_bus_unregister(nvdimm_bus);
> @@ -378,48 +342,6 @@ static void unregister_nvdimm_region(void *nd_region)
> nvdimm_region_delete(nd_region);
> }
>
> -static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
> - struct cxl_pmem_region *cxlr_pmem)
> -{
> - int rc;
> -
> - rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
> - cxlr_pmem, GFP_KERNEL);
> - if (rc)
> - return rc;
> -
> - get_device(&cxlr_pmem->dev);
> - return 0;
> -}
> -
> -static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
> - struct cxl_pmem_region *cxlr_pmem)
> -{
> - /*
> - * It is possible this is called without a corresponding
> - * cxl_nvdimm_add_region for @cxlr_pmem
> - */
> - cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
> - if (cxlr_pmem)
> - put_device(&cxlr_pmem->dev);
> -}
> -
> -static void release_mappings(void *data)
> -{
> - int i;
> - struct cxl_pmem_region *cxlr_pmem = data;
> - struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;
> -
> - device_lock(&cxl_nvb->dev);
> - for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
> - struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
> - struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;
> -
> - cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
> - }
> - device_unlock(&cxl_nvb->dev);
> -}
> -
> static void cxlr_pmem_remove_resource(void *res)
> {
> remove_resource(res);
> @@ -508,10 +430,6 @@ static int cxl_pmem_region_probe(struct device *dev)
> goto out_nvb;
> }
>
> - rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
> - if (rc)
> - goto out_nvd;
> -
> for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
> struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
> struct cxl_memdev *cxlmd = m->cxlmd;
> @@ -538,14 +456,6 @@ static int cxl_pmem_region_probe(struct device *dev)
> goto out_nvd;
> }
>
> - /*
> - * Pin the region per nvdimm device as those may be released
> - * out-of-order with respect to the region, and a single nvdimm
> - * maybe associated with multiple regions
> - */
> - rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
> - if (rc)
> - goto out_nvd;
> m->cxl_nvd = cxl_nvd;
> mappings[i] = (struct nd_mapping_desc) {
> .nvdimm = nvdimm,
>
next prev parent reply other threads:[~2022-12-02 15:44 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-01 21:33 [PATCH v6 00/12] cxl: Add support for Restricted CXL hosts (RCD mode) Dan Williams
2022-12-01 21:33 ` [PATCH v6 01/12] cxl/acpi: Simplify cxl_nvdimm_bridge probing Dan Williams
2022-12-02 15:02 ` Jonathan Cameron
2022-12-01 21:33 ` [PATCH v6 02/12] cxl/region: Drop redundant pmem region release handling Dan Williams
2022-12-02 15:43 ` Jonathan Cameron [this message]
2022-12-01 21:33 ` [PATCH v6 03/12] cxl/pmem: Refactor nvdimm device registration, delete the workqueue Dan Williams
2022-12-02 15:42 ` Jonathan Cameron
2022-12-01 21:33 ` [PATCH v6 04/12] cxl/pmem: Remove the cxl_pmem_wq and related infrastructure Dan Williams
2022-12-02 15:44 ` Jonathan Cameron
2022-12-01 21:33 ` [PATCH v6 05/12] cxl/acpi: Move rescan to the workqueue Dan Williams
2022-12-02 15:50 ` Jonathan Cameron
2022-12-03 7:14 ` Dan Williams
2022-12-01 21:33 ` [PATCH v6 06/12] tools/testing/cxl: Make mock CEDT parsing more robust Dan Williams
2022-12-01 21:57 ` Dave Jiang
2022-12-02 15:58 ` Jonathan Cameron
2022-12-03 7:22 ` Dan Williams
2022-12-01 21:33 ` [PATCH v6 07/12] cxl/ACPI: Register CXL host ports by bridge device Dan Williams
2022-12-01 22:00 ` Dave Jiang
2022-12-02 16:11 ` Jonathan Cameron
2022-12-03 7:28 ` Dan Williams
2022-12-01 21:34 ` [PATCH v6 08/12] cxl/acpi: Extract component registers of restricted hosts from RCRB Dan Williams
2022-12-01 23:55 ` Dave Jiang
2022-12-02 8:16 ` Robert Richter
2022-12-03 7:04 ` Dan Williams
2022-12-03 8:41 ` Dan Williams
2022-12-03 16:03 ` Robert Richter
2022-12-03 17:06 ` Dan Williams
2022-12-02 16:38 ` Jonathan Cameron
2022-12-03 7:39 ` Dan Williams
2022-12-01 21:34 ` [PATCH v6 09/12] cxl/mem: Move devm_cxl_add_endpoint() from cxl_core to cxl_mem Dan Williams
2022-12-02 16:40 ` Jonathan Cameron
2022-12-01 21:34 ` [PATCH v6 10/12] cxl/port: Add RCD endpoint port enumeration Dan Williams
2022-12-02 8:21 ` Robert Richter
2022-12-03 7:05 ` Dan Williams
2022-12-02 16:45 ` Jonathan Cameron
2022-12-01 21:34 ` [PATCH v6 11/12] tools/testing/cxl: Add an RCH topology Dan Williams
2022-12-02 8:05 ` Robert Richter
2022-12-02 17:04 ` Jonathan Cameron
2022-12-03 7:50 ` Dan Williams
2022-12-01 21:34 ` [PATCH v6 12/12] cxl/acpi: Set ACPI's CXL _OSC to indicate RCD mode support Dan Williams
2022-12-02 17:05 ` Jonathan Cameron
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221202154355.000058df@Huawei.com \
--to=jonathan.cameron@huawei.com \
--cc=alison.schofield@intel.com \
--cc=bhelgaas@google.com \
--cc=dan.j.williams@intel.com \
--cc=dave.jiang@intel.com \
--cc=linux-cxl@vger.kernel.org \
--cc=nvdimm@lists.linux.dev \
--cc=rrichter@amd.com \
--cc=terry.bowman@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox