From: Robin Murphy <robin.murphy@arm.com>
To: John Garry <john.garry@huawei.com>,
dwmw2@infradead.org, baolu.lu@linux.intel.com, joro@8bytes.org,
will@kernel.org
Cc: iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
linuxarm@huawei.com
Subject: Re: [PATCH 1/3] iova: Add CPU hotplug handler to flush rcaches
Date: Tue, 23 Mar 2021 12:45:29 +0000
Message-ID: <7085b559-3d84-2bc1-5c36-8e034a66f8fc@arm.com>
In-Reply-To: <1614600741-15696-2-git-send-email-john.garry@huawei.com>
On 2021-03-01 12:12, John Garry wrote:
> Like the Intel IOMMU driver already does, flush the per-IOVA domain
> CPU rcache when a CPU goes offline - there's no point in keeping it.
Thanks John!
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: John Garry <john.garry@huawei.com>
> ---
>  drivers/iommu/iova.c       | 30 +++++++++++++++++++++++++++++-
>  include/linux/cpuhotplug.h |  1 +
>  include/linux/iova.h       |  1 +
>  3 files changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
> index e6e2fa85271c..c78312560425 100644
> --- a/drivers/iommu/iova.c
> +++ b/drivers/iommu/iova.c
> @@ -25,6 +25,17 @@ static void init_iova_rcaches(struct iova_domain *iovad);
>  static void free_iova_rcaches(struct iova_domain *iovad);
>  static void fq_destroy_all_entries(struct iova_domain *iovad);
>  static void fq_flush_timeout(struct timer_list *t);
> +
> +static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
> +{
> +        struct iova_domain *iovad;
> +
> +        iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
> +
> +        free_cpu_cached_iovas(cpu, iovad);
> +        return 0;
> +}
> +
>  static void free_global_cached_iovas(struct iova_domain *iovad);
> 
>  void
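
For anyone following along: hlist_entry_safe() above is just
container_of() with a NULL check, so the callback recovers the
iova_domain from its embedded cpuhp_dead node, roughly (illustrative,
minus the NULL guard):

        iovad = container_of(node, struct iova_domain, cpuhp_dead);

and then simply flushes that domain's per-CPU rcache for the CPU which
has just gone down.
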
> @@ -51,6 +62,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
>          iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
>          rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
>          rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
> +        cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
>          init_iova_rcaches(iovad);
>  }
>  EXPORT_SYMBOL_GPL(init_iova_domain);
> @@ -257,10 +269,21 @@ int iova_cache_get(void)
>  {
>          mutex_lock(&iova_cache_mutex);
>          if (!iova_cache_users) {
> +                int ret;
> +
> +                ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
> +                                              iova_cpuhp_dead);
> +                if (ret) {
> +                        mutex_unlock(&iova_cache_mutex);
> +                        pr_err("Couldn't register cpuhp handler\n");
> +                        return ret;
> +                }
> +
>                  iova_cache = kmem_cache_create(
>                          "iommu_iova", sizeof(struct iova), 0,
>                          SLAB_HWCACHE_ALIGN, NULL);
>                  if (!iova_cache) {
> +                        cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
>                          mutex_unlock(&iova_cache_mutex);
>                          pr_err("Couldn't create iova cache\n");
>                          return -ENOMEM;
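
This follows the usual multi-instance cpuhp pattern - one state
registered for the subsystem, then one instance linked per object. As
a minimal sketch of that pattern, with hypothetical foo_* names and a
hypothetical CPUHP_FOO_DEAD enum entry (none of this is from the patch
itself):

static int foo_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct foo_ctx *ctx = hlist_entry_safe(node, struct foo_ctx,
                                               cpuhp_dead);

        foo_flush_percpu(ctx, cpu);     /* hypothetical helper */
        return 0;
}

static int foo_init(struct foo_ctx *ctx)
{
        int ret;

        /* Register the state and its teardown callback once... */
        ret = cpuhp_setup_state_multi(CPUHP_FOO_DEAD, "foo:dead",
                                      NULL, foo_cpu_dead);
        if (ret)
                return ret;

        /* ...then link this instance, without invoking any callbacks
         * at registration time. */
        return cpuhp_state_add_instance_nocalls(CPUHP_FOO_DEAD,
                                                &ctx->cpuhp_dead);
}

The NULL startup callback is the right choice here too - nothing needs
doing when a CPU comes back online, since its rcache simply starts
refilling on demand.
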
> @@ -282,8 +305,10 @@ void iova_cache_put(void)
>                  return;
>          }
>          iova_cache_users--;
> -        if (!iova_cache_users)
> +        if (!iova_cache_users) {
> +                cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
>                  kmem_cache_destroy(iova_cache);
> +        }
>          mutex_unlock(&iova_cache_mutex);
>  }
>  EXPORT_SYMBOL_GPL(iova_cache_put);
> @@ -606,6 +631,9 @@ void put_iova_domain(struct iova_domain *iovad)
>  {
>          struct iova *iova, *tmp;
> 
> +        cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
> +                                            &iovad->cpuhp_dead);
> +
>          free_iova_flush_queue(iovad);
>          free_iova_rcaches(iovad);
>          rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
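
Worth noting the ordering here: the instance is unhooked before
free_iova_rcaches(), so by the time the rcaches are torn down no
CPU-dead callback can still be running against this domain. The
hypothetical foo consumer from the earlier sketch would tear down the
same way (again illustrative only):

static void foo_destroy(struct foo_ctx *ctx)
{
        /* Unhook from hotplug first, so no CPU-dead callback can
         * race against the teardown... */
        cpuhp_state_remove_instance_nocalls(CPUHP_FOO_DEAD,
                                            &ctx->cpuhp_dead);

        /* ...then it is safe to free the per-CPU caches. */
        foo_free_percpu(ctx);           /* hypothetical helper */
}
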
> diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
> index f14adb882338..cedac9986557 100644
> --- a/include/linux/cpuhotplug.h
> +++ b/include/linux/cpuhotplug.h
> @@ -58,6 +58,7 @@ enum cpuhp_state {
>          CPUHP_NET_DEV_DEAD,
>          CPUHP_PCI_XGENE_DEAD,
>          CPUHP_IOMMU_INTEL_DEAD,
> +        CPUHP_IOMMU_IOVA_DEAD,
>          CPUHP_LUSTRE_CFS_DEAD,
>          CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
>          CPUHP_PADATA_DEAD,
> diff --git a/include/linux/iova.h b/include/linux/iova.h
> index c834c01c0a5b..4be6c0ab4997 100644
> --- a/include/linux/iova.h
> +++ b/include/linux/iova.h
> @@ -95,6 +95,7 @@ struct iova_domain {
>                                                     flush-queues */
>          atomic_t fq_timer_on;                   /* 1 when timer is active, 0
>                                                     when not */
> +        struct hlist_node        cpuhp_dead;
>  };
>
> static inline unsigned long iova_size(struct iova *iova)
>