From: eric.auger@linaro.org (Eric Auger)
To: linux-arm-kernel@lists.infradead.org
Subject: [RFC v2 08/15] iommu/arm-smmu: implement iommu_get/put_single_reserved
Date: Thu, 11 Feb 2016 14:34:15 +0000 [thread overview]
Message-ID: <1455201262-5259-9-git-send-email-eric.auger@linaro.org> (raw)
In-Reply-To: <1455201262-5259-1-git-send-email-eric.auger@linaro.org>
Implement the iommu_get/put_single_reserved API in arm-smmu.
To track which physical addresses are already mapped, we use
the reserved binding RB tree, indexed by PA.
Signed-off-by: Eric Auger <eric.auger@linaro.org>
---
v1 -> v2:
- previously in vfio_iommu_type1.c
---
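For context, here is a minimal usage sketch of the API this patch
implements, as seen from a caller such as the MSI layer or VFIO. The
iommu_get/put_single_reserved wrappers are introduced in patch 07/15;
their signatures are assumed here to mirror the arm-smmu ops below.
map_msi_doorbell()/unmap_msi_doorbell(), doorbell_pa and the prot flag
are illustrative only and not part of this series.

#include <linux/iommu.h>

/*
 * Illustrative caller, not part of the patch: map the physical address
 * of an MSI doorbell through the reserved IOVA domain so a device
 * behind the SMMU can reach it.
 */
static int map_msi_doorbell(struct iommu_domain *domain,
			    phys_addr_t doorbell_pa, dma_addr_t *msi_iova)
{
	int ret;

	/* allocate (or reuse) a reserved IOVA mapping backed by doorbell_pa */
	ret = iommu_get_single_reserved(domain, doorbell_pa,
					IOMMU_WRITE, msi_iova);
	if (ret)
		return ret;

	/* ... program *msi_iova into the device's MSI address register ... */
	return 0;
}

static void unmap_msi_doorbell(struct iommu_domain *domain,
			       dma_addr_t msi_iova)
{
	/* drop the reference taken by the get above */
	iommu_put_single_reserved(domain, msi_iova);
}

A second get on a physical address that is already mapped returns the
same reserved IOVA (plus the intra-page offset) and only takes an extra
reference on the binding; each put drops one reference.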
drivers/iommu/arm-smmu.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 114 insertions(+)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 729a4c6..9961bfd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1563,6 +1563,118 @@ static void arm_smmu_free_reserved_iova_domain(struct iommu_domain *domain)
mutex_unlock(&smmu_domain->reserved_mutex);
}
+static int arm_smmu_get_single_reserved(struct iommu_domain *domain,
+ phys_addr_t addr, int prot,
+ dma_addr_t *iova)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ unsigned long order = __ffs(domain->ops->pgsize_bitmap);
+ size_t page_size = 1 << order;
+ phys_addr_t mask = page_size - 1;
+ phys_addr_t aligned_addr = addr & ~mask;
+ phys_addr_t offset = addr - aligned_addr;
+ struct arm_smmu_reserved_binding *b;
+ struct iova *p_iova;
+ struct iova_domain *iovad = smmu_domain->reserved_iova_domain;
+ int ret;
+
+ if (!iovad)
+ return -EINVAL;
+
+ mutex_lock(&smmu_domain->reserved_mutex);
+
+ b = find_reserved_binding(smmu_domain, aligned_addr, page_size);
+ if (b) {
+ *iova = b->iova + offset;
+ kref_get(&b->kref);
+ ret = 0;
+ goto unlock;
+ }
+
+ /* there is no existing reserved iova for this pa */
+ p_iova = alloc_iova(iovad, 1, iovad->dma_32bit_pfn, true);
+ if (!p_iova) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ *iova = p_iova->pfn_lo << order;
+
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b) {
+ ret = -ENOMEM;
+ goto free_iova_unlock;
+ }
+
+ ret = arm_smmu_map(domain, *iova, aligned_addr, page_size, prot);
+ if (ret)
+ goto free_binding_iova_unlock;
+
+ kref_init(&b->kref);
+ kref_get(&b->kref);
+ b->domain = smmu_domain;
+ b->addr = aligned_addr;
+ b->iova = *iova;
+ b->size = page_size;
+
+ link_reserved_binding(smmu_domain, b);
+
+ *iova += offset;
+ goto unlock;
+
+free_binding_iova_unlock:
+ kfree(b);
+free_iova_unlock:
+ free_iova(smmu_domain->reserved_iova_domain, *iova >> order);
+unlock:
+ mutex_unlock(&smmu_domain->reserved_mutex);
+ return ret;
+}
+
+/* called with reserved_mutex locked */
+static void reserved_binding_release(struct kref *kref)
+{
+ struct arm_smmu_reserved_binding *b =
+ container_of(kref, struct arm_smmu_reserved_binding, kref);
+ struct arm_smmu_domain *smmu_domain = b->domain;
+ struct iommu_domain *d = &smmu_domain->domain;
+ unsigned long order = __ffs(b->size);
+
+
+ arm_smmu_unmap(d, b->iova, b->size);
+ free_iova(smmu_domain->reserved_iova_domain, b->iova >> order);
+ unlink_reserved_binding(smmu_domain, b);
+ kfree(b);
+}
+
+static void arm_smmu_put_single_reserved(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long order;
+ phys_addr_t aligned_addr;
+ dma_addr_t aligned_iova, page_size, mask, offset;
+ struct arm_smmu_reserved_binding *b;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ order = __ffs(domain->ops->pgsize_bitmap);
+ page_size = (uint64_t)1 << order;
+ mask = page_size - 1;
+
+ aligned_iova = iova & ~mask;
+ offset = iova - aligned_iova;
+
+ aligned_addr = iommu_iova_to_phys(domain, aligned_iova);
+
+ mutex_lock(&smmu_domain->reserved_mutex);
+
+ b = find_reserved_binding(smmu_domain, aligned_addr, page_size);
+ if (!b)
+ goto unlock;
+ kref_put(&b->kref, reserved_binding_release);
+
+unlock:
+ mutex_unlock(&smmu_domain->reserved_mutex);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1580,6 +1692,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_set_attr = arm_smmu_domain_set_attr,
.alloc_reserved_iova_domain = arm_smmu_alloc_reserved_iova_domain,
.free_reserved_iova_domain = arm_smmu_free_reserved_iova_domain,
+ .get_single_reserved = arm_smmu_get_single_reserved,
+ .put_single_reserved = arm_smmu_put_single_reserved,
/* Page size bitmap, restricted during device attach */
.pgsize_bitmap = -1UL,
};
--
1.9.1
Thread overview: 16+ messages
2016-02-11 14:34 [RFC v2 00/15] KVM PCIe/MSI passthrough on ARM/ARM64 Eric Auger
2016-02-11 14:34 ` [RFC v2 01/15] iommu: Add DOMAIN_ATTR_MSI_MAPPING attribute Eric Auger
2016-02-11 14:34 ` [RFC v2 02/15] vfio: expose MSI mapping requirement through VFIO_IOMMU_GET_INFO Eric Auger
2016-02-11 14:34 ` [RFC v2 03/15] vfio: introduce VFIO_IOVA_RESERVED vfio_dma type Eric Auger
2016-02-11 14:34 ` [RFC v2 04/15] iommu: add alloc/free_reserved_iova_domain Eric Auger
2016-02-11 14:34 ` [RFC v2 05/15] iommu/arm-smmu: implement alloc/free_reserved_iova_domain Eric Auger
2016-02-11 14:34 ` [RFC v2 06/15] iommu/arm-smmu: add a reserved binding RB tree Eric Auger
2016-02-11 14:34 ` [RFC v2 07/15] iommu: iommu_get/put_single_reserved Eric Auger
2016-02-11 14:34 ` [RFC v2 08/15] iommu/arm-smmu: implement iommu_get/put_single_reserved Eric Auger [this message]
2016-02-11 14:34 ` [RFC v2 09/15] iommu/arm-smmu: relinquish reserved resources on domain deletion Eric Auger
2016-02-11 14:34 ` [RFC v2 10/15] vfio: allow the user to register reserved iova range for MSI mapping Eric Auger
2016-02-11 14:34 ` [RFC v2 11/15] msi: Add a new MSI_FLAG_IRQ_REMAPPING flag Eric Auger
2016-02-11 14:34 ` [RFC v2 12/15] msi: export msi_get_domain_info Eric Auger
2016-02-11 14:34 ` [RFC v2 13/15] vfio/type1: also check IRQ remapping capability at msi domain Eric Auger
2016-02-11 14:34 ` [RFC v2 14/15] iommu/arm-smmu: do not advertise IOMMU_CAP_INTR_REMAP Eric Auger
2016-02-11 14:34 ` [RFC v2 15/15] irqchip/gicv2m/v3-its-pci-msi: IOMMU map the MSI frame when needed Eric Auger