From: Oleksandr Tyshchenko <olekstysh@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 13/13] [RFC] iommu: AMD-Vi: Squash map_pages/unmap_pages with map_page/unmap_page
Date: Tue, 25 Jul 2017 20:26:55 +0300
Message-ID: <1501003615-15274-14-git-send-email-olekstysh@gmail.com>
In-Reply-To: <1501003615-15274-1-git-send-email-olekstysh@gmail.com>

From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Reduce the scope of the TODO by squashing the single-page handlers
into their multi-page counterparts. The next target is to use large
pages whenever possible, provided the hardware supports them.
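
For callers, the squash means a single amd_iommu_map_pages() /
amd_iommu_unmap_pages() call with an explicit order argument replaces
the old per-page loop. A minimal sketch of the intended usage
(hypothetical caller, not part of this patch; d, gfn, mfn and rc are
assumed to be in scope):

    /* Map 2^2 = 4 contiguous pages read/write through the IOMMU.
     * On failure the implementation unwinds any partially established
     * mappings before returning. */
    rc = amd_iommu_map_pages(d, gfn, mfn, 2 /* order */,
                             IOMMUF_readable | IOMMUF_writable);
    if ( rc )
        return rc;

    /* Later, tear the same 4-page range down again. */
    rc = amd_iommu_unmap_pages(d, gfn, 2 /* order */);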

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

---
   Changes in v1:
      -

   Changes in v2:
      -

---
 xen/drivers/passthrough/amd/iommu_map.c | 250 ++++++++++++++++----------------
 1 file changed, 121 insertions(+), 129 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index ea3a728..22d0cc6 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -631,188 +631,180 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
     return 0;
 }
 
-static int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
-                                           unsigned long mfn,
-                                           unsigned int flags)
+/*
+ * TODO: Optimize by using large pages whenever possible, provided
+ * the hardware supports them.
+ */
+int __must_check amd_iommu_map_pages(struct domain *d, unsigned long gfn,
+                                     unsigned long mfn,
+                                     unsigned int order,
+                                     unsigned int flags)
 {
-    bool_t need_flush = 0;
     struct domain_iommu *hd = dom_iommu(d);
     int rc;
-    unsigned long pt_mfn[7];
-    unsigned int merge_level;
+    unsigned long orig_gfn = gfn;
+    unsigned long i;
 
     if ( iommu_use_hap_pt(d) )
         return 0;
 
-    memset(pt_mfn, 0, sizeof(pt_mfn));
-
     spin_lock(&hd->arch.mapping_lock);
-
     rc = amd_iommu_alloc_root(hd);
+    spin_unlock(&hd->arch.mapping_lock);
     if ( rc )
     {
-        spin_unlock(&hd->arch.mapping_lock);
         AMD_IOMMU_DEBUG("Root table alloc failed, gfn = %lx\n", gfn);
         domain_crash(d);
         return rc;
     }
 
-    /* Since HVM domain is initialized with 2 level IO page table,
-     * we might need a deeper page table for lager gfn now */
-    if ( is_hvm_domain(d) )
+    for ( i = 0; i < (1UL << order); i++, gfn++, mfn++ )
     {
-        if ( update_paging_mode(d, gfn) )
+        bool_t need_flush = 0;
+        unsigned long pt_mfn[7];
+        unsigned int merge_level;
+
+        memset(pt_mfn, 0, sizeof(pt_mfn));
+
+        spin_lock(&hd->arch.mapping_lock);
+
+        /* Since an HVM domain is initialized with a 2-level IO page table,
+         * we might need a deeper page table for a larger gfn now */
+        if ( is_hvm_domain(d) )
+        {
+            if ( update_paging_mode(d, gfn) )
+            {
+                spin_unlock(&hd->arch.mapping_lock);
+                AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
+                domain_crash(d);
+                rc = -EFAULT;
+                goto err;
+            }
+        }
+
+        if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
+            AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
             domain_crash(d);
-            return -EFAULT;
+            rc = -EFAULT;
+            goto err;
         }
-    }
 
-    if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
-    {
-        spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
-        domain_crash(d);
-        return -EFAULT;
-    }
+        /* Install 4k mapping first */
+        need_flush = set_iommu_pte_present(pt_mfn[1], gfn, mfn,
+                                           IOMMU_PAGING_MODE_LEVEL_1,
+                                           !!(flags & IOMMUF_writable),
+                                           !!(flags & IOMMUF_readable));
 
-    /* Install 4k mapping first */
-    need_flush = set_iommu_pte_present(pt_mfn[1], gfn, mfn, 
-                                       IOMMU_PAGING_MODE_LEVEL_1,
-                                       !!(flags & IOMMUF_writable),
-                                       !!(flags & IOMMUF_readable));
+        /* Do not increase pde count if io mapping has not been changed */
+        if ( !need_flush )
+        {
+            spin_unlock(&hd->arch.mapping_lock);
+            continue;
+        }
 
-    /* Do not increase pde count if io mapping has not been changed */
-    if ( !need_flush )
-        goto out;
+        /* 4K mapping for PV guests never changes,
+         * no need to flush if we trust non-present bits */
+        if ( is_hvm_domain(d) )
+            amd_iommu_flush_pages(d, gfn, 0);
 
-    /* 4K mapping for PV guests never changes, 
-     * no need to flush if we trust non-present bits */
-    if ( is_hvm_domain(d) )
-        amd_iommu_flush_pages(d, gfn, 0);
-
-    for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
-          merge_level <= hd->arch.paging_mode; merge_level++ )
-    {
-        if ( pt_mfn[merge_level] == 0 )
-            break;
-        if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
-                                     gfn, mfn, merge_level) )
-            break;
-
-        if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn, 
-                               flags, merge_level) )
+        for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
+              merge_level <= hd->arch.paging_mode; merge_level++ )
         {
-            spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
-                            "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
-            domain_crash(d);
-            return -EFAULT;
+            if ( pt_mfn[merge_level] == 0 )
+                break;
+            if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
+                                         gfn, mfn, merge_level) )
+                break;
+
+            if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn,
+                                   flags, merge_level) )
+            {
+                spin_unlock(&hd->arch.mapping_lock);
+                AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
+                                "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
+                domain_crash(d);
+                rc = -EFAULT;
+                goto err;
+            }
+
+            /* Deallocate lower level page table */
+            free_amd_iommu_pgtable(mfn_to_page(pt_mfn[merge_level - 1]));
         }
 
-        /* Deallocate lower level page table */
-        free_amd_iommu_pgtable(mfn_to_page(pt_mfn[merge_level - 1]));
+        spin_unlock(&hd->arch.mapping_lock);
     }
 
-out:
-    spin_unlock(&hd->arch.mapping_lock);
     return 0;
+
+err:
+    while ( i-- )
+        /* If statement to satisfy __must_check. */
+        if ( amd_iommu_unmap_pages(d, orig_gfn + i, 0) )
+            continue;
+
+    return rc;
 }
 
-static int __must_check amd_iommu_unmap_page(struct domain *d,
-                                             unsigned long gfn)
+int __must_check amd_iommu_unmap_pages(struct domain *d,
+                                       unsigned long gfn,
+                                       unsigned int order)
 {
-    unsigned long pt_mfn[7];
     struct domain_iommu *hd = dom_iommu(d);
+    int rt = 0;
+    unsigned long i;
 
     if ( iommu_use_hap_pt(d) )
         return 0;
 
-    memset(pt_mfn, 0, sizeof(pt_mfn));
-
-    spin_lock(&hd->arch.mapping_lock);
-
     if ( !hd->arch.root_table )
-    {
-        spin_unlock(&hd->arch.mapping_lock);
         return 0;
-    }
 
-    /* Since HVM domain is initialized with 2 level IO page table,
-     * we might need a deeper page table for lager gfn now */
-    if ( is_hvm_domain(d) )
+    for ( i = 0; i < (1UL << order); i++, gfn++ )
     {
-        int rc = update_paging_mode(d, gfn);
+        unsigned long pt_mfn[7];
 
-        if ( rc )
-        {
-            spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
-            if ( rc != -EADDRNOTAVAIL )
-                domain_crash(d);
-            return rc;
-        }
-    }
+        memset(pt_mfn, 0, sizeof(pt_mfn));
 
-    if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
-    {
-        spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
-        domain_crash(d);
-        return -EFAULT;
-    }
-
-    /* mark PTE as 'page not present' */
-    clear_iommu_pte_present(pt_mfn[1], gfn);
-    spin_unlock(&hd->arch.mapping_lock);
+        spin_lock(&hd->arch.mapping_lock);
 
-    amd_iommu_flush_pages(d, gfn, 0);
-
-    return 0;
-}
-
-/* TODO: Optimize by squashing map_pages/unmap_pages with map_page/unmap_page */
-int __must_check amd_iommu_map_pages(struct domain *d, unsigned long gfn,
-                                     unsigned long mfn, unsigned int order,
-                                     unsigned int flags)
-{
-    unsigned long i;
-    int rc = 0;
-
-    for ( i = 0; i < (1UL << order); i++ )
-    {
-        rc = amd_iommu_map_page(d, gfn + i, mfn + i, flags);
-        if ( unlikely(rc) )
+        /* Since an HVM domain is initialized with a 2-level IO page table,
+         * we might need a deeper page table for a larger gfn now */
+        if ( is_hvm_domain(d) )
         {
-            while ( i-- )
-                /* If statement to satisfy __must_check. */
-                if ( amd_iommu_unmap_page(d, gfn + i) )
-                    continue;
+            int rc = update_paging_mode(d, gfn);
 
-            break;
+            if ( rc )
+            {
+                spin_unlock(&hd->arch.mapping_lock);
+                AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
+                if ( rc != -EADDRNOTAVAIL )
+                    domain_crash(d);
+                if ( !rt )
+                    rt = rc;
+                continue;
+            }
         }
-    }
-
-    return rc;
-}
 
-int __must_check amd_iommu_unmap_pages(struct domain *d, unsigned long gfn,
-                                       unsigned int order)
-{
-    unsigned long i;
-    int rc = 0;
+        if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
+        {
+            spin_unlock(&hd->arch.mapping_lock);
+            AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
+            domain_crash(d);
+            if ( !rt )
+                rt = -EFAULT;
+            continue;
+        }
 
-    for ( i = 0; i < (1UL << order); i++ )
-    {
-        int ret = amd_iommu_unmap_page(d, gfn + i);
+        /* mark PTE as 'page not present' */
+        clear_iommu_pte_present(pt_mfn[1], gfn);
+        spin_unlock(&hd->arch.mapping_lock);
 
-        if ( !rc )
-            rc = ret;
+        amd_iommu_flush_pages(d, gfn, 0);
     }
 
-    return rc;
+    return rt;
 }
 
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
@@ -831,7 +823,7 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
     gfn = phys_addr >> PAGE_SHIFT;
     for ( i = 0; i < npages; i++ )
     {
-        rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags);
+        rt = amd_iommu_map_pages(domain, gfn + i, gfn + i, 0, flags);
         if ( rt != 0 )
             return rt;
     }
-- 
2.7.4

