From: Tycho Andersen <tycho@docker.com>
To: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org, kernel-hardening@lists.openwall.com,
Marco Benatto <marco.antonio.780@gmail.com>,
Juerg Haefliger <juerg.haefliger@canonical.com>,
Juerg Haefliger <juerg.haefliger@hpe.com>,
Tycho Andersen <tycho@docker.com>
Subject: [PATCH v5 08/10] arm64/mm: Add support for XPFO to swiotlb
Date: Wed, 9 Aug 2017 14:07:53 -0600 [thread overview]
Message-ID: <20170809200755.11234-9-tycho@docker.com> (raw)
In-Reply-To: <20170809200755.11234-1-tycho@docker.com>
From: Juerg Haefliger <juerg.haefliger@hpe.com>
Pages that are unmapped by XPFO need to be temporarily mapped before the
__dma_{map,unmap}_area() operations and unmapped again afterwards (to
restore the original state), otherwise the cache maintenance would trigger
fatal page faults.
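
As a rough illustration, the wrapper added below behaves like the following
sketch (not the exact patch code; interrupt disabling is omitted, and
xpfo_page_is_unmapped(), kmap_atomic() and the __dma_*_area() helpers are the
ones already used elsewhere in this series):

	/*
	 * Sketch: temporarily map any pages that XPFO has unmapped from the
	 * kernel, perform the cache maintenance, then unmap them again so
	 * the original XPFO state is restored.
	 */
	static void xpfo_dma_op(bool map, const void *addr, size_t size, int dir)
	{
		struct page *page = virt_to_page(addr);
		/* +2: one to round size/PAGE_SIZE up, one more if addr is not page aligned */
		int i, pages = size / PAGE_SIZE + 2;
		void *kaddr[pages];

		memset(kaddr, 0, sizeof(kaddr));

		for (i = 0; i < pages; i++) {
			/* stop once we are past the end of the buffer */
			if (i && page_to_virt(page + i) >= addr + size)
				break;
			if (xpfo_page_is_unmapped(page + i))
				kaddr[i] = kmap_atomic(page + i);
		}

		if (map)
			__dma_map_area(addr, size, dir);
		else
			__dma_unmap_area(addr, size, dir);

		for (i = 0; i < pages; i++)
			if (kaddr[i])
				kunmap_atomic(kaddr[i]);
	}
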
Signed-off-by: Juerg Haefliger <juerg.haefliger@canonical.com>
Signed-off-by: Tycho Andersen <tycho@docker.com>
---
arch/arm64/include/asm/cacheflush.h | 11 +++++++++
arch/arm64/mm/dma-mapping.c | 32 +++++++++++++-------------
arch/arm64/mm/xpfo.c | 45 +++++++++++++++++++++++++++++++++++++
3 files changed, 72 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d74a284abdc2..b6a462e3b2f9 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -93,6 +93,17 @@ extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);
+#ifdef CONFIG_XPFO
+#include <linux/xpfo.h>
+#define _dma_map_area(addr, size, dir) \
+ xpfo_dma_map_unmap_area(true, addr, size, dir)
+#define _dma_unmap_area(addr, size, dir) \
+ xpfo_dma_map_unmap_area(false, addr, size, dir)
+#else
+#define _dma_map_area(addr, size, dir) __dma_map_area(addr, size, dir)
+#define _dma_unmap_area(addr, size, dir) __dma_unmap_area(addr, size, dir)
+#endif
+
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index f27d4dd04384..a79f200786ab 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -204,7 +204,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
if (!is_device_dma_coherent(dev) &&
(attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ _dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
return dev_addr;
}
@@ -216,7 +216,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
{
if (!is_device_dma_coherent(dev) &&
(attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ _dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
@@ -231,8 +231,8 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
if (!is_device_dma_coherent(dev) &&
(attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
for_each_sg(sgl, sg, ret, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ _dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
return ret;
}
@@ -248,8 +248,8 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
if (!is_device_dma_coherent(dev) &&
(attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ _dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
@@ -258,7 +258,7 @@ static void __swiotlb_sync_single_for_cpu(struct device *dev,
enum dma_data_direction dir)
{
if (!is_device_dma_coherent(dev))
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ _dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
@@ -268,7 +268,7 @@ static void __swiotlb_sync_single_for_device(struct device *dev,
{
swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
if (!is_device_dma_coherent(dev))
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ _dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
@@ -280,8 +280,8 @@ static void __swiotlb_sync_sg_for_cpu(struct device *dev,
if (!is_device_dma_coherent(dev))
for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ _dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
@@ -295,8 +295,8 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
if (!is_device_dma_coherent(dev))
for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ _dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
}
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
@@ -758,7 +758,7 @@ static void __iommu_sync_single_for_cpu(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
- __dma_unmap_area(phys_to_virt(phys), size, dir);
+ _dma_unmap_area(phys_to_virt(phys), size, dir);
}
static void __iommu_sync_single_for_device(struct device *dev,
@@ -771,7 +771,7 @@ static void __iommu_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
- __dma_map_area(phys_to_virt(phys), size, dir);
+ _dma_map_area(phys_to_virt(phys), size, dir);
}
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
@@ -811,7 +811,7 @@ static void __iommu_sync_sg_for_cpu(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(sg_virt(sg), sg->length, dir);
+ _dma_unmap_area(sg_virt(sg), sg->length, dir);
}
static void __iommu_sync_sg_for_device(struct device *dev,
@@ -825,7 +825,7 @@ static void __iommu_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(sg_virt(sg), sg->length, dir);
+ _dma_map_area(sg_virt(sg), sg->length, dir);
}
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
diff --git a/arch/arm64/mm/xpfo.c b/arch/arm64/mm/xpfo.c
index de03a652d48a..c4deb2b720cf 100644
--- a/arch/arm64/mm/xpfo.c
+++ b/arch/arm64/mm/xpfo.c
@@ -11,8 +11,10 @@
* the Free Software Foundation.
*/
+#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/xpfo.h>
#include <asm/tlbflush.h>
@@ -62,3 +64,46 @@ inline void xpfo_flush_kernel_page(struct page *page, int order)
flush_tlb_kernel_range(kaddr, kaddr + (1 << order) * size);
}
+
+inline void xpfo_dma_map_unmap_area(bool map, const void *addr, size_t size,
+ int dir)
+{
+ unsigned long flags;
+ struct page *page = virt_to_page(addr);
+
+ /*
+ * +2 here because we really want
+ * ceil(size / PAGE_SIZE), not floor(), and one extra in case things are
+ * not page aligned
+ */
+ int i, possible_pages = size / PAGE_SIZE + 2;
+ void *buf[possible_pages];
+
+ memset(buf, 0, sizeof(void *) * possible_pages);
+
+ local_irq_save(flags);
+
+ /* Map the first page */
+ if (xpfo_page_is_unmapped(page))
+ buf[0] = kmap_atomic(page);
+
+ /* Map the remaining pages */
+ for (i = 1; i < possible_pages; i++) {
+ if (page_to_virt(page + i) >= addr + size)
+ break;
+
+ if (xpfo_page_is_unmapped(page + i))
+ buf[i] = kmap_atomic(page + i);
+ }
+
+ if (map)
+ __dma_map_area(addr, size, dir);
+ else
+ __dma_unmap_area(addr, size, dir);
+
+ for (i = 0; i < possible_pages; i++)
+ if (buf[i])
+ kunmap_atomic(buf[i]);
+
+ local_irq_restore(flags);
+}
--
2.11.0