* [PATCH 2/5] mm: Refactor remap_pfn_range() [not found] <1428424299-13721-1-git-send-email-chris@chris-wilson.co.uk> @ 2015-04-07 16:31 ` Chris Wilson 2015-04-07 20:27 ` Andrew Morton 2015-04-09 8:32 ` Joonas Lahtinen 2015-04-07 16:31 ` [PATCH 3/5] io-mapping: Always create a struct to hold metadata about the io-mapping Chris Wilson ` (2 subsequent siblings) 3 siblings, 2 replies; 10+ messages in thread From: Chris Wilson @ 2015-04-07 16:31 UTC (permalink / raw) To: Joonas Lahtinen Cc: intel-gfx, Chris Wilson, Andrew Morton, Kirill A. Shutemov, Peter Zijlstra, Rik van Riel, Mel Gorman, Cyrill Gorcunov, Johannes Weiner, linux-mm In preparation for exporting very similar functionality through another interface, gut the current remap_pfn_range(). The motivating factor here is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of errors rather than BUG_ON. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Cyrill Gorcunov <gorcunov@gmail.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: linux-mm@kvack.org --- mm/memory.c | 102 +++++++++++++++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 97839f5c8c30..acb06f40d614 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1614,71 +1614,81 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(vm_insert_mixed); +struct remap_pfn { + struct mm_struct *mm; + unsigned long addr; + unsigned long pfn; + pgprot_t prot; +}; + /* * maps a range of physical memory into the requested pages. the old * mappings are removed.
any references to nonexistent pages results * in null mappings (currently treated as "copy-on-access") */ -static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, - unsigned long pfn, pgprot_t prot) +static inline int remap_pfn(struct remap_pfn *r, pte_t *pte) +{ + if (!pte_none(*pte)) + return -EBUSY; + + set_pte_at(r->mm, r->addr, pte, + pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + r->addr += PAGE_SIZE; + return 0; +} + +static int remap_pte_range(struct remap_pfn *r, pmd_t *pmd, unsigned long end) { pte_t *pte; spinlock_t *ptl; + int err; - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + pte = pte_alloc_map_lock(r->mm, pmd, r->addr, &ptl); if (!pte) return -ENOMEM; + arch_enter_lazy_mmu_mode(); do { - BUG_ON(!pte_none(*pte)); - set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); - pfn++; - } while (pte++, addr += PAGE_SIZE, addr != end); + err = remap_pfn(r, pte++); + } while (err == 0 && r->addr < end); arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(pte - 1, ptl); - return 0; + return err; } -static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, - unsigned long addr, unsigned long end, - unsigned long pfn, pgprot_t prot) +static inline int remap_pmd_range(struct remap_pfn *r, pud_t *pud, unsigned long end) { pmd_t *pmd; - unsigned long next; + int err; - pfn -= addr >> PAGE_SHIFT; - pmd = pmd_alloc(mm, pud, addr); + pmd = pmd_alloc(r->mm, pud, r->addr); if (!pmd) return -ENOMEM; VM_BUG_ON(pmd_trans_huge(*pmd)); + do { - next = pmd_addr_end(addr, end); - if (remap_pte_range(mm, pmd, addr, next, - pfn + (addr >> PAGE_SHIFT), prot)) - return -ENOMEM; - } while (pmd++, addr = next, addr != end); - return 0; + err = remap_pte_range(r, pmd++, pmd_addr_end(r->addr, end)); + } while (err == 0 && r->addr < end); + + return err; } -static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, - unsigned long addr, unsigned long end, - unsigned long pfn, pgprot_t prot) +static inline int remap_pud_range(struct remap_pfn *r, pgd_t *pgd, unsigned long end) { pud_t *pud; - unsigned long next; + int err; - pfn -= addr >> PAGE_SHIFT; - pud = pud_alloc(mm, pgd, addr); + pud = pud_alloc(r->mm, pgd, r->addr); if (!pud) return -ENOMEM; + do { - next = pud_addr_end(addr, end); - if (remap_pmd_range(mm, pud, addr, next, - pfn + (addr >> PAGE_SHIFT), prot)) - return -ENOMEM; - } while (pud++, addr = next, addr != end); - return 0; + err = remap_pmd_range(r, pud++, pud_addr_end(r->addr, end)); + } while (err == 0 && r->addr < end); + + return err; } /** @@ -1694,10 +1704,9 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { - pgd_t *pgd; - unsigned long next; unsigned long end = addr + PAGE_ALIGN(size); - struct mm_struct *mm = vma->vm_mm; + struct remap_pfn r; + pgd_t *pgd; int err; /* @@ -1731,19 +1740,22 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; BUG_ON(addr >= end); - pfn -= addr >> PAGE_SHIFT; - pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); + + r.mm = vma->vm_mm; + r.addr = addr; + r.pfn = pfn; + r.prot = prot; + + pgd = pgd_offset(r.mm, addr); do { - next = pgd_addr_end(addr, end); - err = remap_pud_range(mm, pgd, addr, next, - pfn + (addr >> PAGE_SHIFT), prot); - if (err) - break; - } while (pgd++, addr = next, addr != end); + err = remap_pud_range(&r, pgd++, 
pgd_addr_end(r.addr, end)); + } while (err == 0 && r.addr < end); - if (err) + if (err) { untrack_pfn(vma, pfn, PAGE_ALIGN(size)); + BUG_ON(err == -EBUSY); + } return err; } -- 2.1.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply related [flat|nested] 10+ messages in thread
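The shape of the refactor above, reduced to a sketch for readers skimming the flattened diff: a small cursor struct replaces the (mm, addr, pfn, prot) parameter lists at every page-table level, each level loops while no error has occurred, and the leaf reports -EBUSY instead of BUG_ON on an already-populated PTE. This is an illustration only; slot_is_populated() and install_slot() are hypothetical stand-ins for the pte_none()/set_pte_at() pair, and the real code runs one such loop per PGD/PUD/PMD/PTE level:

	struct walk {
		unsigned long addr;	/* advanced only at the leaf */
		unsigned long pfn;
	};

	static int walk_one(struct walk *w)
	{
		if (slot_is_populated(w->addr))	/* hypothetical pte_none() check */
			return -EBUSY;		/* propagated up, not BUG_ON */
		install_slot(w->addr, w->pfn);	/* hypothetical set_pte_at() */
		w->addr += PAGE_SIZE;
		w->pfn++;
		return 0;
	}

	static int walk_range(struct walk *w, unsigned long end)
	{
		int err;

		do {
			err = walk_one(w);	/* or a recursion one level down */
		} while (err == 0 && w->addr < end);

		return err;			/* the first failure stops the walk */
	}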
* Re: [PATCH 2/5] mm: Refactor remap_pfn_range() 2015-04-07 16:31 ` [PATCH 2/5] mm: Refactor remap_pfn_range() Chris Wilson @ 2015-04-07 20:27 ` Andrew Morton 2015-04-08 9:45 ` Peter Zijlstra 2015-04-09 8:32 ` Joonas Lahtinen 1 sibling, 1 reply; 10+ messages in thread From: Andrew Morton @ 2015-04-07 20:27 UTC (permalink / raw) To: Chris Wilson Cc: Joonas Lahtinen, intel-gfx, Kirill A. Shutemov, Peter Zijlstra, Rik van Riel, Mel Gorman, Cyrill Gorcunov, Johannes Weiner, linux-mm On Tue, 7 Apr 2015 17:31:36 +0100 Chris Wilson <chris@chris-wilson.co.uk> wrote: > In preparation for exporting very similar functionality through another > interface, gut the current remap_pfn_range(). The motivating factor here > is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of > errors rather than BUG_ON. I'm not on intel-gfx and for some reason these patches didn't show up on linux-mm. I wanted to comment on "mutex: Export an interface to wrap a mutex lock" but http://lists.freedesktop.org/archives/intel-gfx/2015-April/064063.html doesn't tell me which mailing lists were cc'ed and I can't find that patch on linux-kernel. Can you please do something to make this easier for us?? And please fully document all the mutex interfaces which you just added. -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 2/5] mm: Refactor remap_pfn_range() 2015-04-07 20:27 ` Andrew Morton @ 2015-04-08 9:45 ` Peter Zijlstra 0 siblings, 0 replies; 10+ messages in thread From: Peter Zijlstra @ 2015-04-08 9:45 UTC (permalink / raw) To: Andrew Morton Cc: Chris Wilson, Joonas Lahtinen, intel-gfx, Kirill A. Shutemov, Rik van Riel, Mel Gorman, Cyrill Gorcunov, Johannes Weiner, linux-mm On Tue, Apr 07, 2015 at 01:27:21PM -0700, Andrew Morton wrote: > On Tue, 7 Apr 2015 17:31:36 +0100 Chris Wilson <chris@chris-wilson.co.uk> wrote: > > > In preparation for exporting very similar functionality through another > > interface, gut the current remap_pfn_range(). The motivating factor here > > is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of > > errors rather than BUG_ON. > > I'm not on intel-gfx and for some reason these patches didn't show up on > linux-mm. I wanted to comment on "mutex: Export an interface to wrap a > mutex lock" but > http://lists.freedesktop.org/archives/intel-gfx/2015-April/064063.html > doesn't tell me which mailing lists were cc'ed and I can't find that > patch on linux-kernel. > > Can you please do something to make this easier for us?? > > And please fully document all the mutex interfaces which you just > added. Also, please Cc locking people if you poke at mutexes.. -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 2/5] mm: Refactor remap_pfn_range() 2015-04-07 16:31 ` [PATCH 2/5] mm: Refactor remap_pfn_range() Chris Wilson 2015-04-07 20:27 ` Andrew Morton @ 2015-04-09 8:32 ` Joonas Lahtinen 1 sibling, 0 replies; 10+ messages in thread From: Joonas Lahtinen @ 2015-04-09 8:32 UTC (permalink / raw) To: Chris Wilson Cc: intel-gfx, Andrew Morton, Kirill A. Shutemov, Peter Zijlstra, Rik van Riel, Mel Gorman, Cyrill Gorcunov, Johannes Weiner, linux-mm On ti, 2015-04-07 at 17:31 +0100, Chris Wilson wrote: > In preparation for exporting very similar functionality through another > interface, gut the current remap_pfn_range(). The motivating factor here > is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of > errors rather than BUG_ON. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > Cc: Andrew Morton <akpm@linux-foundation.org> > Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> > Cc: Peter Zijlstra <peterz@infradead.org> > Cc: Rik van Riel <riel@redhat.com> > Cc: Mel Gorman <mgorman@suse.de> > Cc: Cyrill Gorcunov <gorcunov@gmail.com> > Cc: Johannes Weiner <hannes@cmpxchg.org> > Cc: linux-mm@kvack.org > --- > mm/memory.c | 102 +++++++++++++++++++++++++++++++++--------------------- > 1 file changed, 57 insertions(+), 45 deletions(-) > > diff --git a/mm/memory.c b/mm/memory.c > index 97839f5c8c30..acb06f40d614 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -1614,71 +1614,81 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, > } > EXPORT_SYMBOL(vm_insert_mixed); > > +struct remap_pfn { > + struct mm_struct *mm; > + unsigned long addr; > + unsigned long pfn; > + pgprot_t prot; > +}; > + > /* * maps a range of physical memory into the requested pages. the old > * mappings are removed. any references to nonexistent pages results > * in null mappings (currently treated as "copy-on-access") > */ > -static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, > - unsigned long addr, unsigned long end, > - unsigned long pfn, pgprot_t prot) > +static inline int remap_pfn(struct remap_pfn *r, pte_t *pte) I think you should add a brief comment of its own for this function, and keep it below the old comment so as not to cause unnecessary noise. Otherwise looks good.
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> > +{ > + if (!pte_none(*pte)) > + return -EBUSY; > + > + set_pte_at(r->mm, r->addr, pte, > + pte_mkspecial(pfn_pte(r->pfn, r->prot))); > + r->pfn++; > + r->addr += PAGE_SIZE; > + return 0; > +} > + > +static int remap_pte_range(struct remap_pfn *r, pmd_t *pmd, unsigned long end) > { > pte_t *pte; > spinlock_t *ptl; > + int err; > > - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); > + pte = pte_alloc_map_lock(r->mm, pmd, r->addr, &ptl); > if (!pte) > return -ENOMEM; > + > arch_enter_lazy_mmu_mode(); > do { > - BUG_ON(!pte_none(*pte)); > - set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); > - pfn++; > - } while (pte++, addr += PAGE_SIZE, addr != end); > + err = remap_pfn(r, pte++); > + } while (err == 0 && r->addr < end); > arch_leave_lazy_mmu_mode(); > + > pte_unmap_unlock(pte - 1, ptl); > - return 0; > + return err; > } > > -static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, > - unsigned long addr, unsigned long end, > - unsigned long pfn, pgprot_t prot) > +static inline int remap_pmd_range(struct remap_pfn *r, pud_t *pud, unsigned long end) > { > pmd_t *pmd; > - unsigned long next; > + int err; > > - pfn -= addr >> PAGE_SHIFT; > - pmd = pmd_alloc(mm, pud, addr); > + pmd = pmd_alloc(r->mm, pud, r->addr); > if (!pmd) > return -ENOMEM; > VM_BUG_ON(pmd_trans_huge(*pmd)); > + > do { > - next = pmd_addr_end(addr, end); > - if (remap_pte_range(mm, pmd, addr, next, > - pfn + (addr >> PAGE_SHIFT), prot)) > - return -ENOMEM; > - } while (pmd++, addr = next, addr != end); > - return 0; > + err = remap_pte_range(r, pmd++, pmd_addr_end(r->addr, end)); > + } while (err == 0 && r->addr < end); > + > + return err; > } > > -static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, > - unsigned long addr, unsigned long end, > - unsigned long pfn, pgprot_t prot) > +static inline int remap_pud_range(struct remap_pfn *r, pgd_t *pgd, unsigned long end) > { > pud_t *pud; > - unsigned long next; > + int err; > > - pfn -= addr >> PAGE_SHIFT; > - pud = pud_alloc(mm, pgd, addr); > + pud = pud_alloc(r->mm, pgd, r->addr); > if (!pud) > return -ENOMEM; > + > do { > - next = pud_addr_end(addr, end); > - if (remap_pmd_range(mm, pud, addr, next, > - pfn + (addr >> PAGE_SHIFT), prot)) > - return -ENOMEM; > - } while (pud++, addr = next, addr != end); > - return 0; > + err = remap_pmd_range(r, pud++, pud_addr_end(r->addr, end)); > + } while (err == 0 && r->addr < end); > + > + return err; > } > > /** > @@ -1694,10 +1704,9 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, > int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, > unsigned long pfn, unsigned long size, pgprot_t prot) > { > - pgd_t *pgd; > - unsigned long next; > unsigned long end = addr + PAGE_ALIGN(size); > - struct mm_struct *mm = vma->vm_mm; > + struct remap_pfn r; > + pgd_t *pgd; > int err; > > /* > @@ -1731,19 +1740,22 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, > vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; > > BUG_ON(addr >= end); > - pfn -= addr >> PAGE_SHIFT; > - pgd = pgd_offset(mm, addr); > flush_cache_range(vma, addr, end); > + > + r.mm = vma->vm_mm; > + r.addr = addr; > + r.pfn = pfn; > + r.prot = prot; > + > + pgd = pgd_offset(r.mm, addr); > do { > - next = pgd_addr_end(addr, end); > - err = remap_pud_range(mm, pgd, addr, next, > - pfn + (addr >> PAGE_SHIFT), prot); > - if (err) > - break; > - } while (pgd++, addr = next, addr != end); > + err = 
remap_pud_range(&r, pgd++, pgd_addr_end(r.addr, end)); > + } while (err == 0 && r.addr < end); > > - if (err) > + if (err) { > untrack_pfn(vma, pfn, PAGE_ALIGN(size)); > + BUG_ON(err == -EBUSY); > + } > > return err; > } -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply [flat|nested] 10+ messages in thread
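For concreteness, the brief comment requested above might read something like this (suggested wording only; the posted patch carries no such comment):

	/*
	 * remap_pfn() - install one special PTE and advance the cursor.
	 * Returns -EBUSY, for the caller to propagate back up the walk,
	 * if the PTE was already populated.
	 */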
* [PATCH 3/5] io-mapping: Always create a struct to hold metadata about the io-mapping [not found] <1428424299-13721-1-git-send-email-chris@chris-wilson.co.uk> 2015-04-07 16:31 ` [PATCH 2/5] mm: Refactor remap_pfn_range() Chris Wilson @ 2015-04-07 16:31 ` Chris Wilson 2015-04-09 7:58 ` Joonas Lahtinen 2015-04-07 16:31 ` [PATCH 4/5] mm: Export remap_io_mapping() Chris Wilson 2015-04-07 16:31 ` [PATCH 5/5] drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass Chris Wilson 3 siblings, 1 reply; 10+ messages in thread From: Chris Wilson @ 2015-04-07 16:31 UTC (permalink / raw) To: Joonas Lahtinen; +Cc: intel-gfx, Chris Wilson, linux-mm Currently, we only allocate a structure to hold metadata if we need to allocate an ioremap for every access, such as on x86-32. However, it would be useful to store basic information about the io-mapping, such as its page protection, on all platforms. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: linux-mm@kvack.org --- include/linux/io-mapping.h | 52 ++++++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 657fab4efab3..e053011f50bb 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -31,16 +31,17 @@ * See Documentation/io-mapping.txt */ -#ifdef CONFIG_HAVE_ATOMIC_IOMAP - -#include <asm/iomap.h> - struct io_mapping { resource_size_t base; unsigned long size; pgprot_t prot; + void __iomem *iomem; }; + +#ifdef CONFIG_HAVE_ATOMIC_IOMAP + +#include <asm/iomap.h> /* * For small address space machines, mapping large objects * into the kernel virtual space isn't practical. Where @@ -119,48 +120,59 @@ io_mapping_unmap(void __iomem *vaddr) #else #include <linux/uaccess.h> - -/* this struct isn't actually defined anywhere */ -struct io_mapping; +#include <asm/pgtable_types.h> /* Create the io_mapping object*/ static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { - return (struct io_mapping __force *) ioremap_wc(base, size); + struct io_mapping *iomap; + + iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); + if (!iomap) + return NULL; + + iomap->base = base; + iomap->size = size; + iomap->iomem = ioremap_wc(base, size); + iomap->prot = pgprot_writecombine(PAGE_KERNEL_IO); + + return iomap; } static inline void io_mapping_free(struct io_mapping *mapping) { - iounmap((void __force __iomem *) mapping); + iounmap(mapping->iomem); + kfree(mapping); } -/* Atomic map/unmap */ +/* Non-atomic map/unmap */ static inline void __iomem * -io_mapping_map_atomic_wc(struct io_mapping *mapping, - unsigned long offset) +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) { - pagefault_disable(); - return ((char __force __iomem *) mapping) + offset; + return mapping->iomem + offset; } static inline void -io_mapping_unmap_atomic(void __iomem *vaddr) +io_mapping_unmap(void __iomem *vaddr) { - pagefault_enable(); } -/* Non-atomic map/unmap */ +/* Atomic map/unmap */ static inline void __iomem * -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset) { - return ((char __force __iomem *) mapping) + offset; + pagefault_disable(); + return io_mapping_map_wc(mapping, offset); } static inline void -io_mapping_unmap(void __iomem *vaddr) +io_mapping_unmap_atomic(void __iomem *vaddr) { + io_mapping_unmap(vaddr); + pagefault_enable(); } #endif /* HAVE_ATOMIC_IOMAP */ -- 2.1.4 -- To unsubscribe, 
send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply related [flat|nested] 10+ messages in thread
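For context, the io_mapping API being reworked above is used roughly as follows (a minimal sketch; my_upload() is a hypothetical caller, bar_base and bar_size are assumed to describe a write-combining PCI BAR, src/len/offset are placeholders, and error handling is abbreviated):

	static int my_upload(resource_size_t bar_base, unsigned long bar_size,
			     const void *src, size_t len, unsigned long offset)
	{
		struct io_mapping *iomap;
		void __iomem *vaddr;

		iomap = io_mapping_create_wc(bar_base, bar_size);
		if (!iomap)
			return -ENOMEM;

		vaddr = io_mapping_map_wc(iomap, offset);	/* non-atomic variant */
		memcpy_toio(vaddr, src, len);			/* placeholder payload */
		io_mapping_unmap(vaddr);

		io_mapping_free(iomap);
		return 0;
	}

After this patch, both the x86-32 (HAVE_ATOMIC_IOMAP) path and the generic path go through a real struct io_mapping, so the page protection chosen at creation time stays available; the next patch in the series relies on exactly that.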
* Re: [PATCH 3/5] io-mapping: Always create a struct to hold metadata about the io-mapping 2015-04-07 16:31 ` [PATCH 3/5] io-mapping: Always create a struct to hold metadata about the io-mapping Chris Wilson @ 2015-04-09 7:58 ` Joonas Lahtinen 0 siblings, 0 replies; 10+ messages in thread From: Joonas Lahtinen @ 2015-04-09 7:58 UTC (permalink / raw) To: Chris Wilson; +Cc: intel-gfx, linux-mm On ti, 2015-04-07 at 17:31 +0100, Chris Wilson wrote: > Currently, we only allocate a structure to hold metadata if we need to > allocate an ioremap for every access, such as on x86-32. However, it > would be useful to store basic information about the io-mapping, such as > its page protection, on all platforms. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> > Cc: linux-mm@kvack.org > --- > include/linux/io-mapping.h | 52 ++++++++++++++++++++++++++++------------------ > 1 file changed, 32 insertions(+), 20 deletions(-) > > diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h > index 657fab4efab3..e053011f50bb 100644 > --- a/include/linux/io-mapping.h > +++ b/include/linux/io-mapping.h > @@ -31,16 +31,17 @@ > * See Documentation/io-mapping.txt > */ > > -#ifdef CONFIG_HAVE_ATOMIC_IOMAP > - > -#include <asm/iomap.h> > - > struct io_mapping { > resource_size_t base; > unsigned long size; > pgprot_t prot; > + void __iomem *iomem; > }; > > + > +#ifdef CONFIG_HAVE_ATOMIC_IOMAP > + > +#include <asm/iomap.h> > /* > * For small address space machines, mapping large objects > * into the kernel virtual space isn't practical. Where > @@ -119,48 +120,59 @@ io_mapping_unmap(void __iomem *vaddr) > #else > > #include <linux/uaccess.h> > - > -/* this struct isn't actually defined anywhere */ > -struct io_mapping; > +#include <asm/pgtable_types.h> > > /* Create the io_mapping object*/ > static inline struct io_mapping * > io_mapping_create_wc(resource_size_t base, unsigned long size) > { > - return (struct io_mapping __force *) ioremap_wc(base, size); > + struct io_mapping *iomap; > + > + iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); > + if (!iomap) > + return NULL; > + > + iomap->base = base; > + iomap->size = size; > + iomap->iomem = ioremap_wc(base, size); > + iomap->prot = pgprot_writecombine(PAGE_KERNEL_IO); > + > + return iomap; > } > > static inline void > io_mapping_free(struct io_mapping *mapping) > { > - iounmap((void __force __iomem *) mapping); > + iounmap(mapping->iomem); > + kfree(mapping); > } > > -/* Atomic map/unmap */ > +/* Non-atomic map/unmap */ > static inline void __iomem * > -io_mapping_map_atomic_wc(struct io_mapping *mapping, > - unsigned long offset) > +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) > { > - pagefault_disable(); > - return ((char __force __iomem *) mapping) + offset; > + return mapping->iomem + offset; > } > > static inline void > -io_mapping_unmap_atomic(void __iomem *vaddr) > +io_mapping_unmap(void __iomem *vaddr) > { > - pagefault_enable(); > } > > -/* Non-atomic map/unmap */ > +/* Atomic map/unmap */ > static inline void __iomem * > -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) > +io_mapping_map_atomic_wc(struct io_mapping *mapping, > + unsigned long offset) > { > - return ((char __force __iomem *) mapping) + offset; > + pagefault_disable(); > + return io_mapping_map_wc(mapping, offset); > } > > static inline void > -io_mapping_unmap(void __iomem *vaddr) > +io_mapping_unmap_atomic(void __iomem *vaddr) > { > + io_mapping_unmap(vaddr); > + 
pagefault_enable(); > } > > #endif /* HAVE_ATOMIC_IOMAP */ -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH 4/5] mm: Export remap_io_mapping() [not found] <1428424299-13721-1-git-send-email-chris@chris-wilson.co.uk> 2015-04-07 16:31 ` [PATCH 2/5] mm: Refactor remap_pfn_range() Chris Wilson 2015-04-07 16:31 ` [PATCH 3/5] io-mapping: Always create a struct to hold metadata about the io-mapping Chris Wilson @ 2015-04-07 16:31 ` Chris Wilson 2015-04-09 8:18 ` Joonas Lahtinen 2015-04-07 16:31 ` [PATCH 5/5] drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass Chris Wilson 3 siblings, 1 reply; 10+ messages in thread From: Chris Wilson @ 2015-04-07 16:31 UTC (permalink / raw) To: Joonas Lahtinen Cc: intel-gfx, Chris Wilson, Andrew Morton, Kirill A. Shutemov, Peter Zijlstra, Rik van Riel, Mel Gorman, Cyrill Gorcunov, Johannes Weiner, linux-mm This is similar to remap_pfn_range(), and uses the recently refactored code to do the page table walking. The key difference is that it back-propagates its errors, as is required for use from within a pagefault handler. The other difference is that it combines the page protection from the io-mapping, which is known from when the io-mapping is created, with the per-vma page protection flags. This avoids having to walk the entire system description to rediscover the special page protection established for the io-mapping. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Cyrill Gorcunov <gorcunov@gmail.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: linux-mm@kvack.org --- include/linux/mm.h | 4 ++++ mm/memory.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 47a93928b90f..3dfecd58adb0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2083,6 +2083,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); +struct io_mapping; +int remap_io_mapping(struct vm_area_struct *, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); diff --git a/mm/memory.c b/mm/memory.c index acb06f40d614..83bc5df3fafc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -61,6 +61,7 @@ #include <linux/string.h> #include <linux/dma-debug.h> #include <linux/debugfs.h> +#include <linux/io-mapping.h> #include <asm/io.h> #include <asm/pgalloc.h> @@ -1762,6 +1763,51 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, EXPORT_SYMBOL(remap_pfn_range); /** + * remap_io_mapping - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * @iomap: the source io_mapping + * + * Note: this is only safe if the mm semaphore is held when called.
+ */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + unsigned long end = addr + PAGE_ALIGN(size); + struct remap_pfn r; + pgd_t *pgd; + int err; + + if (WARN_ON(addr >= end)) + return -EINVAL; + +#define MUST_SET (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + BUG_ON(is_cow_mapping(vma->vm_flags)); + BUG_ON((vma->vm_flags & MUST_SET) != MUST_SET); +#undef MUST_SET + + r.mm = vma->vm_mm; + r.addr = addr; + r.pfn = pfn; + r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); + + pgd = pgd_offset(r.mm, addr); + do { + err = remap_pud_range(&r, pgd++, pgd_addr_end(r.addr, end)); + } while (err == 0 && r.addr < end); + + if (err) + zap_page_range_single(vma, addr, r.addr - addr, NULL); + + return err; +} +EXPORT_SYMBOL(remap_io_mapping); + +/** * vm_iomap_memory - remap memory to userspace * @vma: user vma to map to * @start: start of area -- 2.1.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply related [flat|nested] 10+ messages in thread
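As a usage sketch, modelled on the caller added in patch 5/5: a pagefault handler installs PTEs for the whole VMA in one call and relies on the error coming back instead of a BUG_ON. Here my_fault(), base_pfn and my_iomap are placeholders, and the translation of errors into VM_FAULT codes is an assumption, not code from the series:

	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		int err;

		/* Prefault every PTE covering the VMA in a single pass. */
		err = remap_io_mapping(vma, vma->vm_start, base_pfn,
				       vma->vm_end - vma->vm_start, my_iomap);
		switch (err) {
		case 0:
			return VM_FAULT_NOPAGE;	/* PTEs are now in place */
		case -ENOMEM:
			return VM_FAULT_OOM;	/* page-table allocation failed */
		default:
			return VM_FAULT_SIGBUS;	/* partial range was zapped above */
		}
	}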
* Re: [PATCH 4/5] mm: Export remap_io_mapping() 2015-04-07 16:31 ` [PATCH 4/5] mm: Export remap_io_mapping() Chris Wilson @ 2015-04-09 8:18 ` Joonas Lahtinen 0 siblings, 0 replies; 10+ messages in thread From: Joonas Lahtinen @ 2015-04-09 8:18 UTC (permalink / raw) To: Chris Wilson Cc: intel-gfx, Andrew Morton, Kirill A. Shutemov, Peter Zijlstra, Rik van Riel, Mel Gorman, Cyrill Gorcunov, Johannes Weiner, linux-mm On ti, 2015-04-07 at 17:31 +0100, Chris Wilson wrote: > This is similar to remap_pfn_range(), and uses the recently refactored > code to do the page table walking. The key difference is that it back- > propagates its errors, as is required for use from within a pagefault > handler. The other difference is that it combines the page protection > from the io-mapping, which is known from when the io-mapping is created, > with the per-vma page protection flags. This avoids having to walk the > entire system description to rediscover the special page protection > established for the io-mapping. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > Cc: Andrew Morton <akpm@linux-foundation.org> > Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> > Cc: Peter Zijlstra <peterz@infradead.org> > Cc: Rik van Riel <riel@redhat.com> > Cc: Mel Gorman <mgorman@suse.de> > Cc: Cyrill Gorcunov <gorcunov@gmail.com> > Cc: Johannes Weiner <hannes@cmpxchg.org> > Cc: linux-mm@kvack.org > --- > include/linux/mm.h | 4 ++++ > mm/memory.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ > 2 files changed, 50 insertions(+) > > diff --git a/include/linux/mm.h b/include/linux/mm.h > index 47a93928b90f..3dfecd58adb0 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -2083,6 +2083,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, > struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); > int remap_pfn_range(struct vm_area_struct *, unsigned long addr, > unsigned long pfn, unsigned long size, pgprot_t); > +struct io_mapping; This is unconditional code, so just move the struct forward declaration to the top of the file after "struct writeback_control" and others. > +int remap_io_mapping(struct vm_area_struct *, > + unsigned long addr, unsigned long pfn, unsigned long size, > + struct io_mapping *iomap); > int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); > int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, > unsigned long pfn); > diff --git a/mm/memory.c b/mm/memory.c > index acb06f40d614..83bc5df3fafc 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -61,6 +61,7 @@ > #include <linux/string.h> > #include <linux/dma-debug.h> > #include <linux/debugfs.h> > +#include <linux/io-mapping.h> > > #include <asm/io.h> > #include <asm/pgalloc.h> > @@ -1762,6 +1763,51 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, > EXPORT_SYMBOL(remap_pfn_range); > > /** > + * remap_io_mapping - remap an IO mapping to userspace > + * @vma: user vma to map to > + * @addr: target user address to start at > + * @pfn: physical address of kernel memory > + * @size: size of map area > + * @iomap: the source io_mapping > + * > + * Note: this is only safe if the mm semaphore is held when called.
> + */ > +int remap_io_mapping(struct vm_area_struct *vma, > + unsigned long addr, unsigned long pfn, unsigned long size, > + struct io_mapping *iomap) > +{ > + unsigned long end = addr + PAGE_ALIGN(size); > + struct remap_pfn r; > + pgd_t *pgd; > + int err; > + > + if (WARN_ON(addr >= end)) > + return -EINVAL; > + > +#define MUST_SET (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) > + BUG_ON(is_cow_mapping(vma->vm_flags)); > + BUG_ON((vma->vm_flags & MUST_SET) != MUST_SET); > +#undef MUST_SET > + I think that is a bit too general a name for the define; maybe something along the lines of REMAP_IO_NEEDED_FLAGS, outside of the function... and then it doesn't have to be #undeffed. And if it is kept inside the function, then at least prefix it with _. But I don't see why not make it available outside too. Otherwise looking good. Regards, Joonas > + r.mm = vma->vm_mm; > + r.addr = addr; > + r.pfn = pfn; > + r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | > + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); > + > + pgd = pgd_offset(r.mm, addr); > + do { > + err = remap_pud_range(&r, pgd++, pgd_addr_end(r.addr, end)); > + } while (err == 0 && r.addr < end); > + > + if (err) > + zap_page_range_single(vma, addr, r.addr - addr, NULL); > + > + return err; > +} > +EXPORT_SYMBOL(remap_io_mapping); > + > +/** > * vm_iomap_memory - remap memory to userspace > * @vma: user vma to map to > * @start: start of area -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply [flat|nested] 10+ messages in thread
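For concreteness, the rename suggested above would come out to something like this (one possible reading of the review comment, not code from the thread); the define moves above the function, so no #undef is needed:

	/* Flags a VMA must already carry before remap_io_mapping() may be used. */
	#define REMAP_IO_NEEDED_FLAGS \
		(VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

	...
		BUG_ON((vma->vm_flags & REMAP_IO_NEEDED_FLAGS) != REMAP_IO_NEEDED_FLAGS);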
* [PATCH 5/5] drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass [not found] <1428424299-13721-1-git-send-email-chris@chris-wilson.co.uk> ` (2 preceding siblings ...) 2015-04-07 16:31 ` [PATCH 4/5] mm: Export remap_io_mapping() Chris Wilson @ 2015-04-07 16:31 ` Chris Wilson 2015-04-09 8:00 ` Joonas Lahtinen 3 siblings, 1 reply; 10+ messages in thread From: Chris Wilson @ 2015-04-07 16:31 UTC (permalink / raw) To: Joonas Lahtinen; +Cc: intel-gfx, Chris Wilson, linux-mm On an Ivybridge i7-3720qm with 1600MHz DDR3, with 32 fences, Upload rate for 2 linear surfaces: 8134MiB/s -> 8154MiB/s Upload rate for 2 tiled surfaces: 8625MiB/s -> 8632MiB/s Upload rate for 4 linear surfaces: 8127MiB/s -> 8134MiB/s Upload rate for 4 tiled surfaces: 8602MiB/s -> 8629MiB/s Upload rate for 8 linear surfaces: 8124MiB/s -> 8137MiB/s Upload rate for 8 tiled surfaces: 8603MiB/s -> 8624MiB/s Upload rate for 16 linear surfaces: 8123MiB/s -> 8128MiB/s Upload rate for 16 tiled surfaces: 8606MiB/s -> 8618MiB/s Upload rate for 32 linear surfaces: 8121MiB/s -> 8128MiB/s Upload rate for 32 tiled surfaces: 8605MiB/s -> 8614MiB/s Upload rate for 64 linear surfaces: 8121MiB/s -> 8127MiB/s Upload rate for 64 tiled surfaces: 3017MiB/s -> 5202MiB/s Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Testcase: igt/gem_fence_upload/performance Testcase: igt/gem_mmap_gtt Reviewed-by: Brad Volkin <bradley.d.volkin@intel.com> Cc: linux-mm@kvack.org --- drivers/gpu/drm/i915/i915_gem.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7ab8e0039790..90d772f72276 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1667,25 +1667,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); pfn >>= PAGE_SHIFT; - if (!obj->fault_mappable) { - unsigned long size = min_t(unsigned long, - vma->vm_end - vma->vm_start, - obj->base.size); - int i; + ret = remap_io_mapping(vma, + vma->vm_start, pfn, vma->vm_end - vma->vm_start, + dev_priv->gtt.mappable); + if (ret) + goto unpin; - for (i = 0; i < size >> PAGE_SHIFT; i++) { - ret = vm_insert_pfn(vma, - (unsigned long)vma->vm_start + i * PAGE_SIZE, - pfn + i); - if (ret) - break; - } + obj->fault_mappable = true; - obj->fault_mappable = true; - } else - ret = vm_insert_pfn(vma, - (unsigned long)vmf->virtual_address, - pfn + page_offset); unpin: i915_gem_object_ggtt_unpin(obj); unlock: -- 2.1.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH 5/5] drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass 2015-04-07 16:31 ` [PATCH 5/5] drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass Chris Wilson @ 2015-04-09 8:00 ` Joonas Lahtinen 0 siblings, 0 replies; 10+ messages in thread From: Joonas Lahtinen @ 2015-04-09 8:00 UTC (permalink / raw) To: Chris Wilson; +Cc: intel-gfx, linux-mm On ti, 2015-04-07 at 17:31 +0100, Chris Wilson wrote: > On an Ivybridge i7-3720qm with 1600MHz DDR3, with 32 fences, > Upload rate for 2 linear surfaces: 8134MiB/s -> 8154MiB/s > Upload rate for 2 tiled surfaces: 8625MiB/s -> 8632MiB/s > Upload rate for 4 linear surfaces: 8127MiB/s -> 8134MiB/s > Upload rate for 4 tiled surfaces: 8602MiB/s -> 8629MiB/s > Upload rate for 8 linear surfaces: 8124MiB/s -> 8137MiB/s > Upload rate for 8 tiled surfaces: 8603MiB/s -> 8624MiB/s > Upload rate for 16 linear surfaces: 8123MiB/s -> 8128MiB/s > Upload rate for 16 tiled surfaces: 8606MiB/s -> 8618MiB/s > Upload rate for 32 linear surfaces: 8121MiB/s -> 8128MiB/s > Upload rate for 32 tiled surfaces: 8605MiB/s -> 8614MiB/s > Upload rate for 64 linear surfaces: 8121MiB/s -> 8127MiB/s > Upload rate for 64 tiled surfaces: 3017MiB/s -> 5202MiB/s > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > Testcase: igt/gem_fence_upload/performance > Testcase: igt/gem_mmap_gtt > Reviewed-by: Brad Volkin <bradley.d.volkin@intel.com> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> > Cc: linux-mm@kvack.org > --- > drivers/gpu/drm/i915/i915_gem.c | 23 ++++++----------------- > 1 file changed, 6 insertions(+), 17 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index 7ab8e0039790..90d772f72276 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -1667,25 +1667,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) > pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); > pfn >>= PAGE_SHIFT; > > - if (!obj->fault_mappable) { > - unsigned long size = min_t(unsigned long, > - vma->vm_end - vma->vm_start, > - obj->base.size); > - int i; > + ret = remap_io_mapping(vma, > + vma->vm_start, pfn, vma->vm_end - vma->vm_start, > + dev_priv->gtt.mappable); > + if (ret) > + goto unpin; > > - for (i = 0; i < size >> PAGE_SHIFT; i++) { > - ret = vm_insert_pfn(vma, > - (unsigned long)vma->vm_start + i * PAGE_SIZE, > - pfn + i); > - if (ret) > - break; > - } > + obj->fault_mappable = true; > > - obj->fault_mappable = true; > - } else > - ret = vm_insert_pfn(vma, > - (unsigned long)vmf->virtual_address, > - pfn + page_offset); > unpin: > i915_gem_object_ggtt_unpin(obj); > unlock: -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> ^ permalink raw reply [flat|nested] 10+ messages in thread