From: Christoph Hellwig <hch@lst.de>
Cc: linux-arch@vger.kernel.org, linux-xtensa@linux-xtensa.org,
	Michal Simek <monstr@monstr.eu>, Vincent Chen <deanbo422@gmail.com>,
	linux-c6x-dev@linux-c6x.org, linux-parisc@vger.kernel.org,
	linux-sh@vger.kernel.org, linux-hexagon@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-m68k@lists.linux-m68k.org,
	openrisc@lists.librecores.org, Greentime Hu <green.hu@gmail.com>,
	linux-alpha@vger.kernel.org, sparclinux@vger.kernel.org,
	nios2-dev@lists.rocketboards.org, linux-snps-arc@lists.infradead.org,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH 06/22] arc: use generic dma_noncoherent_ops
Date: Fri, 20 Apr 2018 10:02:57 +0200
Message-ID: <20180420080313.18796-7-hch@lst.de>
In-Reply-To: <20180420080313.18796-1-hch@lst.de>

Switch to the generic noncoherent direct mapping implementation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/arc/Kconfig                   |   4 +
 arch/arc/include/asm/Kbuild        |   1 +
 arch/arc/include/asm/dma-mapping.h |  21 -----
 arch/arc/mm/dma.c                  | 141 +++--------------------------
 4 files changed, 19 insertions(+), 148 deletions(-)
 delete mode 100644 arch/arc/include/asm/dma-mapping.h

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 7498aca4b887..89d47eac18b2 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,11 +9,15 @@
 config ARC
 	def_bool y
 	select ARC_TIMERS
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
+	select DMA_NONCOHERENT_OPS
+	select DMA_NONCOHERENT_MMAP
 	select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_FIND_FIRST_BIT
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 4bd5d4369e05..bbdcb955e18f 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -2,6 +2,7 @@
 generic-y += bugs.h
 generic-y += device.h
 generic-y += div64.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
 generic-y += fb.h
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
deleted file mode 100644
index 7a16824bfe98..000000000000
--- a/arch/arc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * DMA Mapping glue for ARC
- *
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ASM_ARC_DMA_MAPPING_H
-#define ASM_ARC_DMA_MAPPING_H
-
-extern const struct dma_map_ops arc_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &arc_dma_ops;
-}
-
-#endif
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 1dcc404b5aec..ecfeb5585cc7 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -16,13 +16,12 @@
  * The default DMA address == Phy address which is 0x8000_0000 based.
  */
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
-
-static void *arc_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	unsigned long order = get_order(size);
 	struct page *page;
@@ -89,7 +88,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	return kvaddr;
 }
 
-static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	phys_addr_t paddr = dma_handle;
@@ -105,9 +104,9 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
-static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
 {
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -135,7 +134,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * consistent before each use
  */
-static void _dma_cache_sync(phys_addr_t paddr, size_t size,
+static void _dma_cache_sync(struct device *dev, phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {
@@ -153,126 +152,14 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 	}
 }
 
-/*
- * arc_dma_map_page - map a portion of a page for streaming DMA
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- *
- * Note: while it takes struct page as arg, caller can "abuse" it to pass
- * a region larger than PAGE_SIZE, provided it is physically contiguous
- * and this still works correctly
- */
-static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	phys_addr_t paddr = page_to_phys(page) + offset;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		_dma_cache_sync(paddr, size, dir);
-
-	return paddr;
-}
-
-/*
- * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- *
- * Note: historically this routine was not implemented for ARC
- */
-static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	phys_addr_t paddr = handle;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		_dma_cache_sync(paddr, size, dir);
-}
-
-static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i)
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
-					      s->length, dir);
-
-	return nents;
-}
-
-static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i)
-		arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
-				   attrs);
-}
-
-static void arc_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
-{
-	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
-}
-
-static void arc_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+	return _dma_cache_sync(dev, paddr, size, dir);
 }
 
-static void arc_dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sglist, int nelems,
-		enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync(sg_phys(sg), sg->length, dir);
+	return _dma_cache_sync(dev, paddr, size, dir);
 }
-
-static void arc_dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sglist, int nelems,
-		enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync(sg_phys(sg), sg->length, dir);
-}
-
-static int arc_dma_supported(struct device *dev, u64 dma_mask)
-{
-	/* Support 32 bit DMA mask exclusively */
-	return dma_mask == DMA_BIT_MASK(32);
-}
-
-const struct dma_map_ops arc_dma_ops = {
-	.alloc			= arc_dma_alloc,
-	.free			= arc_dma_free,
-	.mmap			= arc_dma_mmap,
-	.map_page		= arc_dma_map_page,
-	.unmap_page		= arc_dma_unmap_page,
-	.map_sg			= arc_dma_map_sg,
-	.unmap_sg		= arc_dma_unmap_sg,
-	.sync_single_for_device	= arc_dma_sync_single_for_device,
-	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
-	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
-	.dma_supported		= arc_dma_supported,
-};
-EXPORT_SYMBOL(arc_dma_ops);
-- 
2.17.0
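
[Editorial sketch, not part of the posted patch.] For readers dipping into this patch in isolation: with arc_dma_ops gone, streaming mappings on ARC are handled by the generic noncoherent code introduced earlier in the series, which falls back to the common direct-mapping helpers and then calls the two arch_sync_dma_* hooks that dma.c now exports. The snippet below is a minimal reconstruction of that map_page path; the dma_noncoherent_map_page() name, the dma_direct_map_page() fallback and the exact error handling are assumptions modeled on the generic dma-direct/dma-noncoherent code of that kernel series, not a verbatim quote of it.

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

/*
 * Sketch of the generic streaming-map path (assumed shape): translate
 * the physical address via the direct-mapping helper, then hand the
 * range to the architecture so it can write back / invalidate its
 * caches before the device touches the memory.
 */
static dma_addr_t dma_noncoherent_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t addr;

	/* common phys -> dma translation shared with dma_direct_ops */
	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		/* ARC's hook from this patch: cache wback/inv for the range */
		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
					 size, dir);
	return addr;
}

The net effect is that a driver's dma_map_single()/dma_map_page() call on ARC now goes through this common path instead of the removed arc_dma_map_page(), so the cache-maintenance policy lives in one place (_dma_cache_sync() behind the arch_sync_dma_* hooks) rather than being duplicated per operation.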