From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christoph Hellwig
Subject: [PATCH 14/25] openrisc: fix cache maintenance in the sync_single_for_device DMA operation
Date: Tue, 22 May 2018 14:04:19 +0200
Message-ID: <20180522120430.28709-15-hch@lst.de>
References: <20180522120430.28709-1-hch@lst.de>
In-Reply-To: <20180522120430.28709-1-hch@lst.de>
Sender: linux-kernel-owner@vger.kernel.org
To: iommu@lists.linux-foundation.org
Cc: linux-arch@vger.kernel.org, Michal Simek, Greentime Hu, Vincent Chen,
 linux-alpha@vger.kernel.org, linux-snps-arc@lists.infradead.org,
 linux-arm-kernel@lists.infradead.org, linux-c6x-dev@linux-c6x.org,
 linux-hexagon@vger.kernel.org, linux-m68k@lists.linux-m68k.org,
 nios2-dev@lists.rocketboards.org, openrisc@lists.librecores.org,
 linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org,
 sparclinux@vger.kernel.org, linux-xtensa@linux-xtensa.org,
 linux-kernel@vger.kernel.org
List-Id: linux-arch.vger.kernel.org

The cache maintenance in the sync_single_for_device operation should be
equivalent to that performed by the map_page operation, so that buffers
can be reused after being mapped.  Fix the openrisc implementation by
moving the cache maintenance performed in map_page into the
sync_single_for_device method and calling that from map_page.

Signed-off-by: Christoph Hellwig
---
 arch/openrisc/kernel/dma.c | 42 +++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 23 deletions(-)

diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 7cadff93d179..d6a0bf1fa713 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -133,19 +133,15 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages_exact(vaddr, size);
 }
 
-static dma_addr_t
-or1k_map_page(struct device *dev, struct page *page,
-	      unsigned long offset, size_t size,
-	      enum dma_data_direction dir,
-	      unsigned long attrs)
+static void
+or1k_sync_single_for_device(struct device *dev,
+			    dma_addr_t dma_handle, size_t size,
+			    enum dma_data_direction dir)
 {
 	unsigned long cl;
-	dma_addr_t addr = page_to_phys(page) + offset;
+	dma_addr_t addr = dma_handle;
 	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return addr;
-
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		/* Flush the dcache for the requested range */
@@ -168,6 +164,20 @@ or1k_map_page(struct device *dev, struct page *page,
 		break;
 	}
 
+}
+
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+	      unsigned long offset, size_t size,
+	      enum dma_data_direction dir,
+	      unsigned long attrs)
+{
+	unsigned long cl;
+	dma_addr_t addr = page_to_phys(page) + offset;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		or1k_sync_single_for_device(dev, addr, size, dir);
 	return addr;
 }
 
@@ -187,20 +197,6 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-static void
-or1k_sync_single_for_device(struct device *dev,
-			    dma_addr_t dma_handle, size_t size,
-			    enum dma_data_direction dir)
-{
-	unsigned long cl;
-	dma_addr_t addr = dma_handle;
-	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
-	/* Flush the dcache for the requested range */
-	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
-		mtspr(SPR_DCBFR, cl);
-}
-
 const struct dma_map_ops or1k_dma_map_ops = {
 	.alloc = or1k_dma_alloc,
 	.free = or1k_dma_free,
-- 
2.17.0
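For context, the buffer-reuse pattern that the changelog refers to looks
roughly like the driver-side sketch below.  It is illustrative only and not
part of the patch: the mydev_* helpers and the buffer size are hypothetical,
while dma_map_single(), dma_sync_single_for_cpu(), dma_sync_single_for_device()
and dma_unmap_single() are the standard streaming DMA API calls that dispatch
to the or1k ops touched above.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical device helpers -- stand-ins for real driver code. */
void mydev_start_rx(struct device *dev, dma_addr_t dma);
void mydev_wait_rx_done(struct device *dev);
void mydev_process(void *buf, size_t len);

#define MYDEV_BUF_SIZE	SZ_4K	/* hypothetical transfer size */

static int mydev_rx_loop(struct device *dev, void *buf, int iterations)
{
	dma_addr_t dma;
	int i;

	/* Map once; the map does the initial cache maintenance. */
	dma = dma_map_single(dev, buf, MYDEV_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	for (i = 0; i < iterations; i++) {
		mydev_start_rx(dev, dma);	/* device fills the buffer */
		mydev_wait_rx_done(dev);

		/* Hand the buffer back to the CPU and read the data. */
		dma_sync_single_for_cpu(dev, dma, MYDEV_BUF_SIZE,
					DMA_FROM_DEVICE);
		mydev_process(buf, MYDEV_BUF_SIZE);

		/*
		 * Hand the same buffer back to the device without
		 * remapping it.  For this to be safe the cache
		 * maintenance here must match what the original map
		 * did; before this patch openrisc always flushed,
		 * even for DMA_FROM_DEVICE buffers.
		 */
		dma_sync_single_for_device(dev, dma, MYDEV_BUF_SIZE,
					   DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, dma, MYDEV_BUF_SIZE, DMA_FROM_DEVICE);
	return 0;
}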