From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Subject: Re: [PATCH 1/2] dma-direct: improve swiotlb error reporting
Date: Tue, 4 Feb 2020 09:54:26 -0500 [thread overview]
Message-ID: <20200204145426.GB5400@localhost.localdomain> (raw)
In-Reply-To: <20200204063205.652456-2-hch@lst.de>
On Tue, Feb 04, 2020 at 07:32:04AM +0100, Christoph Hellwig wrote:
> Untangle the way dma_direct_map_page calls into swiotlb to be able
> to properly report errors where the swiotlb DMA address overflows the
> mask separately from overflows in the !swiotlb case. This means that
> swiotlb_map now has to do a little more work that duplicates
> dma_direct_map_page, but doing so greatly simplifies the calling
> convention.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Konrad Rzeszutek Wilk <konrad@darnok.org>
(Pick whichever tag you want :-))
> ---
> include/linux/swiotlb.h | 11 +++--------
> kernel/dma/direct.c | 16 +++++++--------
> kernel/dma/swiotlb.c | 43 +++++++++++++++++++++++------------------
> 3 files changed, 34 insertions(+), 36 deletions(-)
>
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index cde3dc18e21a..046bb94bd4d6 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
> size_t size, enum dma_data_direction dir,
> enum dma_sync_target target);
>
> +dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
> + size_t size, enum dma_data_direction dir, unsigned long attrs);
> +
> #ifdef CONFIG_SWIOTLB
> extern enum swiotlb_force swiotlb_force;
> extern phys_addr_t io_tlb_start, io_tlb_end;
> @@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
> return paddr >= io_tlb_start && paddr < io_tlb_end;
> }
>
> -bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
> - size_t size, enum dma_data_direction dir, unsigned long attrs);
> void __init swiotlb_exit(void);
> unsigned int swiotlb_max_segment(void);
> size_t swiotlb_max_mapping_size(struct device *dev);
> @@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
> {
> return false;
> }
> -static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
> - dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
> - unsigned long attrs)
> -{
> - return false;
> -}
> static inline void swiotlb_exit(void)
> {
> }
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 6af7ae83c4ad..82ad50aaf42c 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -357,13 +357,6 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
> EXPORT_SYMBOL(dma_direct_unmap_sg);
> #endif
>
> -static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
> - size_t size)
> -{
> - return swiotlb_force != SWIOTLB_FORCE &&
> - dma_capable(dev, dma_addr, size, true);
> -}
> -
> dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
> unsigned long offset, size_t size, enum dma_data_direction dir,
> unsigned long attrs)
> @@ -371,8 +364,13 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
> phys_addr_t phys = page_to_phys(page) + offset;
> dma_addr_t dma_addr = phys_to_dma(dev, phys);
>
> - if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
> - !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
> + if (unlikely(swiotlb_force == SWIOTLB_FORCE))
> + return swiotlb_map(dev, phys, size, dir, attrs);
> +
> + if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
> + if (swiotlb_force != SWIOTLB_NO_FORCE)
> + return swiotlb_map(dev, phys, size, dir, attrs);
> +
> report_addr(dev, dma_addr, size);
> return DMA_MAPPING_ERROR;
> }
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 9280d6f8271e..589bb9a40f21 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -22,6 +22,7 @@
>
> #include <linux/cache.h>
> #include <linux/dma-direct.h>
> +#include <linux/dma-noncoherent.h>
> #include <linux/mm.h>
> #include <linux/export.h>
> #include <linux/spinlock.h>
> @@ -656,35 +657,39 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
> }
>
> /*
> - * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
> + * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
> * to the device copy the data into it as well.
> */
> -bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
> - size_t size, enum dma_data_direction dir, unsigned long attrs)
> +dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
> + enum dma_data_direction dir, unsigned long attrs)
> {
> - trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
> + phys_addr_t swiotlb_addr;
> + dma_addr_t dma_addr;
>
> - if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
> - dev_warn_ratelimited(dev,
> - "Cannot do DMA to address %pa\n", phys);
> - return false;
> - }
> + trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
> + swiotlb_force);
>
> - /* Oh well, have to allocate and map a bounce buffer. */
> - *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
> - *phys, size, size, dir, attrs);
> - if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
> - return false;
> + swiotlb_addr = swiotlb_tbl_map_single(dev,
> + __phys_to_dma(dev, io_tlb_start),
> + paddr, size, size, dir, attrs);
> + if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
> + return DMA_MAPPING_ERROR;
>
> /* Ensure that the address returned is DMA'ble */
> - *dma_addr = __phys_to_dma(dev, *phys);
> - if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
> - swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
> + dma_addr = __phys_to_dma(dev, swiotlb_addr);
> + if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
> + swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
> attrs | DMA_ATTR_SKIP_CPU_SYNC);
> - return false;
> + dev_err_once(dev,
> + "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
> + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
> + WARN_ON_ONCE(1);
> + return DMA_MAPPING_ERROR;
> }
>
> - return true;
> + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> + arch_sync_dma_for_device(swiotlb_addr, size, dir);
> + return dma_addr;
> }
>
> size_t swiotlb_max_mapping_size(struct device *dev)
> --
> 2.24.1
>
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
next prev parent reply other threads:[~2020-02-04 14:54 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-02-04 6:32 improve dma-direct / swiotlb error reporting Christoph Hellwig
2020-02-04 6:32 ` [PATCH 1/2] dma-direct: improve " Christoph Hellwig
2020-02-04 14:54 ` Konrad Rzeszutek Wilk [this message]
2020-02-04 6:32 ` [PATCH 2/2] dma-direct: improve DMA mask overflow reporting Christoph Hellwig
2020-02-04 14:55 ` Konrad Rzeszutek Wilk
2020-02-04 15:14 ` Robin Murphy
2020-02-04 15:58 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200204145426.GB5400@localhost.localdomain \
--to=konrad.wilk@oracle.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox