linux-arch.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] dma-mapping: Add BUG_ON for uninitialized dma_ops
@ 2013-06-03 12:44 Michal Simek
  2013-06-10  9:00 ` Michal Simek
  2013-06-11  2:34 ` Bjorn Helgaas
  0 siblings, 2 replies; 15+ messages in thread
From: Michal Simek @ 2013-06-03 12:44 UTC (permalink / raw)
  To: linux-kernel; +Cc: Michal Simek, Michal Simek, Arnd Bergmann, linux-arch

[-- Attachment #1: Type: text/plain, Size: 4000 bytes --]

Check that dma_ops are initialized correctly.

Signed-off-by: Michal Simek <michal.simek@xilinx.com>
---
Functions dma_mmap_attrs() and dma_get_sgtable_attrs()
already have this check.

---
 include/asm-generic/dma-mapping-common.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index de8bf89..d430cab 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -16,6 +16,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 	dma_addr_t addr;

 	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	addr = ops->map_page(dev, virt_to_page(ptr),
 			     (unsigned long)ptr & ~PAGE_MASK, size,
@@ -33,6 +34,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
@@ -49,6 +51,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,

 	for_each_sg(sg, s, nents, i)
 		kmemcheck_mark_initialized(sg_virt(s), s->length);
+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	ents = ops->map_sg(dev, sg, nents, dir, attrs);
 	debug_dma_map_sg(dev, sg, nents, ents, dir);
@@ -62,6 +65,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
 	if (ops->unmap_sg)
@@ -76,6 +80,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	dma_addr_t addr;

 	kmemcheck_mark_initialized(page_address(page) + offset, size);
+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
@@ -88,6 +93,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, NULL);
@@ -100,6 +106,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
@@ -112,6 +119,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
@@ -126,6 +134,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
@@ -140,6 +149,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr + offset, size, dir);
@@ -152,6 +162,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
@@ -164,6 +175,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

+	BUG_ON(!ops);
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
--
1.8.2.3


[-- Attachment #2: Type: application/pgp-signature, Size: 198 bytes --]

^ permalink raw reply related	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2013-06-26 12:58 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-06-03 12:44 [PATCH] dma-mapping: Add BUG_ON for uninitialized dma_ops Michal Simek
2013-06-10  9:00 ` Michal Simek
2013-06-11  2:34 ` Bjorn Helgaas
2013-06-11  2:34   ` Bjorn Helgaas
2013-06-11 11:02   ` Marek Szyprowski
2013-06-11 11:02     ` Marek Szyprowski
2013-06-11 13:54     ` James Bottomley
2013-06-12 15:06       ` Arnd Bergmann
2013-06-13  8:51         ` Marek Szyprowski
2013-06-13 20:59         ` James Bottomley
2013-06-14 14:36           ` Arnd Bergmann
2013-06-14 16:14             ` James Bottomley
2013-06-19 15:20               ` Arnd Bergmann
2013-06-26 12:58                 ` Michal Simek
2013-06-26 12:58                   ` Michal Simek

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).