* [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
@ 2009-05-12 8:35 Li Yang
2009-05-12 8:35 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Li Yang
2009-06-11 4:13 ` [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Kumar Gala
0 siblings, 2 replies; 19+ messages in thread
From: Li Yang @ 2009-05-12 8:35 UTC (permalink / raw)
To: akpm, galak, davem, mporter
Cc: linuxppc-dev, Zhang Wei, Li Yang, linux-kernel, netdev
Add the mapping functions used to support direct IO memory access of
rapidIO.
Signed-off-by: Zhang Wei <zw@zh-kernel.org>
Signed-off-by: Li Yang <leoli@freescale.com>
---
drivers/rapidio/rio.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++
include/linux/rio.h | 25 ++++++++++++
include/linux/rio_drv.h | 24 +++++++++---
3 files changed, 138 insertions(+), 6 deletions(-)
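A minimal usage sketch of the API added below (illustrative only: the addresses, sizes and the helper name example_setup_windows are hypothetical; the rio_* calls and the destid field match this patch series):

static int example_setup_windows(struct rio_mport *mport, struct rio_dev *rdev,
				 dma_addr_t local_buf, phys_addr_t local_win)
{
	struct resource rio_res;
	int rc;

	/* Describe a 1 MB region of RapidIO space. */
	rio_init_io_res(&rio_res, 0x10000000, 0x100000, "example", IORESOURCE_MEM);

	/* Inbound: remote RapidIO masters writing to this region land in our
	 * local buffer at local_buf. */
	rc = rio_map_inb_region(mport, &rio_res, local_buf, 0);
	if (rc)
		return rc;

	/* Outbound: CPU accesses to local_win (after ioremap) are forwarded
	 * to the same RapidIO region on device rdev. */
	rc = rio_map_outb_region(mport, rdev->destid, &rio_res, local_win, 0);
	if (rc)
		rio_unmap_inb_region(mport, local_buf);

	return rc;
}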
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c78..224a076 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -2,6 +2,8 @@
* RapidIO interconnect services
* (RapidIO Interconnect Specification, http://www.rapidio.org)
*
+ * Copyright (C) 2007-2009 Freescale Semiconductor, Inc.
+ *
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
@@ -24,11 +26,23 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/hardirq.h>
#include "rio.h"
static LIST_HEAD(rio_mports);
+static DEFINE_SPINLOCK(rio_config_lock);
+
+struct resource rio_resource = {
+ .name = "RapidIO GSM",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+};
+EXPORT_SYMBOL(rio_resource);
+
/**
* rio_local_get_device_id - Get the base/extended device id for a port
* @port: RIO master port from which to get the deviceid
@@ -333,6 +347,87 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
}
/**
+ * rio_map_inb_region -- Map an inbound memory region.
+ * @mport: Master port.
+ * @rio_res: Resource describing the RapidIO space region to map.
+ * @local: Local memory (DMA) address the region maps to.
+ * @rflags: Flags for the mapping.
+ *
+ * Return: 0 on success.
+ *
+ * This function creates the mapping from RapidIO space to local memory.
+ */
+int rio_map_inb_region(struct rio_mport *mport, struct resource *rio_res,
+ dma_addr_t local, u32 rflags)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!mport->mops)
+ return -1;
+ spin_lock_irqsave(&rio_config_lock, flags);
+ rc = mport->mops->map_inb(mport, local, rio_res->start,
+ resource_size(rio_res), rflags);
+ spin_unlock_irqrestore(&rio_config_lock, flags);
+ return rc;
+}
+
+/**
+ * rio_map_outb_region -- Map an outbound memory region.
+ * @mport: Master port.
+ * @tid: Target RapidIO device id.
+ * @rio_res: Resource describing the RapidIO space region to map.
+ * @lstart: Local physical address the region maps from.
+ * @rflags: Flags for the mapping.
+ *
+ * Return: 0 on success.
+ *
+ * This function creates the mapping from local memory to RapidIO space.
+ */
+int rio_map_outb_region(struct rio_mport *mport, u16 tid,
+ struct resource *rio_res, phys_addr_t lstart, u32 rflags)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!mport->mops)
+ return -1;
+ spin_lock_irqsave(&rio_config_lock, flags);
+ rc = mport->mops->map_outb(mport, lstart, rio_res->start,
+ resource_size(rio_res), tid, rflags);
+ spin_unlock_irqrestore(&rio_config_lock, flags);
+ return rc;
+}
+
+/**
+ * rio_unmap_inb_region -- Unmap an inbound memory region.
+ * @mport: Master port.
+ * @lstart: Local memory (DMA) address of the mapping to remove.
+ */
+void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart)
+{
+ unsigned long flags;
+ if (!mport->mops)
+ return;
+ spin_lock_irqsave(&rio_config_lock, flags);
+ mport->mops->unmap_inb(mport, lstart);
+ spin_unlock_irqrestore(&rio_config_lock, flags);
+}
+
+/**
+ * rio_unmap_outb_region -- Unmap an outbound memory region.
+ * @mport: Master port.
+ * @lstart: Local physical address of the mapping to remove.
+ */
+void rio_unmap_outb_region(struct rio_mport *mport, phys_addr_t lstart)
+{
+ unsigned long flags;
+ if (!mport->mops)
+ return;
+ spin_lock_irqsave(&rio_config_lock, flags);
+ mport->mops->unmap_outb(mport, lstart);
+ spin_unlock_irqrestore(&rio_config_lock, flags);
+}
+
+/**
* rio_mport_get_feature - query for devices' extended features
* @port: Master port to issue transaction
* @local: Indicate a local master port or remote device access
diff --git a/include/linux/rio.h b/include/linux/rio.h
index dc0c755..dd61538 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -176,6 +176,7 @@ struct rio_mport {
struct rio_msg outb_msg[RIO_MAX_MBOX];
int host_deviceid; /* Host device ID */
struct rio_ops *ops; /* maintenance transaction functions */
+ struct rio_mem_ops *mops; /* Memory functions */
unsigned char id; /* port ID, unique among all ports */
unsigned char index; /* port index, unique among all port
interfaces of the same type */
@@ -185,6 +186,7 @@ struct rio_mport {
*/
enum rio_phy_type phy_type; /* RapidIO phy type */
unsigned char name[40];
+ struct device *dev;
void *priv; /* Master port private data */
};
@@ -319,6 +321,29 @@ struct rio_route_ops {
u16 table, u16 route_destid, u8 * route_port);
};
+extern struct resource rio_resource;
+#define request_rio_region(start, n, name, flag) \
+ __request_region(&rio_resource, (start), (n), (name), (flag))
+#define release_rio_region(start, n) __release_region(&rio_resource, (start), (n))
+
+/**
+ * struct rio_mem_ops - RIO memory mapping operations of a master port.
+ * @map_inb: The function for mapping inbound memory window.
+ * @map_outb: The function for mapping outbound memory window.
+ * @unmap_inb: The function for unmapping inbound memory window.
+ * @unmap_outb: The function for unmapping outbound memory window.
+ */
+struct rio_mem_ops {
+ int (*map_inb) (struct rio_mport *, dma_addr_t lstart,
+ resource_size_t rstart,
+ resource_size_t size, u32 flags);
+ int (*map_outb) (struct rio_mport *, phys_addr_t lstart,
+ resource_size_t rstart,
+ resource_size_t size, u16 tid, u32 flags);
+ void (*unmap_inb) (struct rio_mport *, dma_addr_t lstart);
+ void (*unmap_outb) (struct rio_mport *, phys_addr_t lstart);
+};
+
/* Architecture and hardware-specific functions */
extern int rio_init_mports(void);
extern void rio_register_mport(struct rio_mport *);
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index c93a58a..685f2da 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -332,6 +332,16 @@ static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end)
res->flags = RIO_RESOURCE_DOORBELL;
}
+static inline void rio_init_io_res(struct resource *res, resource_size_t start,
+ resource_size_t size, const char *name, unsigned long flag)
+{
+ memset(res, 0, sizeof(struct resource));
+ res->start = start;
+ res->end = start + size - 1;
+ res->name = name;
+ res->flags = flag;
+}
+
/**
* RIO_DEVICE - macro used to describe a specific RIO device
* @dev: the 16 bit RIO device ID
@@ -406,12 +416,13 @@ extern int rio_release_inb_dbell(struct rio_mport *, u16, u16);
extern struct resource *rio_request_outb_dbell(struct rio_dev *, u16, u16);
extern int rio_release_outb_dbell(struct rio_dev *, struct resource *);
-/* Memory region management */
-int rio_claim_resource(struct rio_dev *, int);
-int rio_request_regions(struct rio_dev *, char *);
-void rio_release_regions(struct rio_dev *);
-int rio_request_region(struct rio_dev *, int, char *);
-void rio_release_region(struct rio_dev *, int);
+/* Memory low-level mapping functions */
+extern int rio_map_inb_region(struct rio_mport *, struct resource *,
+ dma_addr_t, u32);
+extern int rio_map_outb_region(struct rio_mport *, u16, struct resource *,
+ phys_addr_t, u32);
+extern void rio_unmap_inb_region(struct rio_mport *, dma_addr_t);
+extern void rio_unmap_outb_region(struct rio_mport *, phys_addr_t);
/* LDM support */
int rio_register_driver(struct rio_driver *);
@@ -461,5 +472,6 @@ extern u16 rio_local_get_device_id(struct rio_mport *port);
extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
struct rio_dev *from);
+extern u32 rio_get_mport_id(struct rio_mport *);
#endif /* LINUX_RIO_DRV_H */
--
1.5.4
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree
2009-05-12 8:35 [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Li Yang
@ 2009-05-12 8:35 ` Li Yang
2009-05-12 8:36 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Li Yang
2009-05-13 22:08 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Kumar Gala
2009-06-11 4:13 ` [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Kumar Gala
1 sibling, 2 replies; 19+ messages in thread
From: Li Yang @ 2009-05-12 8:35 UTC (permalink / raw)
To: akpm, galak, davem, mporter; +Cc: linuxppc-dev, Li Yang, linux-kernel, netdev
Use the LAW (local access window) address from the device tree instead of
the fixed address hard-coded in the old code.
Signed-off-by: Li Yang <leoli@freescale.com>
---
arch/powerpc/sysdev/fsl_rio.c | 12 +++++++-----
1 files changed, 7 insertions(+), 5 deletions(-)
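The change is equivalent to the old behaviour for the default layout; a quick check of the arithmetic (assuming the 4 KB window granularity implied by the >> 12 shifts in the diff below):

/* Old hard-coded values:
 *   maintenance window base: 0x000c0000 << 12 = 0xc0000000
 *   doorbell window base:    0x000c0400 << 12 = 0xc0400000
 *                            = 0xc0000000 + RIO_MAINT_WIN_SIZE (4 MB)
 * The new code derives the same values from the device-tree LAW:
 */
out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
out_be32(&priv->dbell_atmu_regs->rowbar,
	 (law_start + RIO_MAINT_WIN_SIZE) >> 12);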
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index abdb124..fa0720f 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1077,8 +1077,9 @@ int fsl_rio_setup(struct of_device *dev)
INIT_LIST_HEAD(&port->dbells);
port->iores.start = law_start;
- port->iores.end = law_start + law_size;
+ port->iores.end = law_start + law_size - 1;
port->iores.flags = IORESOURCE_MEM;
+ port->iores.name = "rio_io_win";
priv->bellirq = irq_of_parse_and_map(dev->node, 2);
priv->txirq = irq_of_parse_and_map(dev->node, 3);
@@ -1156,14 +1157,15 @@ int fsl_rio_setup(struct of_device *dev)
out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);
/* Configure maintenance transaction window */
- out_be32(&priv->maint_atmu_regs->rowbar, 0x000c0000);
- out_be32(&priv->maint_atmu_regs->rowar, 0x80077015);
+ out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
+ out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */
priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
/* Configure outbound doorbell window */
- out_be32(&priv->dbell_atmu_regs->rowbar, 0x000c0400);
- out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b);
+ out_be32(&priv->dbell_atmu_regs->rowbar,
+ (law_start + RIO_MAINT_WIN_SIZE) >> 12);
+ out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
fsl_rio_doorbell_init(port);
return 0;
--
1.5.4
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block
2009-05-12 8:35 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Li Yang
@ 2009-05-12 8:36 ` Li Yang
2009-05-12 8:36 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Li Yang
2009-05-12 22:05 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Andrew Morton
2009-05-13 22:08 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Kumar Gala
1 sibling, 2 replies; 19+ messages in thread
From: Li Yang @ 2009-05-12 8:36 UTC (permalink / raw)
To: akpm, galak, davem, mporter
Cc: linuxppc-dev, Zhang Wei, Li Yang, linux-kernel, netdev
The RIO memory map functions are used to support direct IO memory access
to RapidIO space. This patch adds that support to the Freescale RapidIO
block driver.
Signed-off-by: Zhang Wei <zw@zh-kernel.org>
Signed-off-by: Li Yang <leoli@freescale.com>
---
arch/powerpc/sysdev/fsl_rio.c | 217 ++++++++++++++++++++++++++++++++++++++++-
1 files changed, 215 insertions(+), 2 deletions(-)
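A worked example of the inbound window encoding this patch programs (illustrative: the addresses are arbitrary, the bit layout and the log2(size) - 1 size code follow the code below, and the 0xf target field is assumed to select local memory):

/* Map a 1 MB buffer at local address 0x20000000 to RapidIO address
 * 0x10000000, with the default flags (RIO_NREAD_R << 4) | RIO_NWRITE_R.
 */
u32 riwtar = (0x20000000 >> 12) & 0xfffff;	/* = 0x20000, local base   */
u32 riwbar = (0x10000000 >> 12) & 0xfffff;	/* = 0x10000, RapidIO base */
u32 riwar  = 0x80000000			/* window enable                     */
	   | (0xf << 20)		/* target: local memory (assumed)    */
	   | (0x55 << 12)		/* (RIO_NREAD_R << 4) | RIO_NWRITE_R */
	   | (__ilog2(0x100000) - 1);	/* size code: 20 - 1 = 0x13 (1 MB)   */
					/* riwar == 0x80f55013               */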
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index fa0720f..7056dc0 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -31,6 +31,9 @@
#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq)
#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
+#define IS_64BIT_DMA ((sizeof(dma_addr_t) == 8) ? 1 : 0)
+#define IS_64BIT_PHYS ((sizeof(phys_addr_t) == 8) ? 1 : 0)
+
#define RIO_ATMU_REGS_OFFSET 0x10c00
#define RIO_P_MSG_REGS_OFFSET 0x11000
#define RIO_S_MSG_REGS_OFFSET 0x13000
@@ -40,6 +43,15 @@
#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
#define RIO_MAINT_WIN_SIZE 0x400000
#define RIO_DBELL_WIN_SIZE 0x1000
+#define RIO_MAX_INB_ATMU 4
+#define RIO_MAX_OUTB_ATMU 8
+#define RIO_INB_ATMU_REGS_OFFSET 0x10de0
+#define RIO_ATMU_EN_MASK 0x80000000
+
+#define RIO_NREAD 0x4
+#define RIO_NWRITE 0x4
+#define RIO_NWRITE_R 0x5
+#define RIO_NREAD_R 0x5
#define RIO_MSG_OMR_MUI 0x00000002
#define RIO_MSG_OSR_TE 0x00000080
@@ -83,6 +95,15 @@ struct rio_atmu_regs {
u32 pad3[3];
};
+struct rio_inb_atmu_regs {
+ u32 riwtar;
+ u32 pad1;
+ u32 riwbar;
+ u32 pad2;
+ u32 riwar;
+ u32 pad3[3];
+};
+
struct rio_msg_regs {
u32 omr;
u32 osr;
@@ -341,6 +362,188 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
}
/**
+ * fsl_rio_map_inb_mem -- Mapping inbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ * @rstart: RapidIO space start address.
+ * @size: The mapping region size.
+ * @flags: Flags for mapping. 0 for using default flags.
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will create the inbound mapping
+ * from rstart to lstart.
+ */
+static int fsl_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
+ resource_size_t rstart, resource_size_t size, u32 flags)
+{
+ int i;
+ struct rio_priv *priv = mport->priv;
+ struct rio_inb_atmu_regs __iomem *inbatmu = (struct rio_inb_atmu_regs *)
+ (priv->regs_win + RIO_INB_ATMU_REGS_OFFSET) - 1;
+ int size_ffs;
+ resource_size_t align;
+
+ if (flags == 0)
+ flags = (RIO_NREAD_R << 4) | RIO_NWRITE_R;
+
+ align = (size < 0x1000) ? 0x1000 : 1 << (__ilog2(size - 1) + 1);
+
+ /* Align the size */
+ if ((lstart + size) > (_ALIGN_DOWN(lstart, align) + align)) {
+ size_ffs = __ffs(_ALIGN_DOWN(lstart + size - 1, align));
+ size = 1 << (size_ffs + (((_ALIGN_DOWN(lstart, 1 << size_ffs) +
+ (1 << size_ffs)) < (lstart + size)) ? 1 : 0));
+ } else
+ size = align;
+
+ if ((lstart & (size - 1)) != (rstart & (size - 1))) {
+ dev_err(mport->dev, "The local address 0x%llx can not be "
+ "aligned to the same size 0x%llx with the RapidIO "
+ "space address 0x%llx!\n", (unsigned long long)lstart,
+ (unsigned long long)size, (unsigned long long)rstart);
+ return -EINVAL;
+ }
+
+ /* Search for free inbound ATMU */
+ for (i = 1;
+ (i <= RIO_MAX_INB_ATMU) && (inbatmu->riwar & RIO_ATMU_EN_MASK);
+ i++, inbatmu--)
+ ;
+
+ if (i > RIO_MAX_INB_ATMU) {
+ dev_err(mport->dev, "No free inbound ATMU!\n");
+ return -EBUSY;
+ }
+ out_be32(&inbatmu->riwtar, ((IS_64BIT_DMA ? (lstart >> 32)
+ & 0xf : 0) << 20) | ((lstart >> 12) & 0xfffff));
+ out_be32(&inbatmu->riwbar, ((IS_64BIT_DMA ? (rstart >> 32)
+ & 0x3 : 0) << 20) | ((rstart >> 12) & 0xfffff));
+ out_be32(&inbatmu->riwar, 0x80000000 | (0xf << 20)
+ | ((flags & 0xff) << 12)
+ | (__ilog2(size) - 1));
+ return 0;
+}
+
+/**
+ * fsl_rio_map_outb_mem -- Mapping outbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ * @rstart: RapidIO space start address.
+ * @size: The mapping region size.
+ * @tid: The target RapidIO device id.
+ * @flags: Flags for mapping. 0 for using default flags.
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will create the outbound mapping
+ * from lstart to rstart.
+ */
+static int fsl_rio_map_outb_mem(struct rio_mport *mport, phys_addr_t lstart,
+ resource_size_t rstart, resource_size_t size,
+ u16 tid, u32 flags)
+{
+ int i;
+ struct rio_priv *priv = mport->priv;
+ struct rio_atmu_regs __iomem *outbatmu = (struct rio_atmu_regs *)
+ (priv->regs_win + RIO_ATMU_REGS_OFFSET) + 1;
+ int size_ffs;
+ resource_size_t align;
+
+ if (flags == 0)
+ flags = (RIO_NREAD << 4) | RIO_NWRITE_R;
+
+ align = (size < 0x1000) ? 0x1000 : 1 << (__ilog2(size - 1) + 1);
+
+ /* Align the size */
+ if ((lstart + size) > (_ALIGN_DOWN(lstart, align) + align)) {
+ size_ffs = __ffs(_ALIGN_DOWN(lstart + size - 1, align));
+ size = 1 << (size_ffs + (((_ALIGN_DOWN(lstart, 1 << size_ffs) +
+ (1 << size_ffs)) < (lstart + size)) ? 1 : 0));
+ } else
+ size = align;
+
+ if ((lstart & (size - 1)) != (rstart & (size - 1))) {
+ dev_err(mport->dev, "The local address 0x%llx can not be "
+ "aligned to the same size 0x%llx with the RapidIO "
+ "space address 0x%llx!\n", (unsigned long long)lstart,
+ (unsigned long long)size, (unsigned long long)rstart);
+ return -EINVAL;
+ }
+
+ /* Search for free outbound ATMU */
+ for (i = 1;
+ (i <= RIO_MAX_OUTB_ATMU) && (outbatmu->rowar & RIO_ATMU_EN_MASK);
+ i++, outbatmu++)
+ ;
+
+ if (i > RIO_MAX_OUTB_ATMU) {
+ dev_err(mport->dev, "No free outbound ATMU!\n");
+ return -EBUSY;
+ }
+ out_be32(&outbatmu->rowtar, ((tid & 0x3ff) << 22)
+ | ((IS_64BIT_PHYS ? (rstart >> 32) & 0x3 : 0) << 20)
+ | ((rstart >> 12) & 0xfffff));
+ if (mport->phy_type == RIO_PHY_SERIAL)
+ out_be32(&outbatmu->rowtear, tid >> 10);
+ out_be32(&outbatmu->rowbar, ((IS_64BIT_PHYS ?
+ (lstart >> 32) & 0xf : 0) << 20)
+ | ((lstart >> 12) & 0xfffff));
+ out_be32(&outbatmu->rowar, 0x80000000
+ | ((flags & 0xff) << 12)
+ | (__ilog2(size) - 1));
+ return 0;
+}
+
+/**
+ * fsl_rio_unmap_inb_mem -- Unmapping inbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ */
+static void fsl_rio_unmap_inb_mem(struct rio_mport *mport,
+ dma_addr_t lstart)
+{
+ int i;
+ struct rio_priv *priv = mport->priv;
+ struct rio_inb_atmu_regs __iomem *inbatmu = (struct rio_inb_atmu_regs *)
+ (priv->regs_win + RIO_INB_ATMU_REGS_OFFSET) - 1;
+
+ /* Search for inbound ATMU */
+ for (i = 1; i <= RIO_MAX_INB_ATMU ; i++, inbatmu--) {
+ u32 tar = ((IS_64BIT_DMA ? (lstart >> 32) & 0xf : 0) << 20)
+ | ((lstart >> 12) & 0xfffff);
+ if (inbatmu->riwtar == tar) {
+ out_be32(&inbatmu->riwar, ~(RIO_ATMU_EN_MASK));
+ return;
+ }
+ }
+}
+
+/**
+ * fsl_rio_unmap_outb_mem -- Unmapping outbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ */
+static void fsl_rio_unmap_outb_mem(struct rio_mport *mport,
+ phys_addr_t lstart)
+{
+ int i;
+ struct rio_priv *priv = mport->priv;
+ struct rio_atmu_regs __iomem *outbatmu = (struct rio_atmu_regs *)
+ (priv->regs_win + RIO_ATMU_REGS_OFFSET) + 1;
+
+ /* Search for outbound ATMU */
+ for (i = 1; i <= RIO_MAX_OUTB_ATMU ; i++, outbatmu++) {
+ u32 bar = ((IS_64BIT_PHYS ? (lstart >> 32) & 0xf : 0) << 20)
+ | ((lstart >> 12) & 0xfffff);
+ if (outbatmu->rowbar == bar) {
+ out_be32(&outbatmu->rowar, ~(RIO_ATMU_EN_MASK));
+ return;
+ }
+ }
+}
+
+/**
* rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
* @mport: Master port with outbound message queue
* @rdev: Target of outbound message
@@ -951,6 +1154,13 @@ static int fsl_rio_get_cmdline(char *s)
__setup("riohdid=", fsl_rio_get_cmdline);
+static struct rio_mem_ops fsl_mem_ops = {
+ .map_inb = fsl_rio_map_inb_mem,
+ .map_outb = fsl_rio_map_outb_mem,
+ .unmap_inb = fsl_rio_unmap_inb_mem,
+ .unmap_outb = fsl_rio_unmap_outb_mem,
+};
+
static inline void fsl_rio_info(struct device *dev, u32 ccsr)
{
const char *str;
@@ -1026,8 +1236,9 @@ int fsl_rio_setup(struct of_device *dev)
return -EFAULT;
}
dev_info(&dev->dev, "Of-device full name %s\n", dev->node->full_name);
- dev_info(&dev->dev, "Regs start 0x%08x size 0x%08x\n", regs.start,
- regs.end - regs.start + 1);
+ dev_info(&dev->dev, "Regs start 0x%llx size 0x%llx\n",
+ (unsigned long long)regs.start,
+ (unsigned long long)(regs.end - regs.start + 1));
dt_range = of_get_property(dev->node, "ranges", &rlen);
if (!dt_range) {
@@ -1067,6 +1278,7 @@ int fsl_rio_setup(struct of_device *dev)
port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
port->id = 0;
port->index = 0;
+ port->dev = &dev->dev;
priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
if (!priv) {
@@ -1095,6 +1307,7 @@ int fsl_rio_setup(struct of_device *dev)
priv->dev = &dev->dev;
port->ops = ops;
+ port->mops = &fsl_mem_ops;
port->host_deviceid = fsl_rio_get_hdid(port->id);
port->priv = priv;
--
1.5.4
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio
2009-05-12 8:36 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Li Yang
@ 2009-05-12 8:36 ` Li Yang
2009-05-12 8:36 ` [PATCH 5/6] rio: warn_unused_result warnings fix Li Yang
2009-05-12 22:10 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Andrew Morton
2009-05-12 22:05 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Andrew Morton
1 sibling, 2 replies; 19+ messages in thread
From: Li Yang @ 2009-05-12 8:36 UTC (permalink / raw)
To: akpm, galak, davem, mporter
Cc: linuxppc-dev, Zhang Wei, Li Yang, linux-kernel, netdev
Through the newly added RapidIO memory access support, the sender can write
directly into the recipient's rx buffer, either by CPU or by a DMA engine.
Signed-off-by: Zhang Wei <zw@zh-kernel.org>
Signed-off-by: Li Yang <leoli@freescale.com>
---
drivers/net/Kconfig | 10 ++
drivers/net/rionet.c | 365 +++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 371 insertions(+), 4 deletions(-)
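The shared-buffer layout and handshake used below, in brief (a sketch; names are taken from the structures added in this patch, the offset arithmetic is the editor's check):

/* struct rionet_tx_rx_buff layout:
 *   4 (enqueue) + 4 (dequeue) + 4 * 128 (size[]) + 3576 (rev1 padding) = 4096,
 *   so skdata[0] starts on a 4 KB boundary and each of the 128 slots is 4 KB.
 *
 * Sender (rio_send_mem), with the peer's rx buffer mapped outbound:
 */
int e = in_be32(&txbuff->enqueue);
if (!(in_be32(&txbuff->size[e]) & RIONET_SKDATA_EN) &&
    RIONET_QUEUE_NEXT(e) != in_be32(&txbuff->dequeue)) {
	/* copy the frame into skdata[e] (memcpy or DMA), then publish it */
	out_be32(&txbuff->size[e], RIONET_SKDATA_EN | skb->len);
	out_be32(&txbuff->enqueue, RIONET_QUEUE_NEXT(e));
}
rio_send_doorbell(rdev, RIONET_DOORBELL_SEND);
/* Receiver (rio_recv_mem), run from the doorbell handler: walk
 * dequeue..enqueue, copy each slot into an skb, clear RIONET_SKDATA_EN
 * and advance dequeue.
 */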
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d..1e88e26 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2736,6 +2736,16 @@ config RIONET_RX_SIZE
depends on RIONET
default "128"
+config RIONET_MEMMAP
+ bool "Use memory map instead of message"
+ depends on RIONET
+ default n
+
+config RIONET_DMA
+ bool "Use DMA for memory mapping data transfer"
+ depends on RIONET_MEMMAP && FSL_DMA
+ default y
+
config FDDI
tristate "FDDI driver support"
depends on (PCI || EISA || TC)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index ec59e29..c38e51e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -1,6 +1,8 @@
/*
* rionet - Ethernet driver over RapidIO messaging services
*
+ * Copyright (C) 2007-2009 Freescale Semiconductor, Inc.
+ *
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
@@ -23,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/dmaengine.h>
#define DRV_NAME "rionet"
#define DRV_VERSION "0.2"
@@ -40,13 +43,48 @@ MODULE_LICENSE("GPL");
NETIF_MSG_TX_ERR)
#define RIONET_DOORBELL_JOIN 0x1000
+#ifdef CONFIG_RIONET_MEMMAP
+#define RIONET_DOORBELL_SEND 0x1001
+#define RIONET_DOORBELL_LEAVE 0x1002
+#else
#define RIONET_DOORBELL_LEAVE 0x1001
+#endif
#define RIONET_MAILBOX 0
#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
+#define ERR(fmt, arg...) \
+ printk(KERN_ERR "ERROR %s - %s: " fmt, __FILE__, __func__, ## arg)
+
+#ifdef CONFIG_RIONET_MEMMAP
+/* Definitions for rionet memory map driver */
+#define RIONET_DRVID 0x101
+#define RIONET_MAX_SK_DATA_SIZE 0x1000
+#define RIONET_MEM_RIO_BASE 0x10000000
+#define RIONET_TX_RX_BUFF_SIZE (0x1000 * (128 + 128))
+#define RIONET_QUEUE_NEXT(x) (((x) < 127) ? ((x) + 1) : 0)
+#define RIONET_QUEUE_INC(x) (x = RIONET_QUEUE_NEXT(x))
+
+struct sk_data {
+ u8 data[0x1000];
+};
+
+#define RIONET_SKDATA_EN 0x80000000
+struct rionet_tx_rx_buff {
+ int enqueue; /* enqueue point */
+ int dequeue; /* dequeue point */
+ u32 size[128]; /* size[i] is skdata[i] size
+ * the most high bit [31] is
+ * enable bit. The
+ * max size is 4096.
+ */
+ u8 rev1[3576];
+ struct sk_data skdata[128]; /* all size are 0x1000 * 128 */
+};
+#endif /* CONFIG_RIONET_MEMMAP */
+
static LIST_HEAD(rionet_peers);
struct rionet_private {
@@ -60,6 +98,19 @@ struct rionet_private {
spinlock_t lock;
spinlock_t tx_lock;
u32 msg_enable;
+#ifdef CONFIG_RIONET_MEMMAP
+ struct rionet_tx_rx_buff *rxbuff;
+ struct rionet_tx_rx_buff __iomem *txbuff;
+ dma_addr_t rx_addr;
+ phys_addr_t tx_addr;
+ struct resource *riores;
+#ifdef CONFIG_RIONET_DMA
+ struct dma_chan *txdmachan;
+ struct dma_chan *rxdmachan;
+ struct dma_client rio_dma_client;
+ spinlock_t rio_dma_event_lock;
+#endif
+#endif
};
struct rionet_peer {
@@ -90,6 +141,7 @@ static struct rio_dev **rionet_active;
#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
+#ifndef CONFIG_RIONET_MEMMAP
static int rionet_rx_clean(struct net_device *ndev)
{
int i;
@@ -108,9 +160,11 @@ static int rionet_rx_clean(struct net_device *ndev)
rnet->rx_skb[i]->data = data;
skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
+ rnet->rx_skb[i]->dev = ndev;
rnet->rx_skb[i]->protocol =
eth_type_trans(rnet->rx_skb[i], ndev);
error = netif_rx(rnet->rx_skb[i]);
+ rnet->rx_skb[i] = NULL;
if (error == NET_RX_DROP) {
ndev->stats.rx_dropped++;
@@ -128,6 +182,7 @@ static int rionet_rx_clean(struct net_device *ndev)
return i;
}
+#endif
static void rionet_rx_fill(struct net_device *ndev, int end)
{
@@ -141,19 +196,86 @@ static void rionet_rx_fill(struct net_device *ndev, int end)
if (!rnet->rx_skb[i])
break;
+#ifndef CONFIG_RIONET_MEMMAP
rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
rnet->rx_skb[i]->data);
+#endif
} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
rnet->rx_slot = i;
}
+#ifdef CONFIG_RIONET_MEMMAP
+static int rio_send_mem(struct sk_buff *skb,
+ struct net_device *ndev, struct rio_dev *rdev)
+{
+ struct rionet_private *rnet = netdev_priv(ndev);
+ int enqueue, dequeue;
+
+ if (!rdev)
+ return -EFAULT;
+
+ if (skb->len > RIONET_MAX_SK_DATA_SIZE) {
+ printk(KERN_ERR "Frame len is more than RIONET max sk_data!\n");
+ return -EINVAL;
+ }
+
+ rio_map_outb_region(rnet->mport, rdev->destid, rnet->riores,
+ rnet->tx_addr, 0);
+
+ enqueue = in_be32(&rnet->txbuff->enqueue);
+ dequeue = in_be32(&rnet->txbuff->dequeue);
+
+ if (!(in_be32(&rnet->txbuff->size[enqueue]) & RIONET_SKDATA_EN)
+ && (RIONET_QUEUE_NEXT(enqueue) != dequeue)) {
+#ifdef CONFIG_RIONET_DMA
+ struct dma_device *dmadev;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t tx_cookie = 0;
+
+ dmadev = rnet->txdmachan->device;
+ tx = dmadev->device_prep_dma_memcpy(rnet->txdmachan,
+ (void *)rnet->txbuff->skdata[enqueue].data
+ - (void *)rnet->txbuff + rnet->tx_addr,
+ dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE), skb->len, DMA_CTRL_ACK);
+ if (!tx)
+ return -EFAULT;
+ tx_cookie = tx->tx_submit(tx);
+
+ dma_async_memcpy_issue_pending(rnet->txdmachan);
+ while (dma_async_memcpy_complete(rnet->txdmachan,
+ tx_cookie, NULL, NULL) == DMA_IN_PROGRESS) ;
+#else
+ memcpy(rnet->txbuff->skdata[enqueue].data, skb->data, skb->len);
+#endif /* CONFIG_RIONET_DMA */
+ out_be32(&rnet->txbuff->size[enqueue],
+ RIONET_SKDATA_EN | skb->len);
+ out_be32(&rnet->txbuff->enqueue,
+ RIONET_QUEUE_NEXT(enqueue));
+ in_be32(&rnet->txbuff->enqueue); /* verify read */
+ } else if (netif_msg_tx_err(rnet))
+ printk(KERN_ERR "rionet(memmap): txbuff is busy!\n");
+
+ rio_unmap_outb_region(rnet->mport, rnet->tx_addr);
+ rio_send_doorbell(rdev, RIONET_DOORBELL_SEND);
+ return 0;
+}
+#endif
+
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
struct rio_dev *rdev)
{
struct rionet_private *rnet = netdev_priv(ndev);
+#ifdef CONFIG_RIONET_MEMMAP
+ int ret = 0;
+ ret = rio_send_mem(skb, ndev, rdev);
+ if (ret)
+ return ret;
+#else
rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
+#endif
rnet->tx_skb[rnet->tx_slot] = skb;
ndev->stats.tx_packets++;
@@ -165,6 +287,19 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
++rnet->tx_slot;
rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
+#ifdef CONFIG_RIONET_MEMMAP
+ while (rnet->tx_cnt && (rnet->ack_slot != rnet->tx_slot)) {
+ /* dma unmap single */
+ dev_kfree_skb_any(rnet->tx_skb[rnet->ack_slot]);
+ rnet->tx_skb[rnet->ack_slot] = NULL;
+ ++rnet->ack_slot;
+ rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
+ rnet->tx_cnt--;
+ }
+
+ if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+ netif_wake_queue(ndev);
+#endif
if (netif_msg_tx_queued(rnet))
printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
(u32) skb, skb->len);
@@ -211,6 +346,92 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+#ifdef CONFIG_RIONET_MEMMAP
+static void rio_recv_mem(struct net_device *ndev)
+{
+ struct rionet_private *rnet = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 enqueue, dequeue, size;
+ int error = 0;
+#ifdef CONFIG_RIONET_DMA
+ dma_cookie_t rx_cookie = 0;
+ struct dma_device *dmadev;
+ struct dma_async_tx_descriptor *tx;
+#endif
+
+ dequeue = rnet->rxbuff->dequeue;
+ enqueue = rnet->rxbuff->enqueue;
+
+ while (enqueue != dequeue) {
+ size = rnet->rxbuff->size[dequeue];
+ if (!(size & RIONET_SKDATA_EN))
+ return;
+ size &= ~RIONET_SKDATA_EN;
+
+ skb = dev_alloc_skb(size + 2);
+ if (!skb)
+ return;
+
+#ifdef CONFIG_RIONET_DMA
+ dmadev = rnet->rxdmachan->device;
+ tx = dmadev->device_prep_dma_memcpy(rnet->rxdmachan,
+ dma_map_single(&ndev->dev, skb_put(skb, size),
+ size, DMA_FROM_DEVICE),
+ (void *)rnet->rxbuff->skdata[dequeue].data
+ - (void *)rnet->rxbuff + rnet->rx_addr,
+ size, DMA_CTRL_ACK);
+ if (!tx)
+ return;
+ rx_cookie = tx->tx_submit(tx);
+ dma_async_memcpy_issue_pending(rnet->rxdmachan);
+ while (dma_async_memcpy_complete(rnet->rxdmachan,
+ rx_cookie, NULL, NULL) == DMA_IN_PROGRESS);
+#else
+ memcpy(skb_put(skb, size),
+ rnet->rxbuff->skdata[dequeue].data,
+ size);
+#endif /* CONFIG_RIONET_DMA */
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ error = netif_rx(skb);
+
+ rnet->rxbuff->size[dequeue] &= ~RIONET_SKDATA_EN;
+ rnet->rxbuff->dequeue = RIONET_QUEUE_NEXT(dequeue);
+ dequeue = RIONET_QUEUE_NEXT(dequeue);
+
+ if (error == NET_RX_DROP) {
+ ndev->stats.rx_dropped++;
+ } else if (error == NET_RX_BAD) {
+ if (netif_msg_rx_err(rnet))
+ printk(KERN_WARNING "%s: bad rx packet\n",
+ DRV_NAME);
+ ndev->stats.rx_errors++;
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
+ }
+ }
+}
+
+static void rionet_inb_recv_event(struct rio_mport *mport, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct rionet_private *rnet = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (netif_msg_intr(rnet))
+ printk(KERN_INFO "%s: inbound memory data receive event\n",
+ DRV_NAME);
+
+ spin_lock_irqsave(&rnet->lock, flags);
+ rio_recv_mem(ndev);
+ spin_unlock_irqrestore(&rnet->lock, flags);
+}
+#endif
+
+
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
u16 info)
{
@@ -232,6 +453,10 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
}
} else if (info == RIONET_DOORBELL_LEAVE) {
rionet_active[sid] = NULL;
+#ifdef CONFIG_RIONET_MEMMAP
+ } else if (info == RIONET_DOORBELL_SEND) {
+ rionet_inb_recv_event(mport, ndev);
+#endif
} else {
if (netif_msg_intr(rnet))
printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -239,6 +464,7 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
}
}
+#ifndef CONFIG_RIONET_MEMMAP
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
int n;
@@ -281,6 +507,58 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
spin_unlock(&rnet->lock);
}
+#endif
+
+#ifdef CONFIG_RIONET_DMA
+static enum dma_state_client rionet_dma_event(struct dma_client *client,
+ struct dma_chan *chan, enum dma_state state)
+{
+ struct rionet_private *rnet = container_of(client,
+ struct rionet_private, rio_dma_client);
+ enum dma_state_client ack = DMA_DUP;
+
+ spin_lock(&rnet->lock);
+ switch (state) {
+ case DMA_RESOURCE_AVAILABLE:
+ if (!rnet->txdmachan) {
+ ack = DMA_ACK;
+ rnet->txdmachan = chan;
+ } else if (!rnet->rxdmachan) {
+ ack = DMA_ACK;
+ rnet->rxdmachan = chan;
+ }
+ break;
+ case DMA_RESOURCE_REMOVED:
+ if (rnet->txdmachan == chan) {
+ ack = DMA_ACK;
+ rnet->txdmachan = NULL;
+ } else if (rnet->rxdmachan == chan) {
+ ack = DMA_ACK;
+ rnet->rxdmachan = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&rnet->lock);
+ return ack;
+}
+
+static int rionet_dma_register(struct rionet_private *rnet)
+{
+ int rc = 0;
+ spin_lock_init(&rnet->rio_dma_event_lock);
+ rnet->rio_dma_client.event_callback = rionet_dma_event;
+ dma_cap_set(DMA_MEMCPY, rnet->rio_dma_client.cap_mask);
+ dma_async_client_register(&rnet->rio_dma_client);
+ dma_async_client_chan_request(&rnet->rio_dma_client);
+
+ if (!rnet->txdmachan || !rnet->rxdmachan)
+ rc = -ENODEV;
+
+ return rc;
+}
+#endif
static int rionet_open(struct net_device *ndev)
{
@@ -297,21 +575,63 @@ static int rionet_open(struct net_device *ndev)
RIONET_DOORBELL_JOIN,
RIONET_DOORBELL_LEAVE,
rionet_dbell_event)) < 0)
- goto out;
+ return rc;
+
+#ifdef CONFIG_RIONET_MEMMAP
+ if (!request_rio_region(RIONET_MEM_RIO_BASE, RIONET_TX_RX_BUFF_SIZE,
+ ndev->name, 0)) {
+ dev_err(&ndev->dev, "RapidIO space busy\n");
+ rc = -EBUSY;
+ goto out1;
+ }
+ rnet->riores = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ if (!rnet->riores) {
+ rc = -ENOMEM;
+ goto out2;
+ }
+ rnet->riores->start = RIONET_MEM_RIO_BASE;
+ rnet->riores->end = RIONET_MEM_RIO_BASE + RIONET_TX_RX_BUFF_SIZE - 1;
+ rnet->rxbuff = dma_alloc_coherent(&ndev->dev, RIONET_TX_RX_BUFF_SIZE,
+ &rnet->rx_addr, GFP_KERNEL);
+ if (!rnet->rxbuff) {
+ rc = -ENOMEM;
+ goto out3;
+ }
+ rc = rio_map_inb_region(rnet->mport, rnet->riores, rnet->rx_addr, 0);
+ if (rc) {
+ rc = -EBUSY;
+ goto out4;
+ }
+
+ /* Use space right after the doorbell window, aligned to
+ * size of RIONET_TX_RX_BUFF_SIZE */
+ rnet->tx_addr = rnet->mport->iores.start + 0x500000;
+ rnet->txbuff = ioremap(rnet->tx_addr, resource_size(rnet->riores));
+ if (!rnet->txbuff) {
+ rc = -ENOMEM;
+ goto out5;
+ }
+#ifdef CONFIG_RIONET_DMA
+ rc = rionet_dma_register(rnet);
+ if (rc)
+ goto out6;
+#endif /* CONFIG_RIONET_DMA */
+#else
if ((rc = rio_request_inb_mbox(rnet->mport,
(void *)ndev,
RIONET_MAILBOX,
RIONET_RX_RING_SIZE,
rionet_inb_msg_event)) < 0)
- goto out;
+ goto out1;
if ((rc = rio_request_outb_mbox(rnet->mport,
(void *)ndev,
RIONET_MAILBOX,
RIONET_TX_RING_SIZE,
rionet_outb_msg_event)) < 0)
- goto out;
+ goto out8;
+#endif
/* Initialize inbound message ring */
for (i = 0; i < RIONET_RX_RING_SIZE; i++)
@@ -344,8 +664,31 @@ static int rionet_open(struct net_device *ndev)
if (pwdcsr & RIO_DOORBELL_AVAIL)
rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
}
+ return 0;
- out:
+#ifndef CONFIG_RIONET_MEMMAP
+out8:
+ rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
+#else
+#ifdef CONFIG_RIONET_DMA
+out6:
+ iounmap(rnet->txbuff);
+#endif
+out5:
+ rio_unmap_inb_region(rnet->mport, rnet->rx_addr);
+out4:
+ dma_free_coherent(&ndev->dev, RIONET_TX_RX_BUFF_SIZE,
+ rnet->rxbuff, rnet->rx_addr);
+ rnet->rxbuff = NULL;
+ rnet->txbuff = NULL;
+out3:
+ kfree(rnet->riores);
+out2:
+ release_rio_region(RIONET_MEM_RIO_BASE, RIONET_TX_RX_BUFF_SIZE);
+#endif
+out1:
+ rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
+ RIONET_DOORBELL_LEAVE);
return rc;
}
@@ -374,8 +717,22 @@ static int rionet_close(struct net_device *ndev)
rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
RIONET_DOORBELL_LEAVE);
+#ifdef CONFIG_RIONET_MEMMAP
+ rio_unmap_inb_region(rnet->mport, rnet->rx_addr);
+ iounmap(rnet->txbuff);
+ dma_free_coherent(&ndev->dev, RIONET_TX_RX_BUFF_SIZE,
+ rnet->rxbuff, rnet->rx_addr);
+ kfree(rnet->riores);
+ release_rio_region(RIONET_MEM_RIO_BASE, RIONET_TX_RX_BUFF_SIZE);
+ rnet->rxbuff = NULL;
+ rnet->txbuff = NULL;
+#ifdef CONFIG_RIONET_DMA
+ dma_async_client_unregister(&rnet->rio_dma_client);
+#endif
+#else
rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
+#endif
return 0;
}
--
1.5.4
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH 5/6] rio: warn_unused_result warnings fix
2009-05-12 8:36 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Li Yang
@ 2009-05-12 8:36 ` Li Yang
2009-05-12 8:36 ` [PATCH 6/6] rio: fix section mismatch Li Yang
` (2 more replies)
2009-05-12 22:10 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Andrew Morton
1 sibling, 3 replies; 19+ messages in thread
From: Li Yang @ 2009-05-12 8:36 UTC (permalink / raw)
To: akpm, galak, davem, mporter; +Cc: linuxppc-dev, Li Yang, linux-kernel, netdev
Adding failure path for the following two cases.
warning: ignoring return value of 'device_add', declared with attribute warn_unused_result
warning: ignoring return value of 'sysfs_create_bin_file', declared with attribute warn_unused_result
Signed-off-by: Li Yang <leoli@freescale.com>
---
drivers/rapidio/rio-scan.c | 44 ++++++++++++++++++++++++++----------------
drivers/rapidio/rio-sysfs.c | 6 +++-
2 files changed, 31 insertions(+), 19 deletions(-)
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 74d0bfa..0838fb2 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -263,15 +263,21 @@ static void rio_route_set_ops(struct rio_dev *rdev)
* device to the RIO device list. Creates the generic sysfs nodes
* for an RIO device.
*/
-static void __devinit rio_add_device(struct rio_dev *rdev)
+static int __devinit rio_add_device(struct rio_dev *rdev)
{
- device_add(&rdev->dev);
+ int err;
+
+ err = device_add(&rdev->dev);
+ if (err)
+ return err;
spin_lock(&rio_global_list_lock);
list_add_tail(&rdev->global_list, &rio_devices);
spin_unlock(&rio_global_list_lock);
rio_create_sysfs_dev_files(rdev);
+
+ return 0;
}
/**
@@ -294,13 +300,14 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
struct rio_mport *port, u16 destid,
u8 hopcount, int do_enum)
{
+ int ret = 0;
struct rio_dev *rdev;
- struct rio_switch *rswitch;
+ struct rio_switch *rswitch = NULL;
int result, rdid;
rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
if (!rdev)
- goto out;
+ return NULL;
rdev->net = net;
rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
@@ -343,23 +350,16 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rio_mport_read_config_32(port, destid, hopcount,
RIO_SWP_INFO_CAR, &rdev->swpinfo);
rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL);
- if (!rswitch) {
- kfree(rdev);
- rdev = NULL;
- goto out;
- }
+ if (!rswitch)
+ goto cleanup;
rswitch->switchid = next_switchid;
rswitch->hopcount = hopcount;
rswitch->destid = destid;
rswitch->route_table = kzalloc(sizeof(u8)*
RIO_MAX_ROUTE_ENTRIES(port->sys_size),
GFP_KERNEL);
- if (!rswitch->route_table) {
- kfree(rdev);
- rdev = NULL;
- kfree(rswitch);
- goto out;
- }
+ if (!rswitch->route_table)
+ goto cleanup;
/* Initialize switch route table */
for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
rdid++)
@@ -390,10 +390,20 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
0, 0xffff);
- rio_add_device(rdev);
+ ret = rio_add_device(rdev);
+ if (ret)
+ goto cleanup;
- out:
return rdev;
+
+cleanup:
+ if (rswitch) {
+ if (rswitch->route_table)
+ kfree(rswitch->route_table);
+ kfree(rswitch);
+ }
+ kfree(rdev);
+ return NULL;
}
/**
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 97a147f..ba742e8 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -214,9 +214,11 @@ static struct bin_attribute rio_config_attr = {
*/
int rio_create_sysfs_dev_files(struct rio_dev *rdev)
{
- sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr);
+ int err = 0;
- return 0;
+ err = sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr);
+
+ return err;
}
/**
--
1.5.4
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH 6/6] rio: fix section mismatch
2009-05-12 8:36 ` [PATCH 5/6] rio: warn_unused_result warnings fix Li Yang
@ 2009-05-12 8:36 ` Li Yang
2009-05-13 22:08 ` Kumar Gala
2009-05-12 22:11 ` [PATCH 5/6] rio: warn_unused_result warnings fix Andrew Morton
2009-06-11 4:34 ` Kumar Gala
2 siblings, 1 reply; 19+ messages in thread
From: Li Yang @ 2009-05-12 8:36 UTC (permalink / raw)
To: akpm, galak, davem, mporter; +Cc: linuxppc-dev, Li Yang, linux-kernel, netdev
Signed-off-by: Li Yang <leoli@freescale.com>
---
drivers/rapidio/rio-scan.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)
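For context, the rule being satisfied here (a hypothetical illustration, not code from the patch): a function placed in .devinit.text may only be referenced from code that is discarded with it, so the callers of __devinit helpers must themselves be __devinit.

static int __devinit callee(void)	/* placed in .devinit.text */
{
	return 0;
}

static int __devinit good_caller(void)	/* OK: discarded together with callee */
{
	return callee();
}

static int bad_caller(void)		/* triggers the section mismatch warning:
					 * .text references .devinit.text */
{
	return callee();
}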
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 0838fb2..e29be3c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -296,7 +296,7 @@ static int __devinit rio_add_device(struct rio_dev *rdev)
* to a RIO device on success or NULL on failure.
*
*/
-static struct rio_dev *rio_setup_device(struct rio_net *net,
+static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
struct rio_mport *port, u16 destid,
u8 hopcount, int do_enum)
{
@@ -569,7 +569,7 @@ static void rio_net_add_mport(struct rio_net *net, struct rio_mport *port)
* Recursively enumerates a RIO network. Transactions are sent via the
* master port passed in @port.
*/
-static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
+static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
u8 hopcount)
{
int port_num;
@@ -728,7 +728,7 @@ static int rio_enum_complete(struct rio_mport *port)
* Recursively discovers a RIO network. Transactions are sent via the
* master port passed in @port.
*/
-static int
+static int __devinit
rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
u8 hopcount)
{
--
1.5.4
^ permalink raw reply related [flat|nested] 19+ messages in thread
* Re: [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block
2009-05-12 8:36 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Li Yang
2009-05-12 8:36 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Li Yang
@ 2009-05-12 22:05 ` Andrew Morton
2009-05-21 10:43 ` Li Yang
1 sibling, 1 reply; 19+ messages in thread
From: Andrew Morton @ 2009-05-12 22:05 UTC (permalink / raw)
To: Li Yang; +Cc: zw, netdev, linux-kernel, linuxppc-dev, leoli, davem
On Tue, 12 May 2009 16:36:00 +0800
Li Yang <leoli@freescale.com> wrote:
> + align = (size < 0x1000) ? 0x1000 : 1 << (__ilog2(size - 1) + 1);
> +
> + /* Align the size */
> + if ((lstart + size) > (_ALIGN_DOWN(lstart, align) + align)) {
__ilog2() and _ALIGN_DOWN() are powerpc-specific functions. It would
be preferable to use more general helpers if possible. ALIGN() and ilog2()
might suit here.
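For reference, one way the generic helpers could be used here (a sketch only; roundup_pow_of_two() is an additional possibility beyond what Andrew names, and the open-coded mask assumes align is a power of two, which the driver already guarantees):

/* Possible generic equivalents:
 *   __ilog2(x)        ->  ilog2(x)
 *   _ALIGN_DOWN(x, a) ->  (x) & ~((a) - 1), valid for power-of-two a
 */
align = (size < 0x1000) ? 0x1000 : roundup_pow_of_two(size);

/* Align the size */
if ((lstart + size) > ((lstart & ~(align - 1)) + align)) {
	/* ... rest of the alignment logic unchanged ... */
}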
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio
2009-05-12 8:36 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Li Yang
2009-05-12 8:36 ` [PATCH 5/6] rio: warn_unused_result warnings fix Li Yang
@ 2009-05-12 22:10 ` Andrew Morton
1 sibling, 0 replies; 19+ messages in thread
From: Andrew Morton @ 2009-05-12 22:10 UTC (permalink / raw)
To: Li Yang; +Cc: zw, netdev, linux-kernel, linuxppc-dev, leoli, davem
On Tue, 12 May 2009 16:36:01 +0800
Li Yang <leoli@freescale.com> wrote:
> Through the newly added RapidIO memory access support, the sender can write
> directly into the recipient's rx buffer, either by CPU or by a DMA engine.
>
> ...
>
> +/* Definitions for rionet memory map driver */
> +#define RIONET_DRVID 0x101
> +#define RIONET_MAX_SK_DATA_SIZE 0x1000
> +#define RIONET_MEM_RIO_BASE 0x10000000
> +#define RIONET_TX_RX_BUFF_SIZE (0x1000 * (128 + 128))
> +#define RIONET_QUEUE_NEXT(x) (((x) < 127) ? ((x) + 1) : 0)
References its arg multiple times, hence is buggy or inefficient when
passed an expression with side-effects.
static inline int rionet_queue_next(int x)
would be better. Assuming that some sane identifier is used instead of
"x".
> +#define RIONET_QUEUE_INC(x) (x = RIONET_QUEUE_NEXT(x))
It's pretty ugly to hide an assignment inside a macro like this. Why
not do
foo = rionet_queue_inc(foo);
at the callsites? It makes it much clearer for the reader.
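A sketch of the suggested shape (rionet_queue_next() here is just the name Andrew floats; the body mirrors the existing macro):

static inline int rionet_queue_next(int slot)
{
	return (slot < 127) ? slot + 1 : 0;
}

	/* and at the call sites, make the update explicit: */
	slot = rionet_queue_next(slot);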
>
> ...
>
> +#ifdef CONFIG_RIONET_MEMMAP
> +static int rio_send_mem(struct sk_buff *skb,
> + struct net_device *ndev, struct rio_dev *rdev)
> +{
> + struct rionet_private *rnet = netdev_priv(ndev);
> + int enqueue, dequeue;
> +
> + if (!rdev)
> + return -EFAULT;
Is that an appropriate error code?
>
> ...
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 5/6] rio: warn_unused_result warnings fix
2009-05-12 8:36 ` [PATCH 5/6] rio: warn_unused_result warnings fix Li Yang
2009-05-12 8:36 ` [PATCH 6/6] rio: fix section mismatch Li Yang
@ 2009-05-12 22:11 ` Andrew Morton
2009-06-11 4:34 ` Kumar Gala
2 siblings, 0 replies; 19+ messages in thread
From: Andrew Morton @ 2009-05-12 22:11 UTC (permalink / raw)
To: Li Yang; +Cc: netdev, linux-kernel, linuxppc-dev, leoli, davem
On Tue, 12 May 2009 16:36:02 +0800
Li Yang <leoli@freescale.com> wrote:
> + if (rswitch) {
> + if (rswitch->route_table)
this `if' is unneeded.
> + kfree(rswitch->route_table);
> + kfree(rswitch);
> + }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree
2009-05-12 8:35 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Li Yang
2009-05-12 8:36 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Li Yang
@ 2009-05-13 22:08 ` Kumar Gala
1 sibling, 0 replies; 19+ messages in thread
From: Kumar Gala @ 2009-05-13 22:08 UTC (permalink / raw)
To: Li Yang; +Cc: netdev, linux-kernel, davem, linuxppc-dev, akpm
On May 12, 2009, at 3:35 AM, Li Yang wrote:
> Use the LAW (local access window) address from the device tree instead of
> the fixed address hard-coded in the old code.
>
> Signed-off-by: Li Yang <leoli@freescale.com>
> ---
> arch/powerpc/sysdev/fsl_rio.c | 12 +++++++-----
> 1 files changed, 7 insertions(+), 5 deletions(-)
applied to next
- k
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 6/6] rio: fix section mismatch
2009-05-12 8:36 ` [PATCH 6/6] rio: fix section mismatch Li Yang
@ 2009-05-13 22:08 ` Kumar Gala
0 siblings, 0 replies; 19+ messages in thread
From: Kumar Gala @ 2009-05-13 22:08 UTC (permalink / raw)
To: Li Yang; +Cc: netdev, linux-kernel, davem, linuxppc-dev, akpm
On May 12, 2009, at 3:36 AM, Li Yang wrote:
> Signed-off-by: Li Yang <leoli@freescale.com>
> ---
> drivers/rapidio/rio-scan.c | 6 +++---
> 1 files changed, 3 insertions(+), 3 deletions(-)
applied to next
- k
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block
2009-05-12 22:05 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Andrew Morton
@ 2009-05-21 10:43 ` Li Yang
0 siblings, 0 replies; 19+ messages in thread
From: Li Yang @ 2009-05-21 10:43 UTC (permalink / raw)
To: Andrew Morton; +Cc: zw, netdev, linux-kernel, linuxppc-dev, davem
On Wed, May 13, 2009 at 6:05 AM, Andrew Morton
<akpm@linux-foundation.org> wrote:
> On Tue, 12 May 2009 16:36:00 +0800
> Li Yang <leoli@freescale.com> wrote:
>
>> +	align = (size < 0x1000) ? 0x1000 : 1 << (__ilog2(size - 1) + 1);
>> +
>> +	/* Align the size */
>> +	if ((lstart + size) > (_ALIGN_DOWN(lstart, align) + align)) {
>
> __ilog2() and _ALIGN_DOWN() are powerpc-specific functions.  It would
> be preferable to use more general helpers if possible.  ALIGN() and ilog2()
> might suit here.
Will change to use ilog2().
_ALIGN_DOWN() gets a lower aligned address while ALIGN() gets a higher
address. It seems that we don't have a general helper to do the same.
- Leo
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
2009-05-12 8:35 [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Li Yang
2009-05-12 8:35 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Li Yang
@ 2009-06-11 4:13 ` Kumar Gala
2009-06-11 9:47 ` Li Yang-R58472
1 sibling, 1 reply; 19+ messages in thread
From: Kumar Gala @ 2009-06-11 4:13 UTC (permalink / raw)
To: Li Yang; +Cc: Zhang Wei, netdev, linux-kernel, davem, linuxppc-dev, akpm
On May 12, 2009, at 3:35 AM, Li Yang wrote:
> Add the mapping functions used to support direct IO memory access of
> rapidIO.
>
> Signed-off-by: Zhang Wei <zw@zh-kernel.org>
> Signed-off-by: Li Yang <leoli@freescale.com>
Use inbnd/outbnd instead of inb/outb which make one think of byte
level io accessors.
As I look at this I don't think this is the correct API. I think we
should be using the DMA mapping API to hide these details. The
concept of mapping like this seems to be more a function of FSL's
Address translation/mapping unit (ATMU) than anything specific to the
RIO bus standard.
> ---
> drivers/rapidio/rio.c | 95 ++++++++++++++++++++++++++++++++++++++
> +++++++++
> include/linux/rio.h | 25 ++++++++++++
> include/linux/rio_drv.h | 24 +++++++++---
> 3 files changed, 138 insertions(+), 6 deletions(-)
- k
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 5/6] rio: warn_unused_result warnings fix
2009-05-12 8:36 ` [PATCH 5/6] rio: warn_unused_result warnings fix Li Yang
2009-05-12 8:36 ` [PATCH 6/6] rio: fix section mismatch Li Yang
2009-05-12 22:11 ` [PATCH 5/6] rio: warn_unused_result warnings fix Andrew Morton
@ 2009-06-11 4:34 ` Kumar Gala
2 siblings, 0 replies; 19+ messages in thread
From: Kumar Gala @ 2009-06-11 4:34 UTC (permalink / raw)
To: Li Yang; +Cc: netdev, linux-kernel, davem, linuxppc-dev, akpm
On May 12, 2009, at 3:36 AM, Li Yang wrote:
> Adding failure path for the following two cases.
>
> warning: ignoring return value of 'device_add', declared with
> attribute warn_unused_result
> warning: ignoring return value of 'sysfs_create_bin_file', declared
> with attribute warn_unused_result
>
> Signed-off-by: Li Yang <leoli@freescale.com>
> ---
> drivers/rapidio/rio-scan.c | 44 +++++++++++++++++++++++++
> +----------------
> drivers/rapidio/rio-sysfs.c | 6 +++-
> 2 files changed, 31 insertions(+), 19 deletions(-)
applied w/fixup based on Andrew's comments.
- k
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
2009-06-11 4:13 ` [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Kumar Gala
@ 2009-06-11 9:47 ` Li Yang-R58472
2009-06-11 13:32 ` Kumar Gala
0 siblings, 1 reply; 19+ messages in thread
From: Li Yang-R58472 @ 2009-06-11 9:47 UTC (permalink / raw)
To: Kumar Gala; +Cc: Zhang Wei, netdev, linux-kernel, davem, linuxppc-dev, akpm
>On May 12, 2009, at 3:35 AM, Li Yang wrote:
>
>> Add the mapping functions used to support direct IO memory access of
>> rapidIO.
>>
>> Signed-off-by: Zhang Wei <zw@zh-kernel.org>
>> Signed-off-by: Li Yang <leoli@freescale.com>
>
>Use inbnd/outbnd instead of inb/outb which make one think of
>byte level io accessors.
>
>As I look at this I don't think this is the correct API. I
>think we should be using the DMA mapping API to hide these
>details. The concept of mapping like this seems to be more a
>function of FSL's Address translation/mapping unit (ATMU) than
>anything specific to the RIO bus standard.
This is a separate RIO block level ATMU. Although it looks like the
system level ATMU, system ATMU doesn't have the knowledge of rapidIO
target device ID. The mapping need to be dynamic, as it's easy to have
more RIO devices than the outbound windows.
- Leo
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
2009-06-11 9:47 ` Li Yang-R58472
@ 2009-06-11 13:32 ` Kumar Gala
2009-06-12 13:27 ` Li Yang
0 siblings, 1 reply; 19+ messages in thread
From: Kumar Gala @ 2009-06-11 13:32 UTC (permalink / raw)
To: Li Yang-R58472; +Cc: Zhang Wei, netdev, linux-kernel, davem, linuxppc-dev, akpm
On Jun 11, 2009, at 4:47 AM, Li Yang-R58472 wrote:
>> On May 12, 2009, at 3:35 AM, Li Yang wrote:
>>
>>> Add the mapping functions used to support direct IO memory access of
>>> rapidIO.
>>>
>>> Signed-off-by: Zhang Wei <zw@zh-kernel.org>
>>> Signed-off-by: Li Yang <leoli@freescale.com>
>>
>> Use inbnd/outbnd instead of inb/outb which make one think of
>> byte level io accessors.
>>
>> As I look at this I don't think this is the correct API. I
>> think we should be using the DMA mapping API to hide these
>> details. The concept of mapping like this seems to be more a
>> function of FSL's Address translation/mapping unit (ATMU) than
>> anything specific to the RIO bus standard.
>
> This is a separate RIO block level ATMU. Although it looks like the
> system level ATMU, system ATMU doesn't have the knowledge of rapidIO
> target device ID. The mapping need to be dynamic, as it's easy to
> have
> more RIO devices than the outbound windows.
I understand that. What I'm saying is the RIO block level ATMU is a
Freescale specific detail and not part of any standard RIO bus
programming model. We have mapping APIs that we can connect to for
this via the DMA API layer.
- k
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
2009-06-11 13:32 ` Kumar Gala
@ 2009-06-12 13:27 ` Li Yang
2009-06-12 13:58 ` Kumar Gala
0 siblings, 1 reply; 19+ messages in thread
From: Li Yang @ 2009-06-12 13:27 UTC (permalink / raw)
To: Kumar Gala; +Cc: Zhang Wei, netdev, linux-kernel, davem, linuxppc-dev, akpm
On Thu, Jun 11, 2009 at 9:32 PM, Kumar Gala <galak@kernel.crashing.org> wrote:
>
> On Jun 11, 2009, at 4:47 AM, Li Yang-R58472 wrote:
>
>>> On May 12, 2009, at 3:35 AM, Li Yang wrote:
>>>
>>>> Add the mapping functions used to support direct IO memory access of
>>>> rapidIO.
>>>>
>>>> Signed-off-by: Zhang Wei <zw@zh-kernel.org>
>>>> Signed-off-by: Li Yang <leoli@freescale.com>
>>>
>>> Use inbnd/outbnd instead of inb/outb which make one think of
>>> byte level io accessors.
>>>
>>> As I look at this I don't think this is the correct API.  I
>>> think we should be using the DMA mapping API to hide these
>>> details.  The concept of mapping like this seems to be more a
>>> function of FSL's Address translation/mapping unit (ATMU) than
>>> anything specific to the RIO bus standard.
>>
>> This is a separate RIO block level ATMU.  Although it looks like the
>> system level ATMU, system ATMU doesn't have the knowledge of rapidIO
>> target device ID.  The mapping need to be dynamic, as it's easy to have
>> more RIO devices than the outbound windows.
>
> I understand that.  What I'm saying is the RIO block level ATMU is a
> Freescale specific detail and not part of any standard RIO bus programming
> model.  We have mapping APIs that we can connect to for this via the DMA API
> layer.
Ok, I see your point now. Do you mean dma_map_*() for DMA API layer?
But in my understanding the current dma_map_*() APIs are preparing
local memory for device to access which is similar to the inbound
case. Is it suitable to also use them for mapping device's space for
CPU access? Can you give an example of using this API for Address
Translation and Mapping purpose?
- Leo
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
2009-06-12 13:27 ` Li Yang
@ 2009-06-12 13:58 ` Kumar Gala
2009-06-15 10:38 ` Li Yang
0 siblings, 1 reply; 19+ messages in thread
From: Kumar Gala @ 2009-06-12 13:58 UTC (permalink / raw)
To: Li Yang; +Cc: Zhang Wei, netdev, linux-kernel, davem, linuxppc-dev, akpm
On Jun 12, 2009, at 8:27 AM, Li Yang wrote:
> On Thu, Jun 11, 2009 at 9:32 PM, Kumar
> Gala<galak@kernel.crashing.org> wrote:
>>
>> On Jun 11, 2009, at 4:47 AM, Li Yang-R58472 wrote:
>>
>>>> On May 12, 2009, at 3:35 AM, Li Yang wrote:
>>>>
>>>>> Add the mapping functions used to support direct IO memory
>>>>> access of
>>>>> rapidIO.
>>>>>
>>>>> Signed-off-by: Zhang Wei <zw@zh-kernel.org>
>>>>> Signed-off-by: Li Yang <leoli@freescale.com>
>>>>
>>>> Use inbnd/outbnd instead of inb/outb which make one think of
>>>> byte level io accessors.
>>>>
>>>> As I look at this I don't think this is the correct API. I
>>>> think we should be using the DMA mapping API to hide these
>>>> details. The concept of mapping like this seems to be more a
>>>> function of FSL's Address translation/mapping unit (ATMU) than
>>>> anything specific to the RIO bus standard.
>>>
>>> This is a separate RIO block level ATMU. Although it looks like the
>>> system level ATMU, system ATMU doesn't have the knowledge of rapidIO
>>> target device ID. The mapping need to be dynamic, as it's easy to
>>> have
>>> more RIO devices than the outbound windows.
>>
>> I understand that. What I'm saying is the RIO block level ATMU is a
>> Freescale specific detail and not part of any standard RIO bus
>> programming
>> model. We have mapping APIs that we can connect to for this via
>> the DMA API
>> layer.
>
> Ok, I see your point now. Do you mean dma_map_*() for DMA API layer?
> But in my understanding the current dma_map_*() APIs are preparing
> local memory for device to access which is similar to the inbound
> case. Is it suitable to also use them for mapping device's space for
> CPU access? Can you give an example of using this API for Address
> Translation and Mapping purpose?
Yes, I meant the dma_map_*() API. Any system with a true IOMMU uses
the dma_map_ layer as the way to do address translation.
- k
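For context, the dma_map_*() layer Kumar refers to looks like this from a driver's point of view (standard API usage, shown only to illustrate where address translation hooks in, not as a drop-in replacement for the proposed RIO calls):

/* Map a CPU buffer for device DMA; the returned dma_addr_t is the bus
 * address after whatever translation the platform (e.g. an IOMMU) applies.
 */
dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, bus))
	return -ENOMEM;

/* ... hand 'bus' to the device, wait for the transfer to complete ... */

dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);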
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access
2009-06-12 13:58 ` Kumar Gala
@ 2009-06-15 10:38 ` Li Yang
0 siblings, 0 replies; 19+ messages in thread
From: Li Yang @ 2009-06-15 10:38 UTC (permalink / raw)
To: Kumar Gala; +Cc: Zhang Wei, netdev, linux-kernel, davem, linuxppc-dev, akpm
On Fri, Jun 12, 2009 at 9:58 PM, Kumar Gala <galak@kernel.crashing.org> wrote:
>
> On Jun 12, 2009, at 8:27 AM, Li Yang wrote:
>
>> On Thu, Jun 11, 2009 at 9:32 PM, Kumar Gala<galak@kernel.crashing.org>
>> wrote:
>>>
>>> On Jun 11, 2009, at 4:47 AM, Li Yang-R58472 wrote:
>>>
>>>>> On May 12, 2009, at 3:35 AM, Li Yang wrote:
>>>>>
>>>>>> Add the mapping functions used to support direct IO memory access of
>>>>>> rapidIO.
>>>>>>
>>>>>> Signed-off-by: Zhang Wei <zw@zh-kernel.org>
>>>>>> Signed-off-by: Li Yang <leoli@freescale.com>
>>>>>
>>>>> Use inbnd/outbnd instead of inb/outb which make one think of
>>>>> byte level io accessors.
>>>>>
>>>>> As I look at this I don't think this is the correct API.  I
>>>>> think we should be using the DMA mapping API to hide these
>>>>> details.  The concept of mapping like this seems to be more a
>>>>> function of FSL's Address translation/mapping unit (ATMU) than
>>>>> anything specific to the RIO bus standard.
>>>>
>>>> This is a separate RIO block level ATMU.  Although it looks like the
>>>> system level ATMU, system ATMU doesn't have the knowledge of rapidIO
>>>> target device ID.  The mapping need to be dynamic, as it's easy to have
>>>> more RIO devices than the outbound windows.
>>>
>>> I understand that.  What I'm saying is the RIO block level ATMU is a
>>> Freescale specific detail and not part of any standard RIO bus
>>> programming
>>> model.  We have mapping APIs that we can connect to for this via the DMA
>>> API
>>> layer.
>>
>> Ok, I see your point now. Do you mean dma_map_*() for DMA API layer?
>> But in my understanding the current dma_map_*() APIs are preparing
>> local memory for device to access which is similar to the inbound
>> case.  Is it suitable to also use them for mapping device's space for
>> CPU access?  Can you give an example of using this API for Address
>> Translation and Mapping purpose?
>
> Yes, I meant the dma_map_*() API.  Any system with a true IOMMU uses the
> dma_map_ layer as the way to do address translation.
The IOMMU case is not very similar to the RapidIO scenario; RapidIO
mapping is more like PCI address space mapping.  To be specific, the DMA
API returns a dma_addr_t, not a RapidIO address type, and it only handles
inbound mappings, not both directions.  I don't think the DMA API is
capable enough to be used here for RIO mapping.  Unless we have a more
universal mapping API, creating a RIO-specific API is justified.
- Leo
^ permalink raw reply [flat|nested] 19+ messages in thread
Thread overview: 19+ messages
2009-05-12 8:35 [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Li Yang
2009-05-12 8:35 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Li Yang
2009-05-12 8:36 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Li Yang
2009-05-12 8:36 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Li Yang
2009-05-12 8:36 ` [PATCH 5/6] rio: warn_unused_result warnings fix Li Yang
2009-05-12 8:36 ` [PATCH 6/6] rio: fix section mismatch Li Yang
2009-05-13 22:08 ` Kumar Gala
2009-05-12 22:11 ` [PATCH 5/6] rio: warn_unused_result warnings fix Andrew Morton
2009-06-11 4:34 ` Kumar Gala
2009-05-12 22:10 ` [PATCH 4/6] rionet: add memory access to simulated Ethernet over rapidio Andrew Morton
2009-05-12 22:05 ` [PATCH 3/6] powerpc: add memory map support to Freescale RapidIO block Andrew Morton
2009-05-21 10:43 ` Li Yang
2009-05-13 22:08 ` [PATCH 2/6] powerpc/fsl_rio: use LAW address from device tree Kumar Gala
2009-06-11 4:13 ` [PATCH 1/6] rapidio: add common mapping APIs for RapidIO memory access Kumar Gala
2009-06-11 9:47 ` Li Yang-R58472
2009-06-11 13:32 ` Kumar Gala
2009-06-12 13:27 ` Li Yang
2009-06-12 13:58 ` Kumar Gala
2009-06-15 10:38 ` Li Yang