* [PATCH][5/5] RapidIO support: net driver over messaging
2005-06-02 21:25 ` [PATCH][4/5] RapidIO support: ppc32 Matt Porter
@ 2005-06-02 21:34 ` Matt Porter
2005-06-02 22:05 ` Stephen Hemminger
0 siblings, 1 reply; 5+ messages in thread
From: Matt Porter @ 2005-06-02 21:34 UTC (permalink / raw)
To: torvalds, akpm, jgarzik; +Cc: netdev, linux-kernel, linuxppc-embedded
Adds an "Ethernet" driver which sends Ethernet packets over the
standard RapidIO messaging. This depends on the core RIO
patch for mailbox/doorbell access.
Signed-off-by: Matt Porter <mporter@kernel.crashing.org>
Index: drivers/net/Kconfig
===================================================================
--- 711ec47634f5d5ded866eaa965a0f7dadcbc65f4/drivers/net/Kconfig (mode:100644)
+++ 8bdd37ff79724c95795ed39c28588a94e1f13e60/drivers/net/Kconfig (mode:100644)
@@ -2185,6 +2185,20 @@
tristate "iSeries Virtual Ethernet driver support"
depends on NETDEVICES && PPC_ISERIES
+config RIONET
+ tristate "RapidIO Ethernet over messaging driver support"
+ depends on NETDEVICES && RAPIDIO
+
+config RIONET_TX_SIZE
+ int "Number of outbound queue entries"
+ depends on RIONET
+ default "128"
+
+config RIONET_RX_SIZE
+ int "Number of inbound queue entries"
+ depends on RIONET
+ default "128"
+
config FDDI
bool "FDDI driver support"
depends on NETDEVICES && (PCI || EISA)
Index: drivers/net/Makefile
===================================================================
--- 711ec47634f5d5ded866eaa965a0f7dadcbc65f4/drivers/net/Makefile (mode:100644)
+++ 8bdd37ff79724c95795ed39c28588a94e1f13e60/drivers/net/Makefile (mode:100644)
@@ -58,6 +58,7 @@
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
+obj-$(CONFIG_RIONET) += rionet.o
#
# end link order section
Index: drivers/net/rionet.c
===================================================================
--- /dev/null (tree:711ec47634f5d5ded866eaa965a0f7dadcbc65f4)
+++ 8bdd37ff79724c95795ed39c28588a94e1f13e60/drivers/net/rionet.c (mode:100644)
@@ -0,0 +1,622 @@
+/*
+ * rionet - Ethernet driver over RapidIO messaging services
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+
+#define DRV_NAME "rionet"
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
+#define DRV_DESC "Ethernet over RapidIO"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+#define RIONET_DEFAULT_MSGLEVEL 0
+#define RIONET_DOORBELL_JOIN 0x1000
+#define RIONET_DOORBELL_LEAVE 0x1001
+
+#define RIONET_MAILBOX 0
+
+#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
+#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
+
+/* List of remote RIO devices that may participate in the rionet */
+LIST_HEAD(rionet_peers);
+
+/* Per-interface driver state, hung off ndev->priv */
+struct rionet_private {
+	struct rio_mport *mport;	/* master port servicing this interface */
+	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];	/* inbound skb ring */
+	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];	/* outbound skb ring */
+	struct net_device_stats stats;
+	int rx_slot;	/* next rx ring slot to (re)fill */
+	int tx_slot;	/* next tx ring slot to queue into */
+	int tx_cnt;	/* number of tx messages outstanding */
+	int ack_slot;	/* next tx slot awaiting completion ack */
+	spinlock_t lock;	/* protects the ring state above */
+	u32 msg_enable;	/* NETIF_MSG_* verbosity bitmask */
+};
+
+/* One remembered remote device that is mbox/doorbell capable */
+struct rionet_peer {
+	struct list_head node;
+	struct rio_dev *rdev;
+	struct resource *res;	/* outbound doorbell resource, set at open */
+};
+
+static int rionet_check = 0;	/* nonzero once local capability was probed */
+static int rionet_capable = 1;	/* local device can participate in rionet */
+static struct net_device *sndev = NULL;	/* XXX single-instance netdev */
+
+/*
+ * This is a fast lookup table for translating TX
+ * Ethernet packets into a destination RIO device. It
+ * could be made into a hash table to save memory depending
+ * on system trade-offs.
+ */
+static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES];
+
+/* True if the PEF/ops words advertise inbound mailbox + inbound
+ * doorbell plus doorbell send/receive operations */
+#define is_rionet_capable(pef, src_ops, dst_ops) \
+	((pef & RIO_PEF_INB_MBOX) && \
+	(pef & RIO_PEF_INB_DOORBELL) && \
+	(src_ops & RIO_SRC_OPS_DOORBELL) && \
+	(dst_ops & RIO_DST_OPS_DOORBELL))
+#define dev_rionet_capable(dev) \
+	is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
+
+/* rionet MACs are 00:01:00:01:<destid>; match on the fixed prefix and
+ * pull the 16-bit destination ID out of the last two octets */
+#define RIONET_MAC_MATCH(x)	(*(u32 *)x == 0x00010001)
+#define RIONET_GET_DESTID(x)	(*(u16 *)(x + 4))
+
+/* net_device get_stats hook: return the private counter block */
+static struct net_device_stats *rionet_stats(struct net_device *ndev)
+{
+	struct rionet_private *rnet = ndev->priv;
+	return &rnet->stats;
+}
+
+/*
+ * Drain received messages from the inbound mailbox into the network
+ * stack. Walks the rx ring starting at rx_slot until either the
+ * mailbox runs out of messages or the ring wraps back to rx_slot.
+ * Returns the slot index it stopped at, for rionet_rx_fill().
+ *
+ * Caller holds rnet->lock (see rionet_inb_msg_event()).
+ */
+static int rionet_rx_clean(struct net_device *ndev)
+{
+	int i;
+	int error = 0;
+	struct rionet_private *rnet = ndev->priv;
+	void *data;
+
+	i = rnet->rx_slot;
+
+	do {
+		/* Slot has no skb (earlier alloc failure): count a drop.
+		 * continue jumps to the while condition, which advances i. */
+		if (!rnet->rx_skb[i]) {
+			rnet->stats.rx_dropped++;
+			continue;
+		}
+
+		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
+			break;
+
+		/*
+		 * NOTE(review): the skb's data pointer is redirected at the
+		 * mailbox buffer rather than copying into the skb's own
+		 * buffer allocated in rionet_rx_fill(). Presumably the
+		 * mailbox buffer IS that skb's buffer (added via
+		 * rio_add_inb_buffer) — confirm against the mport driver,
+		 * otherwise the original skb data is leaked.
+		 */
+		rnet->rx_skb[i]->data = data;
+		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
+		rnet->rx_skb[i]->dev = sndev;
+		rnet->rx_skb[i]->protocol =
+		    eth_type_trans(rnet->rx_skb[i], sndev);
+		error = netif_rx(rnet->rx_skb[i]);
+
+		if (error == NET_RX_DROP) {
+			rnet->stats.rx_dropped++;
+		} else if (error == NET_RX_BAD) {
+			if (netif_msg_rx_err(rnet))
+				printk(KERN_WARNING "%s: bad rx packet\n",
+				       DRV_NAME);
+			rnet->stats.rx_errors++;
+		} else {
+			/* Byte count is the full message size; actual packet
+			 * length is not known here */
+			rnet->stats.rx_packets++;
+			rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE;
+		}
+
+	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
+
+	return i;
+}
+
+/*
+ * Refill the rx ring from rx_slot up to (but not including) 'end'
+ * with freshly allocated skbs, handing each skb's data buffer to the
+ * inbound mailbox. Stops early on allocation failure, leaving NULL
+ * slots that rionet_rx_clean() counts as drops.
+ *
+ * Caller holds rnet->lock.
+ */
+static void rionet_rx_fill(struct net_device *ndev, int end)
+{
+	int i;
+	struct rionet_private *rnet = ndev->priv;
+
+	i = rnet->rx_slot;
+	do {
+		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
+
+		if (!rnet->rx_skb[i])
+			break;
+
+		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
+				   rnet->rx_skb[i]->data);
+	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
+
+	/* Next fill resumes where this one stopped */
+	rnet->rx_slot = i;
+}
+
+/*
+ * Queue one skb as an outbound message to rdev and account for it in
+ * the tx ring. Stops the queue when the ring becomes full; it is
+ * restarted from rionet_outb_msg_event() as completions drain.
+ *
+ * Caller holds rnet->lock.
+ */
+static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
+			       struct rio_dev *rdev)
+{
+	struct rionet_private *rnet = ndev->priv;
+
+	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
+	rnet->tx_skb[rnet->tx_slot] = skb;
+
+	rnet->stats.tx_packets++;
+	rnet->stats.tx_bytes += skb->len;
+
+	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
+		netif_stop_queue(ndev);
+
+	if (++rnet->tx_slot == RIONET_TX_RING_SIZE)
+		rnet->tx_slot = 0;
+
+	if (netif_msg_tx_queued(rnet))
+		/* Use %p for the skb pointer: the original (u32) cast
+		 * truncates pointers on 64-bit platforms */
+		printk(KERN_INFO "%s: queued skb %p len %8.8x\n", DRV_NAME,
+		       skb, skb->len);
+
+	return 0;
+}
+
+/*
+ * hard_start_xmit hook: translate the destination Ethernet MAC into
+ * RIO destination ID(s) and queue the packet to each matching active
+ * peer. Broadcast/multicast (group bit set) is emulated by unicasting
+ * the frame to every active peer.
+ *
+ * Fixes over the original:
+ *  - broadcast queued the SAME skb once per peer while the completion
+ *    handler frees each ring slot's skb, freeing one skb N times; take
+ *    an skb_get() reference per extra send instead.
+ *  - packets with no matching active peer were silently leaked; free
+ *    them and count tx_dropped.
+ */
+static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int i;
+	struct rionet_private *rnet = ndev->priv;
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	u16 destid;
+
+	spin_lock_irq(&rnet->lock);
+
+	if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+		netif_stop_queue(ndev);
+		spin_unlock_irq(&rnet->lock);
+		return -EBUSY;
+	}
+
+	if (eth->h_dest[0] & 0x01) {
+		/*
+		 * XXX Need to delay queuing if ring max is reached,
+		 * flush additional packets in tx_event() before
+		 * awakening the queue. We can easily exceed ring
+		 * size with a large number of nodes or even a
+		 * small number where the ring is relatively full
+		 * on entrance to hard_start_xmit.
+		 */
+		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++)
+			if (rionet_active[i]) {
+				/* One reference per queued copy; the
+				 * completion handler drops each one */
+				skb_get(skb);
+				rionet_queue_tx_msg(skb, ndev,
+						    rionet_active[i]);
+			}
+		/* Drop the reference we were handed on entry */
+		dev_kfree_skb_any(skb);
+	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
+		destid = RIONET_GET_DESTID(eth->h_dest);
+		if (rionet_active[destid])
+			rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
+		else {
+			/* Destination not participating: consume and count */
+			dev_kfree_skb_any(skb);
+			rnet->stats.tx_dropped++;
+		}
+	} else {
+		/* Not a rionet-format MAC: nothing can receive it */
+		dev_kfree_skb_any(skb);
+		rnet->stats.tx_dropped++;
+	}
+
+	spin_unlock_irq(&rnet->lock);
+
+	return 0;
+}
+
+/*
+ * set_mac_address hook: validate and store a new hardware address.
+ *
+ * NOTE(review): tx destid lookup derives from the packet's destination
+ * MAC, not this address — confirm that overriding the generated
+ * 00:01:00:01:<destid> address is actually meaningful for peers.
+ */
+static int rionet_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+	return 0;
+}
+
+/*
+ * change_mtu hook: the RIO message payload is capped at
+ * RIO_MAX_MSG_SIZE bytes, so the MTU may not exceed that minus the
+ * Ethernet header. Reject out-of-range values with -EINVAL instead of
+ * silently ignoring the request (64..68 floor per minimum IPv4 MTU).
+ */
+static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct rionet_private *rnet = ndev->priv;
+
+	if ((new_mtu < 68) || (new_mtu > RIO_MAX_MSG_SIZE - ETH_HLEN)) {
+		if (netif_msg_drv(rnet))
+			printk(KERN_WARNING "%s: invalid mtu %d\n",
+			       DRV_NAME, new_mtu);
+		return -EINVAL;
+	}
+
+	ndev->mtu = new_mtu;
+
+	return 0;
+}
+
+/*
+ * set_multicast_list hook: multicast filtering is not implemented;
+ * frames with the group bit set are already replicated to all active
+ * peers by rionet_start_xmit(). This stub only logs when asked.
+ */
+static void rionet_set_multicast_list(struct net_device *ndev)
+{
+	struct rionet_private *rnet = ndev->priv;
+
+	if (netif_msg_drv(rnet))
+		printk(KERN_WARNING
+		       "%s: rionet_set_multicast_list(): not implemented\n",
+		       DRV_NAME);
+}
+
+/*
+ * Inbound doorbell handler. A JOIN doorbell marks the sending peer
+ * active in rionet_active[] and is acknowledged with a JOIN back so
+ * the sender marks us active too; a LEAVE doorbell deactivates the
+ * peer. Anything else is logged and ignored.
+ */
+static void rionet_dbell_event(struct rio_mport *mport, u16 sid, u16 tid,
+			       u16 info)
+{
+	struct net_device *ndev = sndev;
+	struct rionet_private *rnet = ndev->priv;
+	struct rionet_peer *peer;
+
+	if (netif_msg_intr(rnet))
+		/* Add the missing trailing newline to the debug message */
+		printk(KERN_INFO
+		       "%s: doorbell sid %4.4x tid %4.4x info %4.4x\n",
+		       DRV_NAME, sid, tid, info);
+	if (info == RIONET_DOORBELL_JOIN) {
+		if (!rionet_active[sid]) {
+			list_for_each_entry(peer, &rionet_peers, node) {
+				if (peer->rdev->destid == sid)
+					rionet_active[sid] = peer->rdev;
+			}
+			rio_mport_send_doorbell(mport, sid,
+						RIONET_DOORBELL_JOIN);
+		}
+	} else if (info == RIONET_DOORBELL_LEAVE) {
+		rionet_active[sid] = NULL;
+	} else {
+		if (netif_msg_intr(rnet))
+			printk(KERN_WARNING "%s: unhandled doorbell\n",
+			       DRV_NAME);
+	}
+}
+
+/*
+ * Inbound message event callback: drain received messages into the
+ * stack via rionet_rx_clean() and refill any consumed rx ring slots.
+ * Presumably invoked in interrupt context by the mport driver (the
+ * process-context tx path uses spin_lock_irq on the same lock).
+ */
+static void rionet_inb_msg_event(struct rio_mport *mport, int mbox, int slot)
+{
+	int n;
+	struct net_device *ndev = sndev;
+	struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
+
+	if (netif_msg_intr(rnet))
+		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
+		       DRV_NAME, mbox, slot);
+
+	spin_lock(&rnet->lock);
+	/* Only refill if rx_clean consumed anything */
+	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
+		rionet_rx_fill(ndev, n);
+	spin_unlock(&rnet->lock);
+}
+
+/*
+ * Outbound message completion callback: walk the tx ring from
+ * ack_slot up to the hardware's reported slot, freeing completed skbs
+ * and shrinking tx_cnt, then wake the queue if there is room again.
+ */
+static void rionet_outb_msg_event(struct rio_mport *mport, int mbox, int slot)
+{
+	struct net_device *ndev = sndev;
+	struct rionet_private *rnet = ndev->priv;
+
+	spin_lock(&rnet->lock);
+
+	if (netif_msg_intr(rnet))
+		printk(KERN_INFO
+		       "%s: outbound message event, mbox %d slot %d\n",
+		       DRV_NAME, mbox, slot);
+
+	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
+		/* dma unmap single */
+		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
+		rnet->tx_skb[rnet->ack_slot] = NULL;
+		if (++rnet->ack_slot == RIONET_TX_RING_SIZE)
+			rnet->ack_slot = 0;
+		rnet->tx_cnt--;
+	}
+
+	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+		netif_wake_queue(ndev);
+
+	spin_unlock(&rnet->lock);
+}
+
+/*
+ * net_device open hook: claim the inbound doorbell range and the
+ * inbound/outbound mailboxes, prime the rx ring and tx bookkeeping,
+ * then invite every known peer (whose doorbells are initialized) to
+ * join the rionet.
+ *
+ * Unlike the original, resources acquired earlier are released when a
+ * later request fails (the single "goto out" leaked the doorbell
+ * and/or inbound mailbox on partial failure).
+ */
+static int rionet_open(struct net_device *ndev)
+{
+	int i, rc = 0;
+	struct rionet_peer *peer, *tmp;
+	u32 pwdcsr;
+	struct rionet_private *rnet = ndev->priv;
+
+	if (netif_msg_ifup(rnet))
+		printk(KERN_INFO "%s: open\n", DRV_NAME);
+
+	if ((rc = rio_request_inb_dbell(rnet->mport,
+					RIONET_DOORBELL_JOIN,
+					RIONET_DOORBELL_LEAVE,
+					rionet_dbell_event)) < 0)
+		goto out;
+
+	if ((rc = rio_request_inb_mbox(rnet->mport,
+				       RIONET_MAILBOX,
+				       RIONET_RX_RING_SIZE,
+				       rionet_inb_msg_event)) < 0)
+		goto out_dbell;
+
+	if ((rc = rio_request_outb_mbox(rnet->mport,
+					RIONET_MAILBOX,
+					RIONET_TX_RING_SIZE,
+					rionet_outb_msg_event)) < 0)
+		goto out_inb_mbox;
+
+	/* Initialize inbound message ring */
+	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
+		rnet->rx_skb[i] = NULL;
+	rnet->rx_slot = 0;
+	rionet_rx_fill(ndev, 0);
+
+	rnet->tx_slot = 0;
+	rnet->tx_cnt = 0;
+	rnet->ack_slot = 0;
+
+	spin_lock_init(&rnet->lock);
+
+	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
+
+	netif_carrier_on(ndev);
+	netif_start_queue(ndev);
+
+	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+		/* A peer we cannot doorbell is skipped, not fatal */
+		if (!(peer->res = rio_request_outb_dbell(peer->rdev,
+							 RIONET_DOORBELL_JOIN,
+							 RIONET_DOORBELL_LEAVE))) {
+			printk(KERN_ERR "%s: error requesting doorbells\n",
+			       DRV_NAME);
+			continue;
+		}
+
+		/*
+		 * If device has initialized inbound doorbells,
+		 * send a join message
+		 */
+		rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
+		if (pwdcsr & RIO_DOORBELL_AVAIL)
+			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
+	}
+
+	return 0;
+
+      out_inb_mbox:
+	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
+      out_dbell:
+	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
+			      RIONET_DOORBELL_LEAVE);
+      out:
+	return rc;
+}
+
+/*
+ * net_device stop hook: quiesce the interface, free any skbs still on
+ * the rx ring, send LEAVE to every active peer and release the
+ * doorbell/mailbox resources claimed in rionet_open().
+ */
+static int rionet_close(struct net_device *ndev)
+{
+	struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
+	struct rionet_peer *peer, *tmp;
+	int i;
+
+	/* Gate the shutdown message on the ifdown flag, not ifup */
+	if (netif_msg_ifdown(rnet))
+		printk(KERN_INFO "%s: close\n", DRV_NAME);
+
+	netif_stop_queue(ndev);
+	netif_carrier_off(ndev);
+
+	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
+		if (rnet->rx_skb[i])
+			kfree_skb(rnet->rx_skb[i]);
+
+	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+		if (rionet_active[peer->rdev->destid]) {
+			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
+			rionet_active[peer->rdev->destid] = NULL;
+		}
+		rio_release_outb_dbell(peer->rdev, peer->res);
+	}
+
+	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
+			      RIONET_DOORBELL_LEAVE);
+	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
+	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
+
+	return 0;
+}
+
+/*
+ * rio_driver remove hook: tear down the single netdev instance and
+ * drop all remembered peers.
+ *
+ * The original initialized ndev to NULL and then called
+ * unregister_netdev(NULL) — a guaranteed oops. Use the module-wide
+ * sndev set in rionet_setup_netdev(), and free with free_netdev()
+ * (the counterpart of alloc_etherdev()) rather than raw kfree().
+ * The NULL guard also keeps repeated remove calls (one per rio_dev)
+ * from double-freeing.
+ */
+static void rionet_remove(struct rio_dev *rdev)
+{
+	struct net_device *ndev = sndev;
+	struct rionet_peer *peer, *tmp;
+
+	if (ndev) {
+		unregister_netdev(ndev);
+		free_netdev(ndev);
+		sndev = NULL;
+	}
+
+	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+		list_del(&peer->node);
+		kfree(peer);
+	}
+}
+
+/* do_ioctl hook: no private ioctls are supported (reviewer note:
+ * leaving do_ioctl NULL would achieve the same with -EINVAL) */
+static int rionet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+/* ethtool get_drvinfo: report driver identity and the RIO master
+ * port this interface is bound to */
+static void rionet_get_drvinfo(struct net_device *ndev,
+			       struct ethtool_drvinfo *info)
+{
+	struct rionet_private *rnet = ndev->priv;
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->fw_version, "n/a");
+	sprintf(info->bus_info, "RIO master port %d", rnet->mport->id);
+}
+
+/* ethtool get_msglevel: report the NETIF_MSG_* verbosity mask */
+static u32 rionet_get_msglevel(struct net_device *ndev)
+{
+	struct rionet_private *rnet = ndev->priv;
+
+	return rnet->msg_enable;
+}
+
+/* ethtool set_msglevel: set the NETIF_MSG_* verbosity mask */
+static void rionet_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct rionet_private *rnet = ndev->priv;
+
+	rnet->msg_enable = value;
+}
+
+/* ethtool get_link: carrier state doubles as link state (reviewer
+ * note: ethtool_op_get_link does exactly this) */
+static u32 rionet_get_link(struct net_device *ndev)
+{
+	return netif_carrier_ok(ndev);
+}
+
+/* ethtool operations supported by this driver */
+static struct ethtool_ops rionet_ethtool_ops = {
+	.get_drvinfo = rionet_get_drvinfo,
+	.get_msglevel = rionet_get_msglevel,
+	.set_msglevel = rionet_set_msglevel,
+	.get_link = rionet_get_link,
+};
+
+/*
+ * Allocate, initialize and register the (single) rionet net_device
+ * for the given master port. The MAC is synthesized as
+ * 00:01:00:01:<destid> so peers can translate destination MACs back
+ * to RIO destination IDs.
+ *
+ * Fixes over the original: ndev was leaked when register_netdev()
+ * failed, and the Ethernet header size was a magic "14".
+ */
+static int rionet_setup_netdev(struct rio_mport *mport)
+{
+	int rc = 0;
+	struct net_device *ndev = NULL;
+	struct rionet_private *rnet;
+	u16 device_id;
+
+	/* Allocate our net_device structure */
+	ndev = alloc_etherdev(sizeof(struct rionet_private));
+	if (ndev == NULL) {
+		printk(KERN_INFO "%s: could not allocate ethernet device.\n",
+		       DRV_NAME);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * XXX hack, store point a static at ndev so we can get it...
+	 * Perhaps need an array of these that the handler can
+	 * index via the mbox number.
+	 */
+	sndev = ndev;
+
+	/* Set up private area */
+	rnet = (struct rionet_private *)ndev->priv;
+	rnet->mport = mport;
+
+	/* Set the default MAC address */
+	device_id = rio_local_get_device_id(mport);
+	ndev->dev_addr[0] = 0x00;
+	ndev->dev_addr[1] = 0x01;
+	ndev->dev_addr[2] = 0x00;
+	ndev->dev_addr[3] = 0x01;
+	ndev->dev_addr[4] = device_id >> 8;
+	ndev->dev_addr[5] = device_id & 0xff;
+
+	/* Fill in the driver function table */
+	ndev->open = &rionet_open;
+	ndev->hard_start_xmit = &rionet_start_xmit;
+	ndev->stop = &rionet_close;
+	ndev->get_stats = &rionet_stats;
+	ndev->change_mtu = &rionet_change_mtu;
+	ndev->set_mac_address = &rionet_set_mac_address;
+	ndev->set_multicast_list = &rionet_set_multicast_list;
+	ndev->do_ioctl = &rionet_ioctl;
+	SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+
+	/* Largest payload a single RIO message can carry, less the
+	 * Ethernet header (was a magic 14) */
+	ndev->mtu = RIO_MAX_MSG_SIZE - ETH_HLEN;
+
+	SET_MODULE_OWNER(ndev);
+
+	rc = register_netdev(ndev);
+	if (rc != 0) {
+		/* Original leaked ndev on registration failure */
+		free_netdev(ndev);
+		sndev = NULL;
+		goto out;
+	}
+
+	printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       ndev->name,
+	       DRV_NAME,
+	       DRV_DESC,
+	       DRV_VERSION,
+	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
+	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
+
+      out:
+	return rc;
+}
+
+/*
+ * XXX Make multi-net safe
+ */
+/*
+ * XXX Make multi-net safe
+ */
+/*
+ * rio_driver probe hook, called once per discovered RIO device.
+ * On the first call it verifies the LOCAL port is rionet capable and
+ * registers the netdev; on every call it records the probed remote
+ * device as a peer if it advertises mailbox/doorbell support.
+ */
+static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+	int rc = -ENODEV;
+	u32 lpef, lsrc_ops, ldst_ops;
+	struct rionet_peer *peer;
+
+	/* If local device is not rionet capable, give up quickly */
+	if (!rionet_capable)
+		goto out;
+
+	/*
+	 * First time through, make sure local device is rionet
+	 * capable, setup netdev, and set flags so this is skipped
+	 * on later probes
+	 */
+	if (!rionet_check) {
+		rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
+		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
+					 &lsrc_ops);
+		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
+					 &ldst_ops);
+		if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
+			printk(KERN_ERR
+			       "%s: local device is not network capable\n",
+			       DRV_NAME);
+			rionet_check = 1;
+			rionet_capable = 0;
+			goto out;
+		}
+
+		rc = rionet_setup_netdev(rdev->net->hport);
+		rionet_check = 1;
+	}
+
+	/*
+	 * If the remote device has mailbox/doorbell capabilities,
+	 * add it to the peer list.
+	 */
+	if (dev_rionet_capable(rdev)) {
+		if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		peer->rdev = rdev;
+		list_add_tail(&peer->node, &rionet_peers);
+	}
+
+      out:
+	return rc;
+}
+
+/*
+ * Match any RapidIO device. The table must end in a zeroed sentinel
+ * entry so table walkers know where it stops; the original
+ * single-entry table had no terminator.
+ */
+static struct rio_device_id rionet_id_table[] = {
+	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
+	{0,}			/* terminating entry */
+};
+
+/* RapidIO bus driver binding: probe/remove per discovered device */
+static struct rio_driver rionet_driver = {
+	.name = "rionet",
+	.id_table = rionet_id_table,
+	.probe = rionet_probe,
+	.remove = rionet_remove,
+};
+
+/* Module entry point: register with the RapidIO core */
+static int __init rionet_init(void)
+{
+	return rio_register_driver(&rionet_driver);
+}
+
+/* Module exit point: unregister from the RapidIO core */
+static void __exit rionet_exit(void)
+{
+	rio_unregister_driver(&rionet_driver);
+}
+
+module_init(rionet_init);
+module_exit(rionet_exit);
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH][5/5] RapidIO support: net driver over messaging
2005-06-02 21:34 ` [PATCH][5/5] RapidIO support: net driver over messaging Matt Porter
@ 2005-06-02 22:05 ` Stephen Hemminger
2005-06-03 22:43 ` Matt Porter
0 siblings, 1 reply; 5+ messages in thread
From: Stephen Hemminger @ 2005-06-02 22:05 UTC (permalink / raw)
To: Matt Porter
Cc: akpm, netdev, linux-kernel, torvalds, linuxppc-embedded, jgarzik
How much is this like ethernet? does it still do ARP?
Can it do promiscuous receive?
> +LIST_HEAD(rionet_peers);
Does this have to be global?
Not sure about the locking of this stuff, are you
relying on the RTNL?
> +
> +static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
> +{
> + struct rionet_private *rnet = ndev->priv;
> +
> + if (netif_msg_drv(rnet))
> + printk(KERN_WARNING
> + "%s: rionet_change_mtu(): not implemented\n", DRV_NAME);
> +
> + return 0;
> +}
If you can allow any mtu then don't need this at all.
Or if you are limited then better return an error for bad values.
> +static void rionet_set_multicast_list(struct net_device *ndev)
> +{
> + struct rionet_private *rnet = ndev->priv;
> +
> + if (netif_msg_drv(rnet))
> + printk(KERN_WARNING
> + "%s: rionet_set_multicast_list(): not implemented\n",
> + DRV_NAME);
> +}
If you can't handle it then just leave dev->set_multicast_list
as NULL and all attempts to add or delete will get -EINVAL
> +
> +static int rionet_open(struct net_device *ndev)
> +{
> + /* Initialize inbound message ring */
> + for (i = 0; i < RIONET_RX_RING_SIZE; i++)
> + rnet->rx_skb[i] = NULL;
> + rnet->rx_slot = 0;
> + rionet_rx_fill(ndev, 0);
> +
> + rnet->tx_slot = 0;
> + rnet->tx_cnt = 0;
> + rnet->ack_slot = 0;
> +
> + spin_lock_init(&rnet->lock);
> +
> + rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
Better to do all initialization of the per device data
in the place it is allocated (rio_setup_netdev)
> +
> +static int rionet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
> +{
> + return -EOPNOTSUPP;
> +}
Unneeded, if dev->do_ioctl is NULL, then all private ioctl's will
return -EINVAL that is what you want.
> +
> +static u32 rionet_get_link(struct net_device *ndev)
> +{
> + return netif_carrier_ok(ndev);
> +}
Use ethtool_op_get_link
> +
> +static int rionet_setup_netdev(struct rio_mport *mport)
> +{
> + int rc = 0;
> + struct net_device *ndev = NULL;
> + struct rionet_private *rnet;
> + u16 device_id;
> +
> + /* Allocate our net_device structure */
> + ndev = alloc_etherdev(sizeof(struct rionet_private));
> + if (ndev == NULL) {
> + printk(KERN_INFO "%s: could not allocate ethernet device.\n",
> + DRV_NAME);
> + rc = -ENOMEM;
> + goto out;
> + }
> +
> + /*
> + * XXX hack, store point a static at ndev so we can get it...
> + * Perhaps need an array of these that the handler can
> + * index via the mbox number.
> + */
> + sndev = ndev;
> +
> + /* Set up private area */
> + rnet = (struct rionet_private *)ndev->priv;
> + rnet->mport = mport;
> +
> + /* Set the default MAC address */
> + device_id = rio_local_get_device_id(mport);
> + ndev->dev_addr[0] = 0x00;
> + ndev->dev_addr[1] = 0x01;
> + ndev->dev_addr[2] = 0x00;
> + ndev->dev_addr[3] = 0x01;
> + ndev->dev_addr[4] = device_id >> 8;
> + ndev->dev_addr[5] = device_id & 0xff;
> +
> + /* Fill in the driver function table */
> + ndev->open = &rionet_open;
> + ndev->hard_start_xmit = &rionet_start_xmit;
> + ndev->stop = &rionet_close;
> + ndev->get_stats = &rionet_stats;
> + ndev->change_mtu = &rionet_change_mtu;
> + ndev->set_mac_address = &rionet_set_mac_address;
> + ndev->set_multicast_list = &rionet_set_multicast_list;
> + ndev->do_ioctl = &rionet_ioctl;
> + SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
> +
> + ndev->mtu = RIO_MAX_MSG_SIZE - 14;
> +
> + SET_MODULE_OWNER(ndev);
Can you set any ndev->features to get better performance.
Can you take >32bit data addresses? then set HIGHDMA
You are doing your own locking, can you use LLTX?
Does the hardware support scatter gather?
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH][5/5] RapidIO support: net driver over messaging
2005-06-02 22:05 ` Stephen Hemminger
@ 2005-06-03 22:43 ` Matt Porter
0 siblings, 0 replies; 5+ messages in thread
From: Matt Porter @ 2005-06-03 22:43 UTC (permalink / raw)
To: Stephen Hemminger
Cc: akpm, netdev, linux-kernel, torvalds, linuxppc-embedded, jgarzik
On Thu, Jun 02, 2005 at 03:05:43PM -0700, Stephen Hemminger wrote:
> How much is this like ethernet? does it still do ARP?
It's nothing like Ethernet, the only relation is that an Ethernet network
driver is easy to implement over top of raw message ports on a switched
fabric network. It gives easy access to RIO messaging from userspace
without inventing a new interface.
ARP works by the driver emulating a broadcast over RIO by sending the
same ARP packet to each node that is participating in the rionet. Nodes
join/leave the rionet by sending RIO-specific doorbell messages to
potential participants on the switched fabric. A table is kept to
flag active participants such that a fast lookup can be made to translate
the dst MAC address to a RIO device struct that is used to actually
send the Ethernet packet encapsulated into a standard RIO message
to the appropriate node(s).
> Can it do promiscious receive?
No.
> > +LIST_HEAD(rionet_peers);
>
> Does this have to be global?
Nope, should be static. Fixing.
> Not sure about the locking of this stuff, are you
> relying on the RTNL?
Yes, last I looked that was sufficient for all the entry points.
I protect the driver-specific data (tx skb rings, etc.) with
a private lock.
> > +
> > +static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
> > +{
> > + struct rionet_private *rnet = ndev->priv;
> > +
> > + if (netif_msg_drv(rnet))
> > + printk(KERN_WARNING
> > + "%s: rionet_change_mtu(): not implemented\n", DRV_NAME);
> > +
> > + return 0;
> > +}
>
> If you can allow any mtu then don't need this at all.
> Or if you are limited then better return an error for bad values.
Ok, I do have a upper limit of 4082 as the RIO messages have a
max 4096 byte payload. That's the default on open as well. I'll
fix this up.
> > +static void rionet_set_multicast_list(struct net_device *ndev)
> > +{
> > + struct rionet_private *rnet = ndev->priv;
> > +
> > + if (netif_msg_drv(rnet))
> > + printk(KERN_WARNING
> > + "%s: rionet_set_multicast_list(): not implemented\n",
> > + DRV_NAME);
> > +}
>
> If you can't handle it then just leave dev->set_multicast_list
> as NULL and all attempts to add or delete will get -EINVAL
Will do. It was a placeholder at one point when I thought I might
emulate multicast in the driver...it's fallen down my priority
list.
> > +
> > +static int rionet_open(struct net_device *ndev)
> > +{
>
>
> > + /* Initialize inbound message ring */
> > + for (i = 0; i < RIONET_RX_RING_SIZE; i++)
> > + rnet->rx_skb[i] = NULL;
> > + rnet->rx_slot = 0;
> > + rionet_rx_fill(ndev, 0);
> > +
> > + rnet->tx_slot = 0;
> > + rnet->tx_cnt = 0;
> > + rnet->ack_slot = 0;
> > +
> > + spin_lock_init(&rnet->lock);
> > +
> > + rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
>
> Better to do all initialization of the per device data
> in the place it is allocated (rio_setup_netdev)
Right, will do.
> > +static int rionet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
> > +{
> > + return -EOPNOTSUPP;
> > +}
>
> Unneeded, if dev->do_ioctl is NULL, then all private ioctl's will
> return -EINVAL that is what you want.
Ah, ok. Good, none of the MII stuff applies in this case.
> > +static u32 rionet_get_link(struct net_device *ndev)
> > +{
> > + return netif_carrier_ok(ndev);
> > +}
>
> Use ethtool_op_get_link
Ok
<snip>
> > + /* Fill in the driver function table */
> > + ndev->open = &rionet_open;
> > + ndev->hard_start_xmit = &rionet_start_xmit;
> > + ndev->stop = &rionet_close;
> > + ndev->get_stats = &rionet_stats;
> > + ndev->change_mtu = &rionet_change_mtu;
> > + ndev->set_mac_address = &rionet_set_mac_address;
> > + ndev->set_multicast_list = &rionet_set_multicast_list;
> > + ndev->do_ioctl = &rionet_ioctl;
> > + SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
> > +
> > + ndev->mtu = RIO_MAX_MSG_SIZE - 14;
> > +
> > + SET_MODULE_OWNER(ndev);
>
> Can you set any ndev->features to get better performance.
> Can you take >32bit data addresses? then set HIGHDMA
> You are doing your on locking, can you use LLTX?
> Does the hardware support scatter gather?
Some of these get tricky. In general, rionet could support
SG and with driver help we can flag IP_CSUM. In practice, the
current generation MPC85xx HW on my development system have
some problems with their message port dma queues. In short,
their implementation is such that the arch-specific code is
forced to do a copy of the skb on both tx and rx. Because of
this, adding SG/IP_CSUM doesn't have any value yet...it'll make
sense to add the additional features once we get a platform with
better messaging hardware. HIGHDMA may not be suitable on all
platforms. Since rionet sits on top of a hardware abstraction,
it doesn't have full knowledge of the DMA capabilities of the
hardware. We can eventually have some interfaces to the arch
code to learn that info, but it's not there yet. I have to
look into LLTX, I know what it stands for, but I'm not sure
of the details. Do you have a good LLTX example reference?
That said, my goal is to enable as many features as possible
when we have hw to take advantage of them.
-Matt
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH][5/5] RapidIO support: net driver over messaging
@ 2005-07-27 10:36 Avni Hillel-R58467
2005-07-27 13:56 ` Matt Porter
0 siblings, 1 reply; 5+ messages in thread
From: Avni Hillel-R58467 @ 2005-07-27 10:36 UTC (permalink / raw)
To: 'linuxppc-embedded@ozlabs.org'
Hi Matt,
Two questions:
A. How can a node (not the host) know who is in the rionet to broadcast to them?
B. How do you emulate broadcasting to all the mailboxes, in multi mbox systems? Is this done by the node getting the broadcast in MB 0 and forwarding it to the other MBs?
Regards,
Hillel
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH][5/5] RapidIO support: net driver over messaging
2005-07-27 10:36 [PATCH][5/5] RapidIO support: net driver over messaging Avni Hillel-R58467
@ 2005-07-27 13:56 ` Matt Porter
0 siblings, 0 replies; 5+ messages in thread
From: Matt Porter @ 2005-07-27 13:56 UTC (permalink / raw)
To: Avni Hillel-R58467; +Cc: 'linuxppc-embedded@ozlabs.org'
On Wed, Jul 27, 2005 at 01:36:15PM +0300, Avni Hillel-R58467 wrote:
> Hi Matt,
>
> Two questions:
>
> A. How can a node (not the host) know who is in the rionet to broadcast to them?
All nodes in the rionet are flagged in the active peer list. There is an
active peer list kept for the rionet instance on _each node_. There is
no distinction as to whether a node was the winning enumerating host or
is just another processing element that found devices in the system via
passing discovery. The only inherently significant about a "host" in
RapidIO is that it participates in enumeration. After the system is
enumerated it's no longer special (unless your particular system
application designates the hosts have some special network-wide
ownership of resources or something).
Broadcast works the same way on all nodes by sending the same packet
to every node in the active peer list.
> B. How do you emulate broadcasting to all the mailboxes, in multi mbox systems? Is this done by the node getting the broadcast in MB 0 and forwarding it to the other MBs?
rionet doesn't handle multiple mailboxes yet.
However, it becomes tricky because we don't want to bridge separate
Ethernet networks by policy in the driver. If two mailboxes are
part of separate rio device trees, then it doesn't make sense to send
broadcasts out on both mailboxes. It needs some thought and also
docs on how new silicon might be implementing queues in new mailboxes.
With RIO, there's so much left to be implementation specific in the
silicon. It did not make sense to make assumptions and try to
handle multiple mailboxes. If you have a multi mbox system it would
help to have a description so we can work to support it.
-Matt
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2005-07-27 13:56 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2005-07-27 10:36 [PATCH][5/5] RapidIO support: net driver over messaging Avni Hillel-R58467
2005-07-27 13:56 ` Matt Porter
-- strict thread matches above, loose matches on Subject: below --
2005-06-02 21:03 [PATCH][1/5] RapidIO support: core Matt Porter
2005-06-02 21:12 ` [PATCH][2/5] RapidIO support: core includes Matt Porter
2005-06-02 21:19 ` [PATCH][3/5] RapidIO support: enumeration Matt Porter
2005-06-02 21:25 ` [PATCH][4/5] RapidIO support: ppc32 Matt Porter
2005-06-02 21:34 ` [PATCH][5/5] RapidIO support: net driver over messaging Matt Porter
2005-06-02 22:05 ` Stephen Hemminger
2005-06-03 22:43 ` Matt Porter
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).