* [PATCH][MIPS][7/7] AR7: ethernet
[not found] <200708201704.11529.technoboy85@gmail.com>
@ 2007-09-06 15:34 ` Matteo Croce
2007-09-06 22:30 ` Andrew Morton
0 siblings, 1 reply; 13+ messages in thread
From: Matteo Croce @ 2007-09-06 15:34 UTC (permalink / raw)
To: linux-mips
Cc: Eugene Konev, akpm, jgarzik, netdev, davem, kuznet, pekkas,
jmorris, yoshfuji, kaber
Driver for the TI AR7 CPMAC 100M ethernet controller.
It works fine disabling napi support, enabling it gives a kernel panic
when the first IPv6 packet has to be forwarded.
Other than that works fine.
Signed-off-by: Matteo Croce <technoboy85@gmail.com>
Signed-off-by: Eugene Konev <ejka@imfi.kspu.ru>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d9b7d9c..6f38a84 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1822,6 +1822,15 @@ config SC92031
To compile this driver as a module, choose M here: the module
will be called sc92031. This is recommended.
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+ depends on NET_ETHERNET && EXPERIMENTAL && AR7
+ select PHYLIB
+ select FIXED_PHY
+ select FIXED_MII_100_FDX
+ help
+ TI AR7 CPMAC Ethernet support
+
config NET_POCKET
bool "Pocket and portable adapters"
depends on PARPORT
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 535d2a0..bb22df9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ZNET) += znet.o
obj-$(CONFIG_LAN_SAA9730) += saa9730.o
+obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_ATP) += atp.o
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
new file mode 100644
index 0000000..a20a5b6
--- /dev/null
+++ b/drivers/net/cpmac.c
@@ -0,0 +1,1217 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <asm/ar7/ar7.h>
+#include <gpio.h>
+
+MODULE_AUTHOR("Eugene Konev");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+
+/* Module parameters: rx ring length and a NAPI kill switch. */
+static int rx_ring_size = 64;
+static int disable_napi;
+/* the third module_param() argument is sysfs permission bits, not a
+ * default value; 64 (0100) would set an invalid exec bit on the attr */
+module_param(rx_ring_size, int, 0444);
+module_param(disable_napi, int, 0444);
+MODULE_PARM_DESC(rx_ring_size, "Size of rx ring (in skbs)");
+MODULE_PARM_DESC(disable_napi, "Disable NAPI polling");
+
+/* Register definitions */
+struct cpmac_control_regs {
+ volatile u32 revision;
+ volatile u32 control;
+ volatile u32 teardown;
+ volatile u32 unused;
+} __attribute__ ((packed));
+
+struct cpmac_int_regs {
+ volatile u32 stat_raw;
+ volatile u32 stat_masked;
+ volatile u32 enable;
+ volatile u32 clear;
+} __attribute__ ((packed));
+
+struct cpmac_stats {
+ volatile u32 good;
+ volatile u32 bcast;
+ volatile u32 mcast;
+ volatile u32 pause;
+ volatile u32 crc_error;
+ volatile u32 align_error;
+ volatile u32 oversized;
+ volatile u32 jabber;
+ volatile u32 undersized;
+ volatile u32 fragment;
+ volatile u32 filtered;
+ volatile u32 qos_filtered;
+ volatile u32 octets;
+} __attribute__ ((packed));
+
+struct cpmac_regs {
+ struct cpmac_control_regs tx_ctrl;
+ struct cpmac_control_regs rx_ctrl;
+ volatile u32 unused1[56];
+ volatile u32 mbp;
+/* MBP bits */
+#define MBP_RXPASSCRC 0x40000000
+#define MBP_RXQOS 0x20000000
+#define MBP_RXNOCHAIN 0x10000000
+#define MBP_RXCMF 0x01000000
+#define MBP_RXSHORT 0x00800000
+#define MBP_RXCEF 0x00400000
+#define MBP_RXPROMISC 0x00200000
+#define MBP_PROMISCCHAN(chan) (((chan) & 0x7) << 16)
+#define MBP_RXBCAST 0x00002000
+#define MBP_BCASTCHAN(chan) (((chan) & 0x7) << 8)
+#define MBP_RXMCAST 0x00000020
+#define MBP_MCASTCHAN(chan) ((chan) & 0x7)
+ volatile u32 unicast_enable;
+ volatile u32 unicast_clear;
+ volatile u32 max_len;
+ volatile u32 buffer_offset;
+ volatile u32 filter_flow_threshold;
+ volatile u32 unused2[2];
+ volatile u32 flow_thre[8];
+ volatile u32 free_buffer[8];
+ volatile u32 mac_control;
+#define MAC_TXPTYPE 0x00000200
+#define MAC_TXPACE 0x00000040
+#define MAC_MII 0x00000020
+#define MAC_TXFLOW 0x00000010
+#define MAC_RXFLOW 0x00000008
+#define MAC_MTEST 0x00000004
+#define MAC_LOOPBACK 0x00000002
+#define MAC_FDX 0x00000001
+ volatile u32 mac_status;
+#define MACST_QOS 0x4
+#define MACST_RXFLOW 0x2
+#define MACST_TXFLOW 0x1
+ volatile u32 emc_control;
+ volatile u32 unused3;
+ struct cpmac_int_regs tx_int;
+ volatile u32 mac_int_vector;
+/* Int Status bits */
+#define INTST_STATUS 0x80000
+#define INTST_HOST 0x40000
+#define INTST_RX 0x20000
+#define INTST_TX 0x10000
+ volatile u32 mac_eoi_vector;
+ volatile u32 unused4[2];
+ struct cpmac_int_regs rx_int;
+ volatile u32 mac_int_stat_raw;
+ volatile u32 mac_int_stat_masked;
+ volatile u32 mac_int_enable;
+ volatile u32 mac_int_clear;
+ volatile u32 mac_addr_low[8];
+ volatile u32 mac_addr_mid;
+ volatile u32 mac_addr_high;
+ volatile u32 mac_hash_low;
+ volatile u32 mac_hash_high;
+ volatile u32 boff_test;
+ volatile u32 pac_test;
+ volatile u32 rx_pause;
+ volatile u32 tx_pause;
+ volatile u32 unused5[2];
+ struct cpmac_stats rx_stats;
+ struct cpmac_stats tx_stats;
+ volatile u32 unused6[232];
+ volatile u32 tx_ptr[8];
+ volatile u32 rx_ptr[8];
+ volatile u32 tx_ack[8];
+ volatile u32 rx_ack[8];
+
+} __attribute__ ((packed));
+
+struct cpmac_mdio_regs {
+ volatile u32 version;
+ volatile u32 control;
+#define MDIOC_IDLE 0x80000000
+#define MDIOC_ENABLE 0x40000000
+#define MDIOC_PREAMBLE 0x00100000
+#define MDIOC_FAULT 0x00080000
+#define MDIOC_FAULTDETECT 0x00040000
+#define MDIOC_INTTEST 0x00020000
+#define MDIOC_CLKDIV(div) ((div) & 0xff)
+ volatile u32 alive;
+ volatile u32 link;
+ struct cpmac_int_regs link_int;
+ struct cpmac_int_regs user_int;
+ u32 unused[20];
+ volatile u32 access;
+#define MDIO_BUSY 0x80000000
+#define MDIO_WRITE 0x40000000
+#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
+#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
+#define MDIO_DATA(data) ((data) & 0xffff)
+ volatile u32 physel;
+} __attribute__ ((packed));
+
+/* Descriptor */
+struct cpmac_desc {
+ u32 hw_next;
+ u32 hw_data;
+ u16 buflen;
+ u16 bufflags;
+ u16 datalen;
+ u16 dataflags;
+/* Flags bits */
+#define CPMAC_SOP 0x8000
+#define CPMAC_EOP 0x4000
+#define CPMAC_OWN 0x2000
+#define CPMAC_EOQ 0x1000
+ struct sk_buff *skb;
+ struct cpmac_desc *next;
+} __attribute__ ((packed));
+
+/* Per-interface driver state, stored in netdev_priv(). */
+struct cpmac_priv {
+ struct net_device_stats stats;
+ spinlock_t lock; /* irq{save,restore} */
+ struct sk_buff *skb_pool; /* singly-linked list of preallocated rx skbs */
+ int free_skbs; /* count of skbs in skb_pool */
+ struct cpmac_desc *rx_head; /* next rx descriptor to be reaped */
+ int tx_head, tx_tail; /* indices into the 8 hw tx channels */
+ struct cpmac_desc *desc_ring; /* tx descriptors first, then rx ring */
+ struct cpmac_regs *regs; /* memory-mapped MAC registers */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy;
+ char phy_name[BUS_ID_SIZE];
+ struct plat_cpmac_data *config; /* board-supplied platform data */
+ int oldlink, oldspeed, oldduplex; /* cached PHY state for adjust_link */
+ u32 msg_enable;
+ struct net_device *dev;
+ struct work_struct alloc_work; /* refills skb_pool from process context */
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_reset(struct net_device *dev);
+static void cpmac_hw_init(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+#undef CPMAC_DEBUG
+#define CPMAC_LOW_THRESH 32
+#define CPMAC_ALLOC_SIZE 64
+#define CPMAC_SKB_SIZE 1518
+#define CPMAC_TX_RING_SIZE 8
+
+#ifdef CPMAC_DEBUG
+/* Debug helper: hex-dump (count + 3) / 4 32-bit words starting at
+ * 'base', four words per printed line. */
+static void cpmac_dump_regs(u32 *base, int count)
+{
+ int i;
+ for (i = 0; i < (count + 3) / 4; i++) {
+ if (i % 4 == 0) printk("\nCPMAC[0x%04x]:", i * 4);
+ printk(" 0x%08x", *(base + i));
+ }
+ printk("\n");
+}
+
+/* Debug helper: format up to the first 20 bytes of 'buf' as hex.
+ * Returns a pointer to a shared static buffer, so the result is only
+ * valid until the next call and the helper is not reentrant.
+ * NOTE(review): if size == 0 the buffer keeps its previous contents. */
+static const char *cpmac_dump_buf(const uint8_t *buf, unsigned size)
+{
+ static char buffer[3 * 25 + 1];
+ char *p = &buffer[0];
+ if (size > 20)
+ size = 20;
+ while (size-- > 0) {
+ p += sprintf(p, " %02x", *buf++);
+ }
+ return buffer;
+}
+#endif
+
+/* Read a 16-bit PHY register over the MDIO bus.  Writing the access
+ * register with MDIO_BUSY set starts the transaction; the result is
+ * latched in the low 16 bits once the controller clears BUSY.
+ * NOTE(review): unbounded busy-wait with no cpu_relax() or timeout. */
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct cpmac_mdio_regs *regs = (struct cpmac_mdio_regs *)bus->priv;
+ volatile u32 val;
+
+ /* wait for any previous transaction to finish */
+ while ((val = regs->access) & MDIO_BUSY);
+ /* kick off the read */
+ regs->access = MDIO_BUSY | MDIO_REG(regnum & 0x1f) |
+ MDIO_PHY(phy_id & 0x1f);
+ /* poll for completion; data is in the low half of the register */
+ while ((val = regs->access) & MDIO_BUSY);
+
+ return val & 0xffff;
+}
+
+/* Write a 16-bit PHY register over the MDIO bus.  Waits for the serial
+ * interface to go idle, then fires the write and returns without
+ * waiting for completion (the next access will wait). */
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
+{
+ struct cpmac_mdio_regs *regs = (struct cpmac_mdio_regs *)bus->priv;
+ volatile u32 tmp;
+
+ /* wait for any previous transaction to finish */
+ while ((tmp = regs->access) & MDIO_BUSY);
+ regs->access = MDIO_BUSY | MDIO_WRITE |
+ MDIO_REG(regnum & 0x1f) | MDIO_PHY(phy_id & 0x1f) |
+ val;
+
+ return 0;
+}
+
+/* Reset the MDIO block and re-enable it with a clock divider derived
+ * from the CPMAC bus frequency (target MDIO clock ~2.2 MHz). */
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+ struct cpmac_mdio_regs *regs = (struct cpmac_mdio_regs *)bus->priv;
+
+ ar7_device_reset(AR7_RESET_BIT_MDIO);
+ regs->control = MDIOC_ENABLE |
+ MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1);
+
+ return 0;
+}
+
+static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
+
+static struct mii_bus cpmac_mii = {
+ .name = "cpmac-mii",
+ .read = cpmac_mdio_read,
+ .write = cpmac_mdio_write,
+ .reset = cpmac_mdio_reset,
+ .irq = mii_irqs,
+};
+
+/* set_config hook: reject reconfiguration while the interface is up
+ * and refuse base-address changes; everything else is ignored. */
+static int cpmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr)
+ return -EOPNOTSUPP;
+
+ /* ignore other fields */
+ return 0;
+}
+
+/* Change the MAC address; only allowed while the interface is down,
+ * since the hardware address registers are programmed in hw_init. */
+static int cpmac_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/* Program rx filtering: promiscuous mode, all-multicast, or a 64-bit
+ * multicast hash computed from every address on dev->mc_list. */
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *iter;
+ int i;
+ int hash, tmp;
+ int hashlo = 0, hashhi = 0;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* select promisc channel 0 by clearing the whole channel
+ * field; ~MBP_PROMISCCHAN(0) was a no-op (mask was 0) */
+ priv->regs->mbp &= ~MBP_PROMISCCHAN(7);
+ priv->regs->mbp |= MBP_RXPROMISC;
+ } else {
+ priv->regs->mbp &= ~MBP_RXPROMISC;
+ if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ priv->regs->mac_hash_low = 0xffffffff;
+ priv->regs->mac_hash_high = 0xffffffff;
+ } else {
+ /* fold all six address bytes into a 6-bit bin;
+ * bytes 3-5 were read as 4-6 before, skipping byte 3
+ * and reading one past the 6-byte MAC address */
+ for (i = 0, iter = dev->mc_list; i < dev->mc_count;
+ i++, iter = iter->next) {
+ hash = 0;
+ tmp = iter->dmi_addr[0];
+ hash ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = iter->dmi_addr[1];
+ hash ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = iter->dmi_addr[2];
+ hash ^= (tmp >> 6) ^ tmp;
+ tmp = iter->dmi_addr[3];
+ hash ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = iter->dmi_addr[4];
+ hash ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = iter->dmi_addr[5];
+ hash ^= (tmp >> 6) ^ tmp;
+ hash &= 0x3f;
+ if (hash < 32) {
+ hashlo |= 1<<hash;
+ } else {
+ hashhi |= 1<<(hash - 32);
+ }
+ }
+
+ priv->regs->mac_hash_low = hashlo;
+ priv->regs->mac_hash_high = hashhi;
+ }
+ }
+}
+
+/* Take an rx skb from the private pool, falling back to an atomic
+ * allocation if the pool is empty.  Kicks the refill work item when
+ * the pool runs low.  Caller must hold priv->lock.
+ * NOTE(review): free_skbs is decremented even on the fallback path,
+ * where no pooled skb was consumed. */
+static struct sk_buff *cpmac_get_skb(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ skb = priv->skb_pool;
+ if (likely(skb)) {
+ priv->skb_pool = skb->next;
+ } else {
+ skb = dev_alloc_skb(CPMAC_SKB_SIZE + 2);
+ if (skb) {
+ skb->next = NULL;
+ skb_reserve(skb, 2); /* align IP header */
+ skb->dev = priv->dev;
+ }
+ }
+
+ if (likely(priv->free_skbs))
+ priv->free_skbs--;
+
+ if (priv->free_skbs < CPMAC_LOW_THRESH)
+ schedule_work(&priv->alloc_work);
+
+ return skb;
+}
+
+/* Reap one completed rx descriptor: ack it to the hardware, hand its
+ * skb up (returned to the caller) after swapping in a replacement from
+ * the pool, then re-arm the descriptor for DMA.  Returns NULL on a
+ * zero-length (spurious) completion or when no replacement skb is
+ * available, in which case the packet is dropped and the old skb is
+ * reused in place. */
+static inline struct sk_buff *cpmac_rx_one(struct net_device *dev,
+ struct cpmac_priv *priv,
+ struct cpmac_desc *desc)
+{
+ unsigned long flags;
+ char *data;
+ struct sk_buff *skb, *result = NULL;
+
+ /* acknowledge the descriptor to the MAC */
+ priv->regs->rx_ack[0] = virt_to_phys(desc);
+ if (unlikely(!desc->datalen)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: rx: spurious interrupt\n",
+ dev->name);
+ priv->stats.rx_errors++;
+ return NULL;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ skb = cpmac_get_skb(dev);
+ if (likely(skb)) {
+ /* invalidate the cache over the DMA'd payload before use */
+ data = (char *)phys_to_virt(desc->hw_data);
+ dma_cache_inv((u32)data, desc->datalen);
+ skb_put(desc->skb, desc->datalen);
+ desc->skb->protocol = eth_type_trans(desc->skb, dev);
+ desc->skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += desc->datalen;
+ result = desc->skb;
+ desc->skb = skb; /* replacement buffer for this slot */
+ } else {
+#ifdef CPMAC_DEBUG
+ if (printk_ratelimit())
+ printk("%s: low on skbs, dropping packet\n",
+ dev->name);
+#endif
+ priv->stats.rx_dropped++;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* re-arm the descriptor and give it back to the hardware */
+ desc->hw_data = virt_to_phys(desc->skb->data);
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ dma_cache_wback((u32)desc, 16);
+
+ return result;
+}
+
+/* Non-NAPI receive path: reap every completed rx descriptor and feed
+ * the packets to the stack with netif_rx().  Runs in hard-irq context
+ * under priv->lock. */
+static void cpmac_rx(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ if (unlikely(!priv->rx_head)) {
+ spin_unlock(&priv->lock);
+ return;
+ }
+
+ desc = priv->rx_head;
+ dma_cache_inv((u32)desc, 16);
+#ifdef CPMAC_DEBUG
+ /* previously referenced undeclared 'pkt'/'data' -> build error */
+ printk(KERN_DEBUG "%s: len=%d, %s\n", __func__, desc->datalen,
+ cpmac_dump_buf(phys_to_virt(desc->hw_data), desc->datalen));
+#endif
+
+ while ((desc->dataflags & CPMAC_OWN) == 0) {
+ skb = cpmac_rx_one(dev, priv, desc);
+ if (likely(skb)) {
+ netif_rx(skb);
+ }
+ desc = desc->next;
+ dma_cache_inv((u32)desc, 16);
+ }
+
+ priv->rx_head = desc;
+ priv->regs->rx_ptr[0] = virt_to_phys(desc);
+ spin_unlock(&priv->lock);
+}
+
+/* NAPI poll: reap up to 'quota' completed rx descriptors with
+ * netif_receive_skb().  Returns 0 and re-enables rx interrupts when
+ * the ring is drained, 1 to be polled again.
+ * NOTE(review): unlike cpmac_rx() this runs without priv->lock while
+ * cpmac_rx_one() re-arms descriptors -- a likely candidate for the
+ * NAPI panic mentioned in the changelog; confirm locking. */
+static int cpmac_poll(struct net_device *dev, int *budget)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc;
+ int received = 0, quota = min(dev->quota, *budget);
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!priv->rx_head)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: rx: polling, but no queue\n",
+ dev->name);
+ netif_rx_complete(dev);
+ return 0;
+ }
+
+ desc = priv->rx_head;
+ dma_cache_inv((u32)desc, 16);
+
+ while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
+ skb = cpmac_rx_one(dev, priv, desc);
+ if (likely(skb)) {
+ netif_receive_skb(skb);
+ received++;
+ }
+ desc = desc->next;
+ priv->rx_head = desc;
+ dma_cache_inv((u32)desc, 16);
+ }
+
+ *budget -= received;
+ dev->quota -= received;
+#ifdef CPMAC_DEBUG
+ printk("%s: processed %d packets\n", dev->name, received);
+#endif
+ /* ring drained: hand the ring pointer back and re-enable the irq */
+ if (desc->dataflags & CPMAC_OWN) {
+ priv->regs->rx_ptr[0] = virt_to_phys(desc);
+ netif_rx_complete(dev);
+ priv->regs->rx_int.enable = 0x1;
+ priv->regs->rx_int.clear = 0xfe;
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Work-queue handler: allocate up to CPMAC_ALLOC_SIZE rx skbs in
+ * process context and append them to the tail of the private pool.
+ * The prototype differs across the 2.6.20 workqueue API change. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+static void
+cpmac_alloc_skbs(struct work_struct *work)
+{
+ struct cpmac_priv *priv = container_of(work, struct cpmac_priv,
+ alloc_work);
+#else
+static void
+cpmac_alloc_skbs(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct cpmac_priv *priv = netdev_priv(dev);
+#endif
+ unsigned long flags;
+ int i, num_skbs = 0;
+ struct sk_buff *skb, *skbs = NULL;
+
+ /* build a local list first so the lock is held only briefly */
+ for (i = 0; i < CPMAC_ALLOC_SIZE; i++) {
+ skb = alloc_skb(CPMAC_SKB_SIZE + 2, GFP_KERNEL);
+ if (!skb)
+ break;
+ skb->next = skbs;
+ skb_reserve(skb, 2);
+ skb->dev = priv->dev;
+ num_skbs++;
+ skbs = skb;
+ }
+
+ if (skbs) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* walk to the last element of the existing pool */
+ for (skb = priv->skb_pool; skb && skb->next; skb = skb->next);
+ if (!skb) {
+ priv->skb_pool = skbs;
+ } else {
+ skb->next = skbs;
+ }
+ priv->free_skbs += num_skbs;
+ spin_unlock_irqrestore(&priv->lock, flags);
+#ifdef CPMAC_DEBUG
+ printk("%s: allocated %d skbs\n", priv->dev->name, num_skbs);
+#endif
+ }
+}
+
+/* hard_start_xmit: pad short frames, claim the next of the 8 tx
+ * channels, fill its descriptor and hand it to the MAC.
+ * NOTE(review): the drop paths return -ENOMEM; any non-zero return
+ * makes the core requeue the skb, which is wrong after skb_padto()
+ * already freed it on failure -- they should return 0 (NETDEV_TX_OK)
+ * once the skb has been consumed. */
+static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int len, chan;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ len = skb->len;
+#ifdef CPMAC_DEBUG
+ printk(KERN_DEBUG "%s: len=%d\n", __func__, len); /* cpmac_dump_buf(const uint8_t * buf, unsigned size) */
+#endif
+ if (unlikely(len < ETH_ZLEN)) {
+ if (unlikely(skb_padto(skb, ETH_ZLEN))) {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "%s: padding failed, dropping\n",
+ dev->name);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -ENOMEM;
+ }
+ len = ETH_ZLEN;
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+ /* claim the next channel; stop the queue once the ring wraps */
+ chan = priv->tx_tail++;
+ priv->tx_tail %= 8;
+ if (priv->tx_tail == priv->tx_head)
+ netif_stop_queue(dev);
+
+ desc = &priv->desc_ring[chan];
+ dma_cache_inv((u32)desc, 16);
+ if (desc->dataflags & CPMAC_OWN) {
+ printk(KERN_NOTICE "%s: tx dma ring full, dropping\n", dev->name);
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -ENOMEM;
+ }
+
+ /* fill the descriptor, flush the payload, and start the DMA */
+ dev->trans_start = jiffies;
+ desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+ desc->skb = skb;
+ desc->hw_data = virt_to_phys(skb->data);
+ dma_cache_wback((u32)skb->data, len);
+ desc->buflen = len;
+ desc->datalen = len;
+ desc->hw_next = 0;
+ dma_cache_wback((u32)desc, 16);
+ priv->regs->tx_ptr[chan] = virt_to_phys(desc);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* tx-complete interrupt handler for one channel: ack the descriptor,
+ * account and free its skb, and restart the queue if it was stopped.
+ * NOTE(review): desc->skb is not cleared after the free, leaving a
+ * dangling pointer that cpmac_stop()/cpmac_tx_timeout() may free again. */
+static void cpmac_end_xmit(struct net_device *dev, int channel)
+{
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ desc = &priv->desc_ring[channel];
+ priv->regs->tx_ack[channel] = virt_to_phys(desc);
+ if (likely(desc->skb)) {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += desc->skb->len;
+ dev_kfree_skb_irq(desc->skb);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } else {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "%s: end_xmit: spurious interrupt\n",
+ dev->name);
+ }
+ spin_unlock(&priv->lock);
+}
+
+/* Hard-reset the MAC via the AR7 reset controller, stop both DMA
+ * engines, clear all channel pointers, and take the MII port down. */
+static void cpmac_reset(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ ar7_device_reset(priv->config->reset_bit);
+ priv->regs->rx_ctrl.control &= ~1;
+ priv->regs->tx_ctrl.control &= ~1;
+ for (i = 0; i < 8; i++) {
+ priv->regs->tx_ptr[i] = 0;
+ priv->regs->rx_ptr[i] = 0;
+ }
+ priv->regs->mac_control &= ~MAC_MII; /* disable mii */
+}
+
+/* Walk the rx ring and give every descriptor back to the hardware
+ * (any packet still pending in a reaped-but-unprocessed slot is
+ * counted as dropped).  Despite the name, no memory is freed; the
+ * ring is re-armed for a restart after cpmac_reset(). */
+static inline void cpmac_free_rx_ring(struct net_device *dev)
+{
+ struct cpmac_desc *desc;
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!priv->rx_head))
+ return;
+
+ desc = priv->rx_head;
+ dma_cache_inv((u32)desc, 16);
+
+ for (i = 0; i < rx_ring_size; i++) {
+ desc->buflen = CPMAC_SKB_SIZE;
+ if ((desc->dataflags & CPMAC_OWN) == 0) {
+ desc->dataflags = CPMAC_OWN;
+ priv->stats.rx_dropped++;
+ }
+ dma_cache_wback((u32)desc, 16);
+ desc = desc->next;
+ dma_cache_inv((u32)desc, 16);
+ }
+}
+
+/* Top-level interrupt handler: dispatch tx completions, receive (or
+ * schedule NAPI), write the end-of-interrupt vector, and recover from
+ * host/status errors with a full reset.
+ * NOTE(review): netdev_priv(dev) is evaluated before the !dev guard;
+ * the check should come first (or be dropped -- dev_id is never NULL
+ * here since request_irq passed a valid dev). */
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ u32 status;
+
+ if (!dev)
+ return IRQ_NONE;
+
+ status = priv->regs->mac_int_vector;
+
+ if (status & INTST_TX) {
+ /* low 3 bits hold the completed tx channel */
+ cpmac_end_xmit(dev, (status & 7));
+ }
+
+ if (status & INTST_RX) {
+ if (disable_napi) {
+ cpmac_rx(dev);
+ } else {
+ /* mask rx interrupts until the poll drains the ring */
+ priv->regs->rx_int.enable = 0;
+ priv->regs->rx_int.clear = 0xff;
+ netif_rx_schedule(dev);
+ }
+ }
+
+ priv->regs->mac_eoi_vector = 0;
+
+ if (unlikely(status & (INTST_HOST | INTST_STATUS))) {
+ if (printk_ratelimit()) {
+ printk(KERN_ERR "%s: hw error, resetting...\n", dev->name);
+ }
+ spin_lock(&priv->lock);
+ phy_stop(priv->phy);
+ cpmac_reset(dev);
+ cpmac_free_rx_ring(dev);
+ cpmac_hw_init(dev);
+ spin_unlock(&priv->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Watchdog timeout: reclaim the oldest outstanding tx descriptor and
+ * restart the queue.  Runs in softirq context, hence _any. */
+static void cpmac_tx_timeout(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+
+ priv->stats.tx_errors++;
+ desc = &priv->desc_ring[priv->tx_head++];
+ priv->tx_head %= 8;
+ printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
+ if (desc->skb) {
+ dev_kfree_skb_any(desc->skb);
+ /* clear the pointer so end_xmit/stop can't free it again */
+ desc->skb = NULL;
+ }
+ netif_wake_queue(dev);
+}
+
+/* Forward the MII ioctls to the attached PHY; everything else (or a
+ * down/phy-less device) is rejected with -EINVAL. */
+static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ if (!(netif_running(dev)))
+ return -EINVAL;
+ if (!priv->phy)
+ return -EINVAL;
+ if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
+ (cmd == SIOCSMIIREG))
+ return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);
+
+ return -EINVAL;
+}
+
+/* ethtool get_settings: delegated to phylib. */
+static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (priv->phy)
+ return phy_ethtool_gset(priv->phy, cmd);
+
+ return -EINVAL;
+}
+
+/* ethtool set_settings: delegated to phylib; requires CAP_NET_ADMIN. */
+static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (priv->phy)
+ return phy_ethtool_sset(priv->phy, cmd);
+
+ return -EINVAL;
+}
+
+/* ethtool get_drvinfo: static driver name/version strings. */
+static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "cpmac");
+ strcpy(info->version, "0.0.3");
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "%s", "cpmac");
+ info->regdump_len = 0;
+}
+
+static const struct ethtool_ops cpmac_ethtool_ops = {
+ .get_settings = cpmac_get_settings,
+ .set_settings = cpmac_set_settings,
+ .get_drvinfo = cpmac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/* get_stats hook.
+ * NOTE(review): returns NULL when the device is detached; confirm all
+ * callers of dev->get_stats tolerate a NULL return. */
+static struct net_device_stats *cpmac_stats(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (netif_device_present(dev))
+ return &priv->stats;
+
+ return NULL;
+}
+
+/* change_mtu hook: accept 68..1500 bytes; the hardware buffer size
+ * (CPMAC_SKB_SIZE) is fixed, so only the stored MTU changes. */
+static int cpmac_change_mtu(struct net_device *dev, int mtu)
+{
+ unsigned long flags;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ spinlock_t *lock = &priv->lock;
+
+ if ((mtu < 68) || (mtu > 1500))
+ return -EINVAL;
+
+ spin_lock_irqsave(lock, flags);
+ dev->mtu = mtu;
+ spin_unlock_irqrestore(lock, flags);
+
+ return 0;
+}
+
+/* phylib callback: track link/speed/duplex transitions against the
+ * cached old* values and print a status line on any change. */
+static void cpmac_adjust_link(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->phy->link) {
+ if (priv->phy->duplex != priv->oldduplex) {
+ new_state = 1;
+ priv->oldduplex = priv->phy->duplex;
+ }
+
+ if (priv->phy->speed != priv->oldspeed) {
+ new_state = 1;
+ priv->oldspeed = priv->phy->speed;
+ }
+
+ if (!priv->oldlink) {
+ /* link came up: kick the tx queue */
+ new_state = 1;
+ priv->oldlink = 1;
+ netif_schedule(dev);
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state)
+ phy_print_status(priv->phy);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Program the MAC for operation: rx ring pointer, frame filtering,
+ * station address, interrupt masks, DMA enables, and start the PHY.
+ * Assumes priv->rx_head points at an armed rx ring. */
+static void cpmac_hw_init(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ for (i = 0; i < 8; i++)
+ priv->regs->tx_ptr[i] = 0;
+ priv->regs->rx_ptr[0] = virt_to_phys(priv->rx_head);
+
+ priv->regs->mbp = MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST;
+ priv->regs->unicast_enable = 0x1; /* rx channel 0 only */
+ priv->regs->unicast_clear = 0xfe;
+ priv->regs->buffer_offset = 0;
+ /* the same last address byte is written for all 8 channels */
+ for (i = 0; i < 8; i++)
+ priv->regs->mac_addr_low[i] = dev->dev_addr[5];
+ priv->regs->mac_addr_mid = dev->dev_addr[4];
+ priv->regs->mac_addr_high = dev->dev_addr[0] | (dev->dev_addr[1] << 8)
+ | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
+ priv->regs->max_len = CPMAC_SKB_SIZE;
+ priv->regs->rx_int.enable = 0x1;
+ priv->regs->rx_int.clear = 0xfe;
+ priv->regs->tx_int.enable = 0xff;
+ priv->regs->tx_int.clear = 0;
+ priv->regs->mac_int_enable = 3;
+ priv->regs->mac_int_clear = 0xfc;
+
+ priv->regs->rx_ctrl.control |= 1;
+ priv->regs->tx_ctrl.control |= 1;
+ priv->regs->mac_control |= MAC_MII | MAC_FDX;
+
+ priv->phy->state = PHY_CHANGELINK;
+ phy_start(priv->phy);
+}
+
+/* net_device open: attach the PHY, map the register window, allocate
+ * and arm the descriptor rings, pre-fill the skb pool, install the
+ * interrupt handler, and bring the hardware up.  Unwinds in reverse
+ * order through the fail_* labels on error.
+ * NOTE(review): platform resource 'end' addresses are inclusive, so
+ * the region size should be mem_end - mem_start + 1 -- confirm against
+ * how cpmac_probe fills dev->mem_start/mem_end. */
+static int cpmac_open(struct net_device *dev)
+{
+ int i, size, res;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+ struct sk_buff *skb;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+ priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+#else
+ priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0);
+#endif
+ if (IS_ERR(priv->phy)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(priv->phy);
+ }
+
+ if (!request_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start, dev->name)) {
+ printk("%s: failed to request registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_reserve;
+ }
+
+ priv->regs = ioremap_nocache(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+ if (!priv->regs) {
+ printk("%s: failed to remap registers\n", dev->name);
+ res = -ENXIO;
+ goto fail_remap;
+ }
+
+ /* one contiguous allocation: tx descriptors then the rx ring */
+ priv->rx_head = NULL;
+ size = sizeof(struct cpmac_desc) * (rx_ring_size +
+ CPMAC_TX_RING_SIZE);
+ priv->desc_ring = (struct cpmac_desc *)kmalloc(size, GFP_KERNEL);
+ if (!priv->desc_ring) {
+ res = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ memset((char *)priv->desc_ring, 0, size);
+
+ priv->skb_pool = NULL;
+ priv->free_skbs = 0;
+ priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+ INIT_WORK(&priv->alloc_work, cpmac_alloc_skbs);
+#else
+ INIT_WORK(&priv->alloc_work, cpmac_alloc_skbs, dev);
+#endif
+ /* fill the pool synchronously before arming the rx ring */
+ schedule_work(&priv->alloc_work);
+ flush_scheduled_work();
+
+ /* arm the rx ring: each descriptor owns an skb and links circularly */
+ for (i = 0; i < rx_ring_size; i++) {
+ desc = &priv->rx_head[i];
+ skb = cpmac_get_skb(dev);
+ if (!skb) {
+ res = -ENOMEM;
+ goto fail_desc;
+ }
+ desc->skb = skb;
+ desc->hw_data = virt_to_phys(skb->data);
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ desc->next = &priv->rx_head[(i + 1) % rx_ring_size];
+ desc->hw_next = virt_to_phys(desc->next);
+ dma_cache_wback((u32)desc, 16);
+ }
+
+ if ((res = request_irq(dev->irq, cpmac_irq, SA_INTERRUPT,
+ dev->name, dev))) {
+ printk("%s: failed to obtain irq\n", dev->name);
+ goto fail_irq;
+ }
+
+ cpmac_reset(dev);
+ cpmac_hw_init(dev);
+
+ netif_start_queue(dev);
+ return 0;
+
+fail_irq:
+fail_desc:
+ for (i = 0; i < rx_ring_size; i++)
+ if (priv->rx_head[i].skb)
+ kfree_skb(priv->rx_head[i].skb);
+fail_alloc:
+ kfree(priv->desc_ring);
+
+ /* drain the preallocated skb pool */
+ for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
+ priv->skb_pool = skb->next;
+ kfree_skb(skb);
+ }
+
+ iounmap(priv->regs);
+
+fail_remap:
+ release_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+
+fail_reserve:
+ phy_disconnect(priv->phy);
+
+ return res;
+}
+
+/* net_device stop: quiesce the queue and PHY, reset the MAC, release
+ * irq and register region, and free the rings and the skb pool. */
+static int cpmac_stop(struct net_device *dev)
+{
+ int i;
+ struct sk_buff *skb;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ phy_stop(priv->phy);
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ cpmac_reset(dev);
+
+ for (i = 0; i < 8; i++) {
+ priv->regs->rx_ptr[i] = 0;
+ priv->regs->tx_ptr[i] = 0;
+ priv->regs->mbp = 0; /* rewritten each pass; harmless */
+ }
+
+ free_irq(dev->irq, dev);
+ release_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+
+ /* stop any in-flight pool refill before tearing the pool down */
+ cancel_delayed_work(&priv->alloc_work);
+ flush_scheduled_work();
+
+ priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
+ for (i = 0; i < rx_ring_size; i++)
+ if (priv->rx_head[i].skb)
+ kfree_skb(priv->rx_head[i].skb);
+
+ kfree(priv->desc_ring);
+
+ for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
+ priv->skb_pool = skb->next;
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
+static int external_switch;
+
+/* Platform probe: pick the first PHY allowed by the platform mask
+ * (falling back to a fixed PHY when an external switch was detected),
+ * allocate the net_device, wire up its ops, and register it. */
+static int __devinit cpmac_probe(struct platform_device *pdev)
+{
+ int i, rc, phy_id;
+ struct resource *res;
+ struct cpmac_priv *priv;
+ struct net_device *dev;
+ struct plat_cpmac_data *pdata;
+
+ if (strcmp(pdev->name, "cpmac") != 0)
+ return -ENODEV;
+
+ pdata = pdev->dev.platform_data;
+
+ /* first PHY present on the bus and allowed by the platform mask */
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ if (!(pdata->phy_mask & (1 << phy_id)))
+ continue;
+ if (!cpmac_mii.phy_map[phy_id])
+ continue;
+ break;
+ }
+
+ if (phy_id == PHY_MAX_ADDR) {
+ if (external_switch) {
+ phy_id = 0;
+ } else {
+ printk("cpmac: no PHY present\n");
+ return -ENODEV;
+ }
+ }
+
+ dev = alloc_etherdev(sizeof(struct cpmac_priv));
+
+ if (!dev) {
+ printk(KERN_ERR "cpmac: Unable to allocate net_device structure!\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ platform_set_drvdata(pdev, dev);
+ priv = netdev_priv(dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dev->mem_start = res->start;
+ dev->mem_end = res->end;
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+
+ dev->mtu = 1500;
+ dev->open = cpmac_open;
+ dev->stop = cpmac_stop;
+ dev->set_config = cpmac_config;
+ dev->hard_start_xmit = cpmac_start_xmit;
+ dev->do_ioctl = cpmac_ioctl;
+ dev->get_stats = cpmac_stats;
+ dev->change_mtu = cpmac_change_mtu;
+ dev->set_mac_address = cpmac_set_mac_address;
+ dev->set_multicast_list = cpmac_set_multicast_list;
+ dev->tx_timeout = cpmac_tx_timeout;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
+ if (!disable_napi) {
+ dev->poll = cpmac_poll;
+ dev->weight = min(rx_ring_size, 64);
+ }
+
+ memset(priv, 0, sizeof(struct cpmac_priv));
+ spin_lock_init(&priv->lock);
+ priv->msg_enable = netif_msg_init(NETIF_MSG_WOL, 0x3fff);
+ priv->config = pdata;
+ priv->dev = dev;
+ /* NOTE(review): sizeof(dev->dev_addr) is the full MAX_ADDR_LEN
+ * array, not 6 (ETH_ALEN); confirm config->dev_addr is that big */
+ memcpy(dev->dev_addr, priv->config->dev_addr, sizeof(dev->dev_addr));
+ if (phy_id == 31) {
+ snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
+ cpmac_mii.id, phy_id);
+ } else {
+ /* fixed 100/full PHY for switch configurations */
+ snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
+ }
+
+ if ((rc = register_netdev(dev))) {
+ printk("cpmac: error %i registering device %s\n",
+ rc, dev->name);
+ goto fail;
+ }
+
+ printk("cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
+ dev->name, (u32 *)dev->mem_start, dev->irq,
+ priv->phy_name);
+ for (i = 0; i < 6; i++)
+ printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");
+
+ return 0;
+
+fail:
+ free_netdev(dev);
+ return rc;
+}
+
+/* Platform remove: unregister and free the net_device. */
+static int __devexit cpmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver cpmac_driver = {
+ .driver.name = "cpmac",
+ .probe = cpmac_probe,
+ .remove = cpmac_remove,
+};
+
+/* Module init: map and reset the MDIO block, scan the 'alive' mask to
+ * detect attached PHYs (more than one live bit implies an external
+ * switch), then register the MII bus and the platform driver. */
+int __devinit cpmac_init(void)
+{
+ volatile u32 mask;
+ int i, res;
+ cpmac_mii.priv = (struct cpmac_mdio_regs *)
+ ioremap_nocache(AR7_REGS_MDIO, sizeof(struct cpmac_mdio_regs));
+
+ if (!cpmac_mii.priv) {
+ printk("Can't ioremap mdio registers\n");
+ return -ENXIO;
+ }
+
+#warning FIXME: unhardcode gpio&reset bits
+ ar7_gpio_disable(26);
+ ar7_gpio_disable(27);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+ ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+ cpmac_mii.reset(&cpmac_mii);
+
+ /* poll until at least one PHY reports alive (bounded spin) */
+ for (i = 0; i < 300000; i++) {
+ mask = ((struct cpmac_mdio_regs *)cpmac_mii.priv)->alive;
+ if (mask)
+ break;
+ }
+
+ mask &= 0x7fffffff;
+ if (mask & (mask - 1)) {
+ /* more than one bit set: external switch, probe no PHYs */
+ external_switch = 1;
+ mask = 0;
+ }
+
+ cpmac_mii.phy_mask = ~(mask | 0x80000000);
+
+ res = mdiobus_register(&cpmac_mii);
+ if (res)
+ goto fail_mii;
+
+ res = platform_driver_register(&cpmac_driver);
+ if (res)
+ goto fail_cpmac;
+
+ return 0;
+
+fail_cpmac:
+ mdiobus_unregister(&cpmac_mii);
+
+fail_mii:
+ iounmap(cpmac_mii.priv);
+
+ return res;
+}
+
+/* Module exit: tear down in reverse registration order.
+ * NOTE(review): cpmac_mii.priv is not iounmap'ed here -- compare with
+ * the cpmac_init() error path. */
+void __devexit cpmac_exit(void)
+{
+ platform_driver_unregister(&cpmac_driver);
+ mdiobus_unregister(&cpmac_mii);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-06 15:34 ` Matteo Croce
@ 2007-09-06 22:30 ` Andrew Morton
2007-09-06 23:04 ` Randy Dunlap
` (2 more replies)
0 siblings, 3 replies; 13+ messages in thread
From: Andrew Morton @ 2007-09-06 22:30 UTC (permalink / raw)
To: Matteo Croce
Cc: linux-mips, ejka, jgarzik, netdev, davem, kuznet, pekkas, jmorris,
yoshfuji, kaber
> On Thu, 6 Sep 2007 17:34:10 +0200 Matteo Croce <technoboy85@gmail.com> wrote:
> Driver for the cpmac 100M ethernet driver.
> It works fine disabling napi support, enabling it gives a kernel panic
> when the first IPv6 packet has to be forwarded.
> Other than that works fine.
>
I'm not too sure why I got cc'ed on this (and not on patches 1-6?) but
whatever.
This patch introduces quite a number of basic coding-style mistakes.
Please run it through scripts/checkpatch.pl and review the output.
The patch introduces vast number of volatile structure fields. Please see
Documentation/volatile-considered-harmful.txt.
The patch introduces a modest number of unneeded (and undesirable) casts of
void*, such as
+ struct cpmac_mdio_regs *regs = (struct cpmac_mdio_regs *)bus->priv;
please check for those and fix them up.
The driver implements a driver-private skb pool. I don't know if this is
something which we like net drivers doing? If it is approved then surely
there should be a common implementation for it somewhere?
The driver does a lot of open-coded dma_cache_inv() calls (in a way which
assumes a 32-bit bus, too). I assume that dma_cache_inv() is some mips
thing. I'd have thought that it would be better to use the dma mapping API
throughout the driver, and its associated dma invalidation APIs.
The driver has some LINUX_VERSION_CODE ifdefs. We usually prefer that such
code not be present in a merged-up driver.
> + priv->regs->mac_hash_low = 0xffffffff;
> + priv->regs->mac_hash_high = 0xffffffff;
> + } else {
> + for (i = 0, iter = dev->mc_list; i < dev->mc_count;
> + i++, iter = iter->next) {
> + hash = 0;
> + tmp = iter->dmi_addr[0];
> + hash ^= (tmp >> 2) ^ (tmp << 4);
> + tmp = iter->dmi_addr[1];
> + hash ^= (tmp >> 4) ^ (tmp << 2);
> + tmp = iter->dmi_addr[2];
> + hash ^= (tmp >> 6) ^ tmp;
> + tmp = iter->dmi_addr[4];
> + hash ^= (tmp >> 2) ^ (tmp << 4);
> + tmp = iter->dmi_addr[5];
> + hash ^= (tmp >> 4) ^ (tmp << 2);
> + tmp = iter->dmi_addr[6];
> + hash ^= (tmp >> 6) ^ tmp;
> + hash &= 0x3f;
> + if (hash < 32) {
> + hashlo |= 1<<hash;
> + } else {
> + hashhi |= 1<<(hash - 32);
> + }
> + }
> +
> + priv->regs->mac_hash_low = hashlo;
> + priv->regs->mac_hash_high = hashhi;
> + }
Do we not have a library function anywhere which will perform this little
multicasting hash?
> +static inline struct sk_buff *cpmac_rx_one(struct net_device *dev,
> + struct cpmac_priv *priv,
> + struct cpmac_desc *desc)
> +{
> + unsigned long flags;
> + char *data;
> + struct sk_buff *skb, *result = NULL;
> +
> + priv->regs->rx_ack[0] = virt_to_phys(desc);
> + if (unlikely(!desc->datalen)) {
> + if (printk_ratelimit())
> + printk(KERN_WARNING "%s: rx: spurious interrupt\n",
> + dev->name);
> + priv->stats.rx_errors++;
> + return NULL;
> + }
> +
> + spin_lock_irqsave(&priv->lock, flags);
> + skb = cpmac_get_skb(dev);
> + if (likely(skb)) {
> + data = (char *)phys_to_virt(desc->hw_data);
> + dma_cache_inv((u32)data, desc->datalen);
> + skb_put(desc->skb, desc->datalen);
> + desc->skb->protocol = eth_type_trans(desc->skb, dev);
> + desc->skb->ip_summed = CHECKSUM_NONE;
> + priv->stats.rx_packets++;
> + priv->stats.rx_bytes += desc->datalen;
> + result = desc->skb;
> + desc->skb = skb;
> + } else {
> +#ifdef CPMAC_DEBUG
> + if (printk_ratelimit())
> + printk("%s: low on skbs, dropping packet\n",
> + dev->name);
> +#endif
> + priv->stats.rx_dropped++;
> + }
> + spin_unlock_irqrestore(&priv->lock, flags);
> +
> + desc->hw_data = virt_to_phys(desc->skb->data);
> + desc->buflen = CPMAC_SKB_SIZE;
> + desc->dataflags = CPMAC_OWN;
> + dma_cache_wback((u32)desc, 16);
> +
> + return result;
> +}
This function is far too large to be inlined.
> +static irqreturn_t cpmac_irq(int irq, void *dev_id)
> +{
> + struct net_device *dev = (struct net_device *)dev_id;
unneeded cast
> +static void cpmac_tx_timeout(struct net_device *dev)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> + struct cpmac_desc *desc;
> +
> + priv->stats.tx_errors++;
> + desc = &priv->desc_ring[priv->tx_head++];
> + priv->tx_head %= 8;
Is locking not needed for the above?
> +static int __devinit cpmac_probe(struct platform_device *pdev)
> +{
> + int i, rc, phy_id;
> + struct resource *res;
> + struct cpmac_priv *priv;
> + struct net_device *dev;
> + struct plat_cpmac_data *pdata;
> +
> + if (strcmp(pdev->name, "cpmac") != 0)
> + return -ENODEV;
I don't think this can happen? If it can, something is pretty screwed up?
> + pdata = pdev->dev.platform_data;
> +
> + for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
> + if (!(pdata->phy_mask & (1 << phy_id)))
> + continue;
> + if (!cpmac_mii.phy_map[phy_id])
> + continue;
> + break;
> + }
> +
> + if (phy_id == PHY_MAX_ADDR) {
> + if (external_switch) {
> + phy_id = 0;
> + } else {
> + printk("cpmac: no PHY present\n");
> + return -ENODEV;
> + }
> + }
> +
> + dev = alloc_etherdev(sizeof(struct cpmac_priv));
> +
> + if (!dev) {
> + printk(KERN_ERR "cpmac: Unable to allocate net_device structure!\n");
> + return -ENOMEM;
> + }
> +
> + SET_MODULE_OWNER(dev);
> + platform_set_drvdata(pdev, dev);
> + priv = netdev_priv(dev);
> +
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
> + if (!res) {
> + rc = -ENODEV;
> + goto fail;
> + }
> +
> + dev->mem_start = res->start;
> + dev->mem_end = res->end;
> + dev->irq = platform_get_irq_byname(pdev, "irq");
> +
> + dev->mtu = 1500;
> + dev->open = cpmac_open;
> + dev->stop = cpmac_stop;
> + dev->set_config = cpmac_config;
> + dev->hard_start_xmit = cpmac_start_xmit;
> + dev->do_ioctl = cpmac_ioctl;
> + dev->get_stats = cpmac_stats;
> + dev->change_mtu = cpmac_change_mtu;
> + dev->set_mac_address = cpmac_set_mac_address;
> + dev->set_multicast_list = cpmac_set_multicast_list;
> + dev->tx_timeout = cpmac_tx_timeout;
> + dev->ethtool_ops = &cpmac_ethtool_ops;
> + if (!disable_napi) {
> + dev->poll = cpmac_poll;
> + dev->weight = min(rx_ring_size, 64);
> + }
> +
> + memset(priv, 0, sizeof(struct cpmac_priv));
I think alloc_etherdev() already did that?
> + spin_lock_init(&priv->lock);
> + priv->msg_enable = netif_msg_init(NETIF_MSG_WOL, 0x3fff);
> + priv->config = pdata;
> + priv->dev = dev;
> + memcpy(dev->dev_addr, priv->config->dev_addr, sizeof(dev->dev_addr));
> + if (phy_id == 31) {
> + snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
> + cpmac_mii.id, phy_id);
> + } else {
> + snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
> + }
> +
> + if ((rc = register_netdev(dev))) {
> + printk("cpmac: error %i registering device %s\n",
> + rc, dev->name);
> + goto fail;
> + }
> +
> + printk("cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
> + dev->name, (u32 *)dev->mem_start, dev->irq,
> + priv->phy_name);
> + for (i = 0; i < 6; i++)
> + printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");
> +
> + return 0;
> +
> +fail:
> + free_netdev(dev);
> + return rc;
> +}
> +
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-06 22:30 ` Andrew Morton
@ 2007-09-06 23:04 ` Randy Dunlap
2007-09-06 23:21 ` Matteo Croce
2007-09-07 7:10 ` Geert Uytterhoeven
2 siblings, 0 replies; 13+ messages in thread
From: Randy Dunlap @ 2007-09-06 23:04 UTC (permalink / raw)
To: Andrew Morton
Cc: Matteo Croce, linux-mips, ejka, jgarzik, netdev, davem, kuznet,
pekkas, jmorris, yoshfuji, kaber
On Thu, 6 Sep 2007 15:30:25 -0700 Andrew Morton wrote:
> > On Thu, 6 Sep 2007 17:34:10 +0200 Matteo Croce <technoboy85@gmail.com> wrote:
> > Driver for the cpmac 100M ethernet driver.
> > It works fine disabling napi support, enabling it gives a kernel panic
> > when the first IPv6 packet has to be forwarded.
> > Other than that works fine.
> >
>
> I'm not too sure why I got cc'ed on this (and not on patches 1-6?) but
> whatever.
>
> This patch introduces quite a number of basic coding-style mistakes.
> Please run it through scripts/checkpatch.pl and review the output.
>
> The patch introduces vast number of volatile structure fields. Please see
> Documentation/volatile-considered-harmful.txt.
>
> The patch inroduces a modest number of unneeded (and undesirable) casts of
> void*, such as
>
> + struct cpmac_mdio_regs *regs = (struct cpmac_mdio_regs *)bus->priv;
>
> please check for those and fix them up.
>
> The driver implements a driver-private skb pool. I don't know if this is
> something which we like net drivers doing? If it is approved then surely
> there should be a common implementation for it somewhere?
>
> The driver does a lot of open-coded dma_cache_inv() calls (in a way which
> assumes a 32-bit bus, too). I assume that dma_cache_inv() is some mips
> thing. I'd have thought that it would be better to use the dma mapping API
> thoughout the driver, and its associated dma invalidation APIs.
>
> The driver has some LINUX_VERSION_CODE ifdefs. We usually prefer that such
> code not be present in a merged-up driver.
>
>
>
> > + priv->regs->mac_hash_low = 0xffffffff;
> > + priv->regs->mac_hash_high = 0xffffffff;
> > + } else {
> > + for (i = 0, iter = dev->mc_list; i < dev->mc_count;
> > + i++, iter = iter->next) {
> > + hash = 0;
> > + tmp = iter->dmi_addr[0];
> > + hash ^= (tmp >> 2) ^ (tmp << 4);
> > + tmp = iter->dmi_addr[1];
> > + hash ^= (tmp >> 4) ^ (tmp << 2);
> > + tmp = iter->dmi_addr[2];
> > + hash ^= (tmp >> 6) ^ tmp;
> > + tmp = iter->dmi_addr[4];
> > + hash ^= (tmp >> 2) ^ (tmp << 4);
> > + tmp = iter->dmi_addr[5];
> > + hash ^= (tmp >> 4) ^ (tmp << 2);
> > + tmp = iter->dmi_addr[6];
> > + hash ^= (tmp >> 6) ^ tmp;
> > + hash &= 0x3f;
> > + if (hash < 32) {
> > + hashlo |= 1<<hash;
> > + } else {
> > + hashhi |= 1<<(hash - 32);
> > + }
> > + }
> > +
> > + priv->regs->mac_hash_low = hashlo;
> > + priv->regs->mac_hash_high = hashhi;
> > + }
>
> Do we not have a library function anywhere which will perform this little
> multicasting hash?
Depends on the ethernet controller, but the ones that I know about
just use a CRC (crc-16 IIRC) calculation for the multicast hash.
---
~Randy
*** Remember to use Documentation/SubmitChecklist when testing your code ***
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-06 22:30 ` Andrew Morton
2007-09-06 23:04 ` Randy Dunlap
@ 2007-09-06 23:21 ` Matteo Croce
2007-09-07 0:41 ` Andrew Morton
2007-09-07 23:04 ` Jeff Garzik
2007-09-07 7:10 ` Geert Uytterhoeven
2 siblings, 2 replies; 13+ messages in thread
From: Matteo Croce @ 2007-09-06 23:21 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mips, ejka, jgarzik, netdev, davem, kuznet, pekkas, jmorris,
yoshfuji, kaber
Il Friday 07 September 2007 00:30:25 Andrew Morton ha scritto:
> > On Thu, 6 Sep 2007 17:34:10 +0200 Matteo Croce <technoboy85@gmail.com> wrote:
> > Driver for the cpmac 100M ethernet driver.
> > It works fine disabling napi support, enabling it gives a kernel panic
> > when the first IPv6 packet has to be forwarded.
> > Other than that works fine.
> >
>
> I'm not too sure why I got cc'ed on this (and not on patches 1-6?) but
> whatever.
I mailed every maintainer in the respective section in the file MAINTAINERS
and you were in the "NETWORK DEVICE DRIVERS" section
> This patch introduces quite a number of basic coding-style mistakes.
> Please run it through scripts/checkpatch.pl and review the output.
Already done. I'm collecting other suggestions before committing
> The patch introduces vast number of volatile structure fields. Please see
> Documentation/volatile-considered-harmful.txt.
If I remove them, the kernel hangs at module load
> The patch inroduces a modest number of unneeded (and undesirable) casts of
> void*, such as
>
> + struct cpmac_mdio_regs *regs = (struct cpmac_mdio_regs *)bus->priv;
>
> please check for those and fix them up.
Done
> The driver implements a driver-private skb pool. I don't know if this is
> something which we like net drivers doing? If it is approved then surely
> there should be a common implementation for it somewhere?
Are you referring at cpmac_poll?
> The driver has some LINUX_VERSION_CODE ifdefs. We usually prefer that such
> code not be present in a merged-up driver.
I will remove them in the final release; for now I need them for testing,
since my running kernel is older than current git
>
> > + priv->regs->mac_hash_low = 0xffffffff;
> > + priv->regs->mac_hash_high = 0xffffffff;
> > + } else {
> > + for (i = 0, iter = dev->mc_list; i < dev->mc_count;
> > + i++, iter = iter->next) {
> > + hash = 0;
> > + tmp = iter->dmi_addr[0];
> > + hash ^= (tmp >> 2) ^ (tmp << 4);
> > + tmp = iter->dmi_addr[1];
> > + hash ^= (tmp >> 4) ^ (tmp << 2);
> > + tmp = iter->dmi_addr[2];
> > + hash ^= (tmp >> 6) ^ tmp;
> > + tmp = iter->dmi_addr[4];
> > + hash ^= (tmp >> 2) ^ (tmp << 4);
> > + tmp = iter->dmi_addr[5];
> > + hash ^= (tmp >> 4) ^ (tmp << 2);
> > + tmp = iter->dmi_addr[6];
> > + hash ^= (tmp >> 6) ^ tmp;
> > + hash &= 0x3f;
> > + if (hash < 32) {
> > + hashlo |= 1<<hash;
> > + } else {
> > + hashhi |= 1<<(hash - 32);
> > + }
> > + }
> > +
> > + priv->regs->mac_hash_low = hashlo;
> > + priv->regs->mac_hash_high = hashhi;
> > + }
>
> Do we not have a library function anywhere which will perform this little
> multicasting hash?
Can you tell me the function so I'll implement it?
> > +static inline struct sk_buff *cpmac_rx_one(struct net_device *dev,
> > + struct cpmac_priv *priv,
> > + struct cpmac_desc *desc)
> > +{
> > + unsigned long flags;
> > + char *data;
> > + struct sk_buff *skb, *result = NULL;
> > +
> > + priv->regs->rx_ack[0] = virt_to_phys(desc);
> > + if (unlikely(!desc->datalen)) {
> > + if (printk_ratelimit())
> > + printk(KERN_WARNING "%s: rx: spurious interrupt\n",
> > + dev->name);
> > + priv->stats.rx_errors++;
> > + return NULL;
> > + }
> > +
> > + spin_lock_irqsave(&priv->lock, flags);
> > + skb = cpmac_get_skb(dev);
> > + if (likely(skb)) {
> > + data = (char *)phys_to_virt(desc->hw_data);
> > + dma_cache_inv((u32)data, desc->datalen);
> > + skb_put(desc->skb, desc->datalen);
> > + desc->skb->protocol = eth_type_trans(desc->skb, dev);
> > + desc->skb->ip_summed = CHECKSUM_NONE;
> > + priv->stats.rx_packets++;
> > + priv->stats.rx_bytes += desc->datalen;
> > + result = desc->skb;
> > + desc->skb = skb;
> > + } else {
> > +#ifdef CPMAC_DEBUG
> > + if (printk_ratelimit())
> > + printk("%s: low on skbs, dropping packet\n",
> > + dev->name);
> > +#endif
> > + priv->stats.rx_dropped++;
> > + }
> > + spin_unlock_irqrestore(&priv->lock, flags);
> > +
> > + desc->hw_data = virt_to_phys(desc->skb->data);
> > + desc->buflen = CPMAC_SKB_SIZE;
> > + desc->dataflags = CPMAC_OWN;
> > + dma_cache_wback((u32)desc, 16);
> > +
> > + return result;
> > +}
>
> This function is far too large to be inlined.
>
> > +static irqreturn_t cpmac_irq(int irq, void *dev_id)
> > +{
> > + struct net_device *dev = (struct net_device *)dev_id;
>
> unneeded cast
fixed
> > +static int __devinit cpmac_probe(struct platform_device *pdev)
> > +{
> > + int i, rc, phy_id;
> > + struct resource *res;
> > + struct cpmac_priv *priv;
> > + struct net_device *dev;
> > + struct plat_cpmac_data *pdata;
> > +
> > + if (strcmp(pdev->name, "cpmac") != 0)
> > + return -ENODEV;
>
> I don't think this can happen? If it can, something is pretty screwed up?
Hehe, so screwed that you won't care about your ethernet ;)
> > + pdata = pdev->dev.platform_data;
> > +
> > + for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
> > + if (!(pdata->phy_mask & (1 << phy_id)))
> > + continue;
> > + if (!cpmac_mii.phy_map[phy_id])
> > + continue;
> > + break;
> > + }
> > +
> > + if (phy_id == PHY_MAX_ADDR) {
> > + if (external_switch) {
> > + phy_id = 0;
> > + } else {
> > + printk("cpmac: no PHY present\n");
> > + return -ENODEV;
> > + }
> > + }
> > +
> > + dev = alloc_etherdev(sizeof(struct cpmac_priv));
> > +
> > + if (!dev) {
> > + printk(KERN_ERR "cpmac: Unable to allocate net_device structure!\n");
> > + return -ENOMEM;
> > + }
> > +
> > + SET_MODULE_OWNER(dev);
> > + platform_set_drvdata(pdev, dev);
> > + priv = netdev_priv(dev);
> > +
> > + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
> > + if (!res) {
> > + rc = -ENODEV;
> > + goto fail;
> > + }
> > +
> > + dev->mem_start = res->start;
> > + dev->mem_end = res->end;
> > + dev->irq = platform_get_irq_byname(pdev, "irq");
> > +
> > + dev->mtu = 1500;
> > + dev->open = cpmac_open;
> > + dev->stop = cpmac_stop;
> > + dev->set_config = cpmac_config;
> > + dev->hard_start_xmit = cpmac_start_xmit;
> > + dev->do_ioctl = cpmac_ioctl;
> > + dev->get_stats = cpmac_stats;
> > + dev->change_mtu = cpmac_change_mtu;
> > + dev->set_mac_address = cpmac_set_mac_address;
> > + dev->set_multicast_list = cpmac_set_multicast_list;
> > + dev->tx_timeout = cpmac_tx_timeout;
> > + dev->ethtool_ops = &cpmac_ethtool_ops;
> > + if (!disable_napi) {
> > + dev->poll = cpmac_poll;
> > + dev->weight = min(rx_ring_size, 64);
> > + }
> > +
> > + memset(priv, 0, sizeof(struct cpmac_priv));
>
> I think alloc_etherdev() already did that?
What? zeroing the memory or other stuff?
> > + spin_lock_init(&priv->lock);
> > + priv->msg_enable = netif_msg_init(NETIF_MSG_WOL, 0x3fff);
> > + priv->config = pdata;
> > + priv->dev = dev;
> > + memcpy(dev->dev_addr, priv->config->dev_addr, sizeof(dev->dev_addr));
> > + if (phy_id == 31) {
> > + snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
> > + cpmac_mii.id, phy_id);
> > + } else {
> > + snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
> > + }
> > +
> > + if ((rc = register_netdev(dev))) {
> > + printk("cpmac: error %i registering device %s\n",
> > + rc, dev->name);
> > + goto fail;
> > + }
> > +
> > + printk("cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
> > + dev->name, (u32 *)dev->mem_start, dev->irq,
> > + priv->phy_name);
> > + for (i = 0; i < 6; i++)
> > + printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");
> > +
> > + return 0;
> > +
> > +fail:
> > + free_netdev(dev);
> > + return rc;
> > +}
> > +
What about this?
Thanks for Your attention,
Matteo Croce
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-06 23:21 ` Matteo Croce
@ 2007-09-07 0:41 ` Andrew Morton
2007-09-07 23:04 ` Jeff Garzik
1 sibling, 0 replies; 13+ messages in thread
From: Andrew Morton @ 2007-09-07 0:41 UTC (permalink / raw)
To: Matteo Croce
Cc: linux-mips, ejka, jgarzik, netdev, davem, kuznet, pekkas, jmorris,
yoshfuji, kaber
> On Fri, 7 Sep 2007 01:21:41 +0200 Matteo Croce <technoboy85@gmail.com> wrote:
> > The patch introduces vast number of volatile structure fields. Please see
> > Documentation/volatile-considered-harmful.txt.
>
> Removing them and the kernel hangs at module load
They can't just be removed. Please see the document. There are I/O APIs
which, if properly used, make volatile unneeded.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-06 22:30 ` Andrew Morton
2007-09-06 23:04 ` Randy Dunlap
2007-09-06 23:21 ` Matteo Croce
@ 2007-09-07 7:10 ` Geert Uytterhoeven
2 siblings, 0 replies; 13+ messages in thread
From: Geert Uytterhoeven @ 2007-09-07 7:10 UTC (permalink / raw)
To: Andrew Morton
Cc: Matteo Croce, linux-mips, ejka, jgarzik, netdev, davem, kuznet,
pekkas, jmorris, yoshfuji, kaber
On Thu, 6 Sep 2007, Andrew Morton wrote:
> > On Thu, 6 Sep 2007 17:34:10 +0200 Matteo Croce <technoboy85@gmail.com> wrote:
> > Driver for the cpmac 100M ethernet driver.
> > It works fine disabling napi support, enabling it gives a kernel panic
> > when the first IPv6 packet has to be forwarded.
> > Other than that works fine.
>
> The driver does a lot of open-coded dma_cache_inv() calls (in a way which
> assumes a 32-bit bus, too). I assume that dma_cache_inv() is some mips
No, even i386 has it ;-)
> thing. I'd have thought that it would be better to use the dma mapping API
> thoughout the driver, and its associated dma invalidation APIs.
However, Ralf just posted a patch to remove it on all architectures, and
driver writers should consider it gone.
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-06 23:21 ` Matteo Croce
2007-09-07 0:41 ` Andrew Morton
@ 2007-09-07 23:04 ` Jeff Garzik
1 sibling, 0 replies; 13+ messages in thread
From: Jeff Garzik @ 2007-09-07 23:04 UTC (permalink / raw)
To: Matteo Croce
Cc: Andrew Morton, linux-mips, ejka, netdev, davem, kuznet, pekkas,
jmorris, yoshfuji, kaber
Matteo Croce wrote:
> Il Friday 07 September 2007 00:30:25 Andrew Morton ha scritto:
>>> On Thu, 6 Sep 2007 17:34:10 +0200 Matteo Croce <technoboy85@gmail.com> wrote:
>>> Driver for the cpmac 100M ethernet driver.
>>> It works fine disabling napi support, enabling it gives a kernel panic
>>> when the first IPv6 packet has to be forwarded.
>>> Other than that works fine.
>>>
>> I'm not too sure why I got cc'ed on this (and not on patches 1-6?) but
>> whatever.
>
> I mailed every maintainer in the respective section in the file MAINTAINERS
> and you were in the "NETWORK DEVICE DRIVERS" section
>
>> This patch introduces quite a number of basic coding-style mistakes.
>> Please run it through scripts/checkpatch.pl and review the output.
>
> Already done. I'm collecting other suggestions before committing
cool, I'll wait for the resend before reviewing, then.
As an author I understand that fixing up coding style / cosmetic stuff
rather than "meat" is annoying.
But it is important to emphasize that a "clean" driver is what makes a
good, thorough, effective review possible.
Jeff
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH][MIPS][7/7] AR7: ethernet
[not found] <200709080143.12345.technoboy85@gmail.com>
@ 2007-09-08 0:23 ` Matteo Croce
2007-09-12 16:50 ` Ralf Baechle
0 siblings, 1 reply; 13+ messages in thread
From: Matteo Croce @ 2007-09-08 0:23 UTC (permalink / raw)
To: linux-mips
Cc: Eugene Konev, netdev, davem, kuznet, pekkas, jmorris, yoshfuji,
kaber, openwrt-devel, Andrew Morton, Jeff Garzik
Driver for the cpmac 100M ethernet driver.
It works fine disabling napi support, enabling it gives a kernel panic
when the first IPv6 packet has to be forwarded.
Other than that works fine.
Signed-off-by: Matteo Croce <technoboy85@gmail.com>
Signed-off-by: Eugene Konev <ejka@imfi.kspu.ru>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d9b7d9c..6f38a84 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1822,6 +1822,15 @@ config SC92031
To compile this driver as a module, choose M here: the module
will be called sc92031. This is recommended.
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+ depends on NET_ETHERNET && EXPERIMENTAL && AR7
+ select PHYLIB
+ select FIXED_PHY
+ select FIXED_MII_100_FDX
+ help
+ TI AR7 CPMAC Ethernet support
+
config NET_POCKET
bool "Pocket and portable adapters"
depends on PARPORT
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 535d2a0..bb22df9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ZNET) += znet.o
obj-$(CONFIG_LAN_SAA9730) += saa9730.o
+obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_ATP) += atp.o
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
new file mode 100644
index 0000000..c10ab08
--- /dev/null
+++ b/drivers/net/cpmac.c
@@ -0,0 +1,1194 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <asm/ar7/ar7.h>
+#include <gpio.h>
+
+MODULE_AUTHOR("Eugene Konev");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+
+static int rx_ring_size = 64;
+static int disable_napi;
+module_param(rx_ring_size, int, 64);
+module_param(disable_napi, int, 0);
+MODULE_PARM_DESC(rx_ring_size, "Size of rx ring (in skbs)");
+MODULE_PARM_DESC(disable_napi, "Disable NAPI polling");
+
+/* Register definitions */
+struct cpmac_control_regs {
+ u32 revision;
+ u32 control;
+ u32 teardown;
+ u32 unused;
+} __attribute__ ((packed));
+
+struct cpmac_int_regs {
+ u32 stat_raw;
+ u32 stat_masked;
+ u32 enable;
+ u32 clear;
+} __attribute__ ((packed));
+
+struct cpmac_stats {
+ u32 good;
+ u32 bcast;
+ u32 mcast;
+ u32 pause;
+ u32 crc_error;
+ u32 align_error;
+ u32 oversized;
+ u32 jabber;
+ u32 undersized;
+ u32 fragment;
+ u32 filtered;
+ u32 qos_filtered;
+ u32 octets;
+} __attribute__ ((packed));
+
+struct cpmac_regs {
+ struct cpmac_control_regs tx_ctrl;
+ struct cpmac_control_regs rx_ctrl;
+ u32 unused1[56];
+ u32 mbp;
+/* MBP bits */
+#define MBP_RXPASSCRC 0x40000000
+#define MBP_RXQOS 0x20000000
+#define MBP_RXNOCHAIN 0x10000000
+#define MBP_RXCMF 0x01000000
+#define MBP_RXSHORT 0x00800000
+#define MBP_RXCEF 0x00400000
+#define MBP_RXPROMISC 0x00200000
+#define MBP_PROMISCCHAN(chan) (((chan) & 0x7) << 16)
+#define MBP_RXBCAST 0x00002000
+#define MBP_BCASTCHAN(chan) (((chan) & 0x7) << 8)
+#define MBP_RXMCAST 0x00000020
+#define MBP_MCASTCHAN(chan) ((chan) & 0x7)
+ u32 unicast_enable;
+ u32 unicast_clear;
+ u32 max_len;
+ u32 buffer_offset;
+ u32 filter_flow_threshold;
+ u32 unused2[2];
+ u32 flow_thre[8];
+ u32 free_buffer[8];
+ u32 mac_control;
+#define MAC_TXPTYPE 0x00000200
+#define MAC_TXPACE 0x00000040
+#define MAC_MII 0x00000020
+#define MAC_TXFLOW 0x00000010
+#define MAC_RXFLOW 0x00000008
+#define MAC_MTEST 0x00000004
+#define MAC_LOOPBACK 0x00000002
+#define MAC_FDX 0x00000001
+ u32 mac_status;
+#define MACST_QOS 0x4
+#define MACST_RXFLOW 0x2
+#define MACST_TXFLOW 0x1
+ u32 emc_control;
+ u32 unused3;
+ struct cpmac_int_regs tx_int;
+ u32 mac_int_vector;
+/* Int Status bits */
+#define INTST_STATUS 0x80000
+#define INTST_HOST 0x40000
+#define INTST_RX 0x20000
+#define INTST_TX 0x10000
+ u32 mac_eoi_vector;
+ u32 unused4[2];
+ struct cpmac_int_regs rx_int;
+ u32 mac_int_stat_raw;
+ u32 mac_int_stat_masked;
+ u32 mac_int_enable;
+ u32 mac_int_clear;
+ u32 mac_addr_low[8];
+ u32 mac_addr_mid;
+ u32 mac_addr_high;
+ u32 mac_hash_low;
+ u32 mac_hash_high;
+ u32 boff_test;
+ u32 pac_test;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 unused5[2];
+ struct cpmac_stats rx_stats;
+ struct cpmac_stats tx_stats;
+ u32 unused6[232];
+ u32 tx_ptr[8];
+ u32 rx_ptr[8];
+ u32 tx_ack[8];
+ u32 rx_ack[8];
+
+} __attribute__ ((packed));
+
+struct cpmac_mdio_regs {
+ u32 version;
+ u32 control;
+#define MDIOC_IDLE 0x80000000
+#define MDIOC_ENABLE 0x40000000
+#define MDIOC_PREAMBLE 0x00100000
+#define MDIOC_FAULT 0x00080000
+#define MDIOC_FAULTDETECT 0x00040000
+#define MDIOC_INTTEST 0x00020000
+#define MDIOC_CLKDIV(div) ((div) & 0xff)
+ u32 alive;
+ u32 link;
+ struct cpmac_int_regs link_int;
+ struct cpmac_int_regs user_int;
+ u32 unused[20];
+ volatile u32 access;
+#define MDIO_BUSY 0x80000000
+#define MDIO_WRITE 0x40000000
+#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
+#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
+#define MDIO_DATA(data) ((data) & 0xffff)
+ u32 physel;
+} __attribute__ ((packed));
+
+/* Descriptor */
+struct cpmac_desc {
+ u32 hw_next;
+ u32 hw_data;
+ u16 buflen;
+ u16 bufflags;
+ u16 datalen;
+ u16 dataflags;
+/* Flags bits */
+#define CPMAC_SOP 0x8000
+#define CPMAC_EOP 0x4000
+#define CPMAC_OWN 0x2000
+#define CPMAC_EOQ 0x1000
+ struct sk_buff *skb;
+ struct cpmac_desc *next;
+} __attribute__ ((packed));
+
+struct cpmac_priv {
+ struct net_device_stats stats;
+ spinlock_t lock; /* irq{save,restore} */
+ struct sk_buff *skb_pool;
+ int free_skbs;
+ struct cpmac_desc *rx_head;
+ int tx_head, tx_tail;
+ struct cpmac_desc *desc_ring;
+ struct cpmac_regs *regs;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy;
+ char phy_name[BUS_ID_SIZE];
+ struct plat_cpmac_data *config;
+ int oldlink, oldspeed, oldduplex;
+ u32 msg_enable;
+ struct net_device *dev;
+ struct work_struct alloc_work;
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_reset(struct net_device *dev);
+static void cpmac_hw_init(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+#undef CPMAC_DEBUG
+#define CPMAC_LOW_THRESH 32
+#define CPMAC_ALLOC_SIZE 64
+#define CPMAC_SKB_SIZE 1518
+#define CPMAC_TX_RING_SIZE 8
+
+#ifdef CPMAC_DEBUG
+static void cpmac_dump_regs(u32 *base, int count)
+{
+ int i;
+ for (i = 0; i < (count + 3) / 4; i++) {
+ if (i % 4 == 0) printk(KERN_DEBUG "\nCPMAC[0x%04x]:", i * 4);
+ printk(KERN_DEBUG " 0x%08x", *(base + i));
+ }
+ printk(KERN_DEBUG "\n");
+}
+
+static const char *cpmac_dump_buf(const uint8_t *buf, unsigned size)
+{
+ static char buffer[3 * 25 + 1];
+ char *p = &buffer[0];
+ if (size > 20)
+ size = 20;
+ while (size-- > 0)
+ p += sprintf(p, " %02x", *buf++);
+ return buffer;
+}
+#endif
+
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct cpmac_mdio_regs *regs = bus->priv;
+ u32 val;
+
+ while ((val = regs->access) & MDIO_BUSY);
+ regs->access = MDIO_BUSY | MDIO_REG(regnum & 0x1f) |
+ MDIO_PHY(phy_id & 0x1f);
+ while ((val = regs->access) & MDIO_BUSY);
+
+ return val & 0xffff;
+}
+
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
+ int regnum, u16 val)
+{
+ struct cpmac_mdio_regs *regs = bus->priv;
+
+ while (regs->access & MDIO_BUSY);
+ regs->access = MDIO_BUSY | MDIO_WRITE |
+ MDIO_REG(regnum & 0x1f) | MDIO_PHY(phy_id & 0x1f) | val;
+
+ return 0;
+}
+
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+ ar7_device_reset(AR7_RESET_BIT_MDIO);
+ ((struct cpmac_mdio_regs *)bus->priv)->control = MDIOC_ENABLE |
+ MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1);
+
+ return 0;
+}
+
+static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
+
+static struct mii_bus cpmac_mii = {
+ .name = "cpmac-mii",
+ .read = cpmac_mdio_read,
+ .write = cpmac_mdio_write,
+ .reset = cpmac_mdio_reset,
+ .irq = mii_irqs,
+};
+
+static int cpmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr)
+ return -EOPNOTSUPP;
+
+ /* ignore other fields */
+ return 0;
+}
+
+static int cpmac_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/*
+ * Program the receive filter: promiscuous mode, all-multicast, or a
+ * 64-bit hash filter built from the device's multicast list.
+ */
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *iter;
+ int i;
+ int hash, tmp;
+ int hashlo = 0, hashhi = 0;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ priv->regs->mbp &= ~MBP_PROMISCCHAN(0); /* promisc channel 0 */
+ priv->regs->mbp |= MBP_RXPROMISC;
+ } else {
+ priv->regs->mbp &= ~MBP_RXPROMISC;
+ if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ priv->regs->mac_hash_low = 0xffffffff;
+ priv->regs->mac_hash_high = 0xffffffff;
+ } else {
+ /*
+ * Fold each 6-byte multicast address into a 6-bit
+ * hash.  Fixed: the old code hashed octets
+ * 0,1,2,4,5,6 - it skipped octet 3 and read one
+ * byte past the end of the 6-byte address.
+ */
+ for (i = 0, iter = dev->mc_list; i < dev->mc_count;
+ i++, iter = iter->next) {
+ hash = 0;
+ tmp = iter->dmi_addr[0];
+ hash ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = iter->dmi_addr[1];
+ hash ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = iter->dmi_addr[2];
+ hash ^= (tmp >> 6) ^ tmp;
+ tmp = iter->dmi_addr[3];
+ hash ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = iter->dmi_addr[4];
+ hash ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = iter->dmi_addr[5];
+ hash ^= (tmp >> 6) ^ tmp;
+ hash &= 0x3f;
+ if (hash < 32) {
+ hashlo |= 1<<hash;
+ } else {
+ hashhi |= 1<<(hash - 32);
+ }
+ }
+
+ priv->regs->mac_hash_low = hashlo;
+ priv->regs->mac_hash_high = hashhi;
+ }
+ }
+}
+
+/*
+ * Take an rx skb from the pre-allocated pool, falling back to a
+ * direct atomic allocation when the pool is empty.  When the pool
+ * drops below CPMAC_LOW_THRESH the refill worker is scheduled.
+ * May return NULL if both the pool and the allocator fail.
+ * NOTE(review): pool manipulation appears to rely on the caller
+ * holding priv->lock - confirm all call sites do.
+ */
+static struct sk_buff *cpmac_get_skb(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ skb = priv->skb_pool;
+ if (likely(skb))
+ priv->skb_pool = skb->next;
+ else {
+ skb = dev_alloc_skb(CPMAC_SKB_SIZE + 2);
+ if (skb) {
+ skb->next = NULL;
+ /* 2-byte reserve aligns the IP header on 4 bytes */
+ skb_reserve(skb, 2);
+ skb->dev = priv->dev;
+ }
+ }
+
+ if (likely(priv->free_skbs))
+ priv->free_skbs--;
+
+ if (priv->free_skbs < CPMAC_LOW_THRESH)
+ schedule_work(&priv->alloc_work);
+
+ return skb;
+}
+
+/*
+ * Process one completed rx descriptor: acknowledge it to the
+ * hardware, hand the filled skb to the caller, and re-arm the
+ * descriptor with a fresh skb.  Returns the received skb, or NULL on
+ * a spurious (zero-length) completion or when no replacement skb is
+ * available (the packet is then dropped and the old skb reused).
+ */
+static struct sk_buff *cpmac_rx_one(struct net_device *dev,
+ struct cpmac_priv *priv,
+ struct cpmac_desc *desc)
+{
+ unsigned long flags;
+ char *data;
+ struct sk_buff *skb, *result = NULL;
+
+ priv->regs->rx_ack[0] = virt_to_phys(desc);
+ if (unlikely(!desc->datalen)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: rx: spurious interrupt\n",
+ dev->name);
+ priv->stats.rx_errors++;
+ return NULL;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ skb = cpmac_get_skb(dev);
+ if (likely(skb)) {
+ /* invalidate cache over the DMA'd payload before the
+ * CPU reads it */
+ data = (char *)phys_to_virt(desc->hw_data);
+ dma_cache_inv((u32)data, desc->datalen);
+ skb_put(desc->skb, desc->datalen);
+ desc->skb->protocol = eth_type_trans(desc->skb, dev);
+ desc->skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += desc->datalen;
+ result = desc->skb;
+ desc->skb = skb;
+ } else {
+#ifdef CPMAC_DEBUG
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "%s: low on skbs, dropping packet\n",
+ dev->name);
+#endif
+ priv->stats.rx_dropped++;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* re-arm the descriptor and hand it back to the hardware */
+ desc->hw_data = virt_to_phys(desc->skb->data);
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ dma_cache_wback((u32)desc, 16);
+
+ return result;
+}
+
+/*
+ * Non-NAPI receive path (disable_napi=1): drain every descriptor the
+ * hardware has released, pass the skbs up with netif_rx(), then
+ * restart the receiver at the first descriptor still owned by the
+ * hardware.  Called from interrupt context.
+ */
+static void cpmac_rx(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ if (unlikely(!priv->rx_head)) {
+ spin_unlock(&priv->lock);
+ return;
+ }
+
+ desc = priv->rx_head;
+ dma_cache_inv((u32)desc, 16);
+#ifdef CPMAC_DEBUG
+ /* Fixed: the old debug statement referenced undefined variables
+ * 'pkt' and 'data' and broke the build when CPMAC_DEBUG was set. */
+ printk(KERN_DEBUG "%s: len=%d, %s\n", __func__, desc->datalen,
+ cpmac_dump_buf(phys_to_virt(desc->hw_data), desc->datalen));
+#endif
+
+ while ((desc->dataflags & CPMAC_OWN) == 0) {
+ skb = cpmac_rx_one(dev, priv, desc);
+ if (likely(skb))
+ netif_rx(skb);
+ desc = desc->next;
+ dma_cache_inv((u32)desc, 16);
+ }
+
+ priv->rx_head = desc;
+ priv->regs->rx_ptr[0] = virt_to_phys(desc);
+ spin_unlock(&priv->lock);
+}
+
+/*
+ * NAPI poll handler: process up to min(dev->quota, *budget) completed
+ * rx descriptors with netif_receive_skb().  Returns 0 (and re-enables
+ * rx interrupts) when the ring is drained, 1 to stay on the poll list.
+ * NOTE(review): the cover letter reports a panic with NAPI enabled on
+ * the first forwarded IPv6 packet - root cause not visible here.
+ */
+static int cpmac_poll(struct net_device *dev, int *budget)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc;
+ int received = 0, quota = min(dev->quota, *budget);
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!priv->rx_head)) {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "%s: rx: polling, but no queue\n",
+ dev->name);
+ netif_rx_complete(dev);
+ return 0;
+ }
+
+ desc = priv->rx_head;
+ dma_cache_inv((u32)desc, 16);
+
+ while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
+ skb = cpmac_rx_one(dev, priv, desc);
+ if (likely(skb)) {
+ netif_receive_skb(skb);
+ received++;
+ }
+ desc = desc->next;
+ priv->rx_head = desc;
+ dma_cache_inv((u32)desc, 16);
+ }
+
+ *budget -= received;
+ dev->quota -= received;
+#ifdef CPMAC_DEBUG
+ printk(KERN_DEBUG "%s: processed %d packets\n", dev->name, received);
+#endif
+ if (desc->dataflags & CPMAC_OWN) {
+ /* ring drained: restart rx, leave poll mode, unmask irq */
+ priv->regs->rx_ptr[0] = virt_to_phys(desc);
+ netif_rx_complete(dev);
+ priv->regs->rx_int.enable = 0x1;
+ priv->regs->rx_int.clear = 0xfe;
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Workqueue handler that refills the rx skb pool.  Allocates up to
+ * CPMAC_ALLOC_SIZE skbs (stopping early on failure), then appends the
+ * batch to the tail of priv->skb_pool under the lock.
+ */
+static void
+cpmac_alloc_skbs(struct work_struct *work)
+{
+ struct cpmac_priv *priv = container_of(work, struct cpmac_priv,
+ alloc_work);
+ unsigned long flags;
+ int i, num_skbs = 0;
+ struct sk_buff *skb, *skbs = NULL;
+
+ for (i = 0; i < CPMAC_ALLOC_SIZE; i++) {
+ skb = alloc_skb(CPMAC_SKB_SIZE + 2, GFP_KERNEL);
+ if (!skb)
+ break;
+ skb->next = skbs;
+ skb_reserve(skb, 2);
+ skb->dev = priv->dev;
+ num_skbs++;
+ skbs = skb;
+ }
+
+ if (skbs) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* walk to the current tail of the pool, then append */
+ for (skb = priv->skb_pool; skb && skb->next; skb = skb->next);
+ if (!skb)
+ priv->skb_pool = skbs;
+ else
+ skb->next = skbs;
+ priv->free_skbs += num_skbs;
+ spin_unlock_irqrestore(&priv->lock, flags);
+#ifdef CPMAC_DEBUG
+ printk(KERN_DEBUG "%s: allocated %d skbs\n",
+ priv->dev->name, num_skbs);
+#endif
+ }
+}
+
+/*
+ * hard_start_xmit: pad short frames to ETH_ZLEN, claim the next tx
+ * descriptor slot, hand the skb's buffer to the DMA engine and kick
+ * the tx channel.  The queue is stopped when the last free slot is
+ * taken; cpmac_end_xmit() wakes it on completion.
+ * NOTE(review): returns -ENOMEM instead of the NETDEV_TX_* codes the
+ * stack expects - confirm against the hard_start_xmit contract.
+ */
+static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int len, chan;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ len = skb->len;
+#ifdef CPMAC_DEBUG
+ printk(KERN_DEBUG "%s: len=%d\n", __func__, len);
+ /* cpmac_dump_buf(const uint8_t * buf, unsigned size) */
+#endif
+ if (unlikely(len < ETH_ZLEN)) {
+ /* skb_padto frees the skb itself on failure */
+ if (unlikely(skb_padto(skb, ETH_ZLEN))) {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE
+ "%s: padding failed, dropping\n",
+ dev->name);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -ENOMEM;
+ }
+ len = ETH_ZLEN;
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+ chan = priv->tx_tail++;
+ priv->tx_tail %= 8;
+ if (priv->tx_tail == priv->tx_head)
+ netif_stop_queue(dev);
+
+ desc = &priv->desc_ring[chan];
+ dma_cache_inv((u32)desc, 16);
+ if (desc->dataflags & CPMAC_OWN) {
+ printk(KERN_NOTICE "%s: tx dma ring full, dropping\n",
+ dev->name);
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -ENOMEM;
+ }
+
+ dev->trans_start = jiffies;
+ /* single-buffer frame: start + end of packet, owned by hardware */
+ desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+ desc->skb = skb;
+ desc->hw_data = virt_to_phys(skb->data);
+ dma_cache_wback((u32)skb->data, len);
+ desc->buflen = len;
+ desc->datalen = len;
+ desc->hw_next = 0;
+ dma_cache_wback((u32)desc, 16);
+ priv->regs->tx_ptr[chan] = virt_to_phys(desc);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Tx-completion handler (interrupt context): acknowledge the finished
+ * descriptor, account the frame, free its skb, and wake the queue if
+ * it was stopped.  A completion with no skb is reported as spurious.
+ */
+static void cpmac_end_xmit(struct net_device *dev, int channel)
+{
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ desc = &priv->desc_ring[channel];
+ priv->regs->tx_ack[channel] = virt_to_phys(desc);
+ if (likely(desc->skb)) {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += desc->skb->len;
+ dev_kfree_skb_irq(desc->skb);
+ /* Clear the slot so a spurious interrupt or a timeout on
+ * the same channel cannot free the skb a second time. */
+ desc->skb = NULL;
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } else
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "%s: end_xmit: spurious interrupt\n",
+ dev->name);
+ spin_unlock(&priv->lock);
+}
+
+/*
+ * Hard-reset the MAC: pulse the AR7 reset line, disable both DMA
+ * engines, clear every channel head pointer and turn the MII port
+ * off.  cpmac_hw_init() re-enables everything afterwards.
+ */
+static void cpmac_reset(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ ar7_device_reset(priv->config->reset_bit);
+ priv->regs->rx_ctrl.control &= ~1;
+ priv->regs->tx_ctrl.control &= ~1;
+ for (i = 0; i < 8; i++) {
+ priv->regs->tx_ptr[i] = 0;
+ priv->regs->rx_ptr[i] = 0;
+ }
+ priv->regs->mac_control &= ~MAC_MII; /* disable mii */
+}
+
+/*
+ * Return every rx descriptor to hardware ownership, counting any
+ * completed-but-unprocessed packets as dropped.  Used when resetting
+ * after a hardware error.  NOTE(review): despite the name, nothing is
+ * freed here - skbs and the ring stay allocated.
+ */
+static inline void cpmac_free_rx_ring(struct net_device *dev)
+{
+ struct cpmac_desc *desc;
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!priv->rx_head))
+ return;
+
+ desc = priv->rx_head;
+ dma_cache_inv((u32)desc, 16);
+
+ for (i = 0; i < rx_ring_size; i++) {
+ desc->buflen = CPMAC_SKB_SIZE;
+ if ((desc->dataflags & CPMAC_OWN) == 0) {
+ desc->dataflags = CPMAC_OWN;
+ priv->stats.rx_dropped++;
+ }
+ dma_cache_wback((u32)desc, 16);
+ desc = desc->next;
+ dma_cache_inv((u32)desc, 16);
+ }
+}
+
+/*
+ * Top-level interrupt handler: dispatch tx completions, rx work
+ * (direct or via NAPI scheduling), and recover from hardware errors
+ * by performing a full reset/re-init.
+ */
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cpmac_priv *priv;
+ u32 status;
+
+ /* Validate dev before deriving priv from it (the old code called
+ * netdev_priv(dev) ahead of the NULL check). */
+ if (!dev)
+ return IRQ_NONE;
+
+ priv = netdev_priv(dev);
+
+ status = priv->regs->mac_int_vector;
+
+ if (status & INTST_TX)
+ cpmac_end_xmit(dev, (status & 7));
+
+ if (status & INTST_RX) {
+ if (disable_napi)
+ cpmac_rx(dev);
+ else {
+ /* mask rx interrupts until cpmac_poll() drains
+ * the ring, then schedule NAPI */
+ priv->regs->rx_int.enable = 0;
+ priv->regs->rx_int.clear = 0xff;
+ netif_rx_schedule(dev);
+ }
+ }
+
+ priv->regs->mac_eoi_vector = 0;
+
+ if (unlikely(status & (INTST_HOST | INTST_STATUS))) {
+ if (printk_ratelimit())
+ printk(KERN_ERR "%s: hw error, resetting...\n",
+ dev->name);
+ spin_lock(&priv->lock);
+ phy_stop(priv->phy);
+ cpmac_reset(dev);
+ cpmac_free_rx_ring(dev);
+ cpmac_hw_init(dev);
+ spin_unlock(&priv->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Watchdog tx-timeout handler: drop the oldest in-flight frame and
+ * wake the queue so transmission can resume.
+ */
+static void cpmac_tx_timeout(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+
+ priv->stats.tx_errors++;
+ desc = &priv->desc_ring[priv->tx_head++];
+ priv->tx_head %= 8;
+ printk(KERN_NOTICE "%s: transmit timeout\n", dev->name);
+ if (desc->skb) {
+ dev_kfree_skb(desc->skb);
+ /* Clear the slot so a late completion interrupt handled by
+ * cpmac_end_xmit() cannot free the same skb again. */
+ desc->skb = NULL;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * Device ioctl: forwards the MII ioctls (SIOC[GS]MIIPHY/REG) to
+ * phylib; everything else is rejected.  Requires a running interface
+ * with an attached PHY.
+ */
+static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ if (!(netif_running(dev)))
+ return -EINVAL;
+ if (!priv->phy)
+ return -EINVAL;
+ if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
+ (cmd == SIOCSMIIREG))
+ return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);
+
+ return -EINVAL;
+}
+
+/* ethtool get_settings: delegate to phylib when a PHY is attached. */
+static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->phy)
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phy, cmd);
+}
+
+/*
+ * ethtool set_settings: requires CAP_NET_ADMIN, then delegates to
+ * phylib when a PHY is attached.
+ */
+static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!priv->phy)
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phy, cmd);
+}
+
+/*
+ * ethtool get_drvinfo: fill in driver identification strings.
+ * Uses bounded strlcpy into the fixed-size ethtool_drvinfo fields
+ * instead of unbounded strcpy/sprintf.
+ */
+static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "cpmac", sizeof(info->driver));
+ strlcpy(info->version, "0.0.3", sizeof(info->version));
+ info->fw_version[0] = '\0';
+ strlcpy(info->bus_info, "cpmac", sizeof(info->bus_info));
+ info->regdump_len = 0;
+}
+
+/* ethtool operations table; link state comes from the generic helper. */
+static const struct ethtool_ops cpmac_ethtool_ops = {
+ .get_settings = cpmac_get_settings,
+ .set_settings = cpmac_set_settings,
+ .get_drvinfo = cpmac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/*
+ * get_stats hook: return the software-maintained counters.
+ * NOTE(review): returns NULL when the device is absent - confirm all
+ * get_stats callers in this kernel tolerate a NULL return.
+ */
+static struct net_device_stats *cpmac_stats(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (netif_device_present(dev))
+ return &priv->stats;
+
+ return NULL;
+}
+
+/*
+ * change_mtu hook: accept only the standard Ethernet range
+ * (68..1500 bytes); the hardware buffer size is not reprogrammed.
+ */
+static int cpmac_change_mtu(struct net_device *dev, int mtu)
+{
+ unsigned long flags;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ spinlock_t *lock = &priv->lock;
+
+ if ((mtu < 68) || (mtu > 1500))
+ return -EINVAL;
+
+ spin_lock_irqsave(lock, flags);
+ dev->mtu = mtu;
+ spin_unlock_irqrestore(lock, flags);
+
+ return 0;
+}
+
+/*
+ * phylib callback invoked on link changes: track link/speed/duplex in
+ * priv->old* and print the new status when anything changed.  Kicks
+ * the tx queue when the link comes up.
+ */
+static void cpmac_adjust_link(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->phy->link) {
+ if (priv->phy->duplex != priv->oldduplex) {
+ new_state = 1;
+ priv->oldduplex = priv->phy->duplex;
+ }
+
+ if (priv->phy->speed != priv->oldspeed) {
+ new_state = 1;
+ priv->oldspeed = priv->phy->speed;
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ netif_schedule(dev);
+ }
+ } else if (priv->oldlink) {
+ /* link went down: reset cached values */
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state)
+ phy_print_status(priv->phy);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*
+ * Program the MAC after a reset: point rx channel 0 at the ring,
+ * set the frame filter, MAC address, interrupt masks, enable both DMA
+ * engines and the MII port, then (re)start the PHY state machine.
+ */
+static void cpmac_hw_init(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ for (i = 0; i < 8; i++)
+ priv->regs->tx_ptr[i] = 0;
+ priv->regs->rx_ptr[0] = virt_to_phys(priv->rx_head);
+
+ priv->regs->mbp = MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST;
+ priv->regs->unicast_enable = 0x1;
+ priv->regs->unicast_clear = 0xfe;
+ priv->regs->buffer_offset = 0;
+ /* the last address octet is replicated for all 8 channels */
+ for (i = 0; i < 8; i++)
+ priv->regs->mac_addr_low[i] = dev->dev_addr[5];
+ priv->regs->mac_addr_mid = dev->dev_addr[4];
+ priv->regs->mac_addr_high = dev->dev_addr[0] | (dev->dev_addr[1] << 8)
+ | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
+ priv->regs->max_len = CPMAC_SKB_SIZE;
+ priv->regs->rx_int.enable = 0x1;
+ priv->regs->rx_int.clear = 0xfe;
+ priv->regs->tx_int.enable = 0xff;
+ priv->regs->tx_int.clear = 0;
+ priv->regs->mac_int_enable = 3;
+ priv->regs->mac_int_clear = 0xfc;
+
+ priv->regs->rx_ctrl.control |= 1;
+ priv->regs->tx_ctrl.control |= 1;
+ priv->regs->mac_control |= MAC_MII | MAC_FDX;
+
+ priv->phy->state = PHY_CHANGELINK;
+ phy_start(priv->phy);
+}
+
+/*
+ * net_device open: attach the PHY, map the register window, allocate
+ * and link the tx+rx descriptor rings, populate the rx ring with
+ * skbs, request the interrupt and bring the hardware up.
+ * Error paths unwind in reverse order via the fail_* labels
+ * (fail_irq intentionally falls through into fail_desc).
+ */
+static int cpmac_open(struct net_device *dev)
+{
+ int i, size, res;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+ struct sk_buff *skb;
+
+ priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(priv->phy)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(priv->phy);
+ }
+
+ if (!request_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start, dev->name)) {
+ printk(KERN_ERR "%s: failed to request registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_reserve;
+ }
+
+ priv->regs = ioremap_nocache(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+ if (!priv->regs) {
+ printk(KERN_ERR "%s: failed to remap registers\n", dev->name);
+ res = -ENXIO;
+ goto fail_remap;
+ }
+
+ priv->rx_head = NULL;
+ /* one contiguous block: 8 tx descriptors then the rx ring */
+ size = sizeof(struct cpmac_desc) * (rx_ring_size +
+ CPMAC_TX_RING_SIZE);
+ priv->desc_ring = (struct cpmac_desc *)kmalloc(size, GFP_KERNEL);
+ if (!priv->desc_ring) {
+ res = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ memset((char *)priv->desc_ring, 0, size);
+
+ priv->skb_pool = NULL;
+ priv->free_skbs = 0;
+ priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
+
+ /* pre-fill the skb pool synchronously before ring setup */
+ INIT_WORK(&priv->alloc_work, cpmac_alloc_skbs);
+ schedule_work(&priv->alloc_work);
+ flush_scheduled_work();
+
+ for (i = 0; i < rx_ring_size; i++) {
+ desc = &priv->rx_head[i];
+ skb = cpmac_get_skb(dev);
+ if (!skb) {
+ res = -ENOMEM;
+ goto fail_desc;
+ }
+ desc->skb = skb;
+ desc->hw_data = virt_to_phys(skb->data);
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ /* circular ring: last descriptor links back to the first */
+ desc->next = &priv->rx_head[(i + 1) % rx_ring_size];
+ desc->hw_next = virt_to_phys(desc->next);
+ dma_cache_wback((u32)desc, 16);
+ }
+
+ if ((res = request_irq(dev->irq, cpmac_irq, SA_INTERRUPT,
+ dev->name, dev))) {
+ printk(KERN_ERR "%s: failed to obtain irq\n", dev->name);
+ goto fail_irq;
+ }
+
+ cpmac_reset(dev);
+ cpmac_hw_init(dev);
+
+ netif_start_queue(dev);
+ return 0;
+
+fail_irq:
+fail_desc:
+ for (i = 0; i < rx_ring_size; i++)
+ if (priv->rx_head[i].skb)
+ kfree_skb(priv->rx_head[i].skb);
+fail_alloc:
+ kfree(priv->desc_ring);
+
+ for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
+ priv->skb_pool = skb->next;
+ kfree_skb(skb);
+ }
+
+ iounmap(priv->regs);
+
+fail_remap:
+ release_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+
+fail_reserve:
+ phy_disconnect(priv->phy);
+
+ return res;
+}
+
+/*
+ * net_device stop: quiesce the queue and PHY, reset the hardware,
+ * release irq and register window, then free the rx-ring skbs, the
+ * descriptor ring and the skb pool.
+ */
+static int cpmac_stop(struct net_device *dev)
+{
+ int i;
+ struct sk_buff *skb;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ phy_stop(priv->phy);
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ cpmac_reset(dev);
+
+ for (i = 0; i < 8; i++) {
+ priv->regs->rx_ptr[i] = 0;
+ priv->regs->tx_ptr[i] = 0;
+ priv->regs->mbp = 0;
+ }
+
+ free_irq(dev->irq, dev);
+ release_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+
+ /* NOTE(review): alloc_work is a plain work_struct while
+ * cancel_delayed_work() expects a delayed_work - confirm this
+ * compiles and behaves as intended (cancel_work_sync may fit). */
+ cancel_delayed_work(&priv->alloc_work);
+ flush_scheduled_work();
+
+ priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
+ for (i = 0; i < rx_ring_size; i++)
+ if (priv->rx_head[i].skb)
+ kfree_skb(priv->rx_head[i].skb);
+
+ kfree(priv->desc_ring);
+
+ for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
+ priv->skb_pool = skb->next;
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
+static int external_switch;
+
+/*
+ * Platform probe: pick the first PHY allowed by the platform mask
+ * (falling back to id 0 when an external switch was detected),
+ * allocate and populate the net_device, and register it.
+ */
+static int __devinit cpmac_probe(struct platform_device *pdev)
+{
+ int i, rc, phy_id;
+ struct resource *res;
+ struct cpmac_priv *priv;
+ struct net_device *dev;
+ struct plat_cpmac_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ if (!(pdata->phy_mask & (1 << phy_id)))
+ continue;
+ if (!cpmac_mii.phy_map[phy_id])
+ continue;
+ break;
+ }
+
+ if (phy_id == PHY_MAX_ADDR) {
+ if (external_switch)
+ phy_id = 0;
+ else {
+ printk(KERN_ERR "cpmac: no PHY present\n");
+ return -ENODEV;
+ }
+ }
+
+ dev = alloc_etherdev(sizeof(struct cpmac_priv));
+
+ if (!dev) {
+ printk(KERN_ERR
+ "cpmac: Unable to allocate net_device structure!\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ platform_set_drvdata(pdev, dev);
+ priv = netdev_priv(dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dev->mem_start = res->start;
+ dev->mem_end = res->end;
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+
+ dev->mtu = 1500;
+ dev->open = cpmac_open;
+ dev->stop = cpmac_stop;
+ dev->set_config = cpmac_config;
+ dev->hard_start_xmit = cpmac_start_xmit;
+ dev->do_ioctl = cpmac_ioctl;
+ dev->get_stats = cpmac_stats;
+ dev->change_mtu = cpmac_change_mtu;
+ dev->set_mac_address = cpmac_set_mac_address;
+ dev->set_multicast_list = cpmac_set_multicast_list;
+ dev->tx_timeout = cpmac_tx_timeout;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
+ if (!disable_napi) {
+ dev->poll = cpmac_poll;
+ dev->weight = min(rx_ring_size, 64);
+ }
+
+ memset(priv, 0, sizeof(struct cpmac_priv));
+ spin_lock_init(&priv->lock);
+ priv->msg_enable = netif_msg_init(NETIF_MSG_WOL, 0x3fff);
+ priv->config = pdata;
+ priv->dev = dev;
+ /* NOTE(review): copies sizeof(dev->dev_addr) bytes - confirm
+ * config->dev_addr is at least that large (a 6-byte MAC would
+ * be over-read here). */
+ memcpy(dev->dev_addr, priv->config->dev_addr, sizeof(dev->dev_addr));
+ /* NOTE(review): only PHY address 31 uses the real MDIO PHY; all
+ * others fall back to the fixed 100/FD PHY - confirm intended. */
+ if (phy_id == 31)
+ snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
+ cpmac_mii.id, phy_id);
+ else
+ snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
+
+ if ((rc = register_netdev(dev))) {
+ printk(KERN_ERR "cpmac: error %i registering device %s\n",
+ rc, dev->name);
+ goto fail;
+ }
+
+ printk(KERN_INFO "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
+ dev->name, (u32 *)dev->mem_start, dev->irq,
+ priv->phy_name);
+ for (i = 0; i < 6; i++)
+ printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");
+
+ return 0;
+
+fail:
+ free_netdev(dev);
+ return rc;
+}
+
+/* Platform remove: tear down in reverse order of probe. */
+static int __devexit cpmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+
+ return 0;
+}
+
+/* Platform driver glue; matches the "cpmac" platform device. */
+static struct platform_driver cpmac_driver = {
+ .driver.name = "cpmac",
+ .probe = cpmac_probe,
+ .remove = cpmac_remove,
+};
+
+/*
+ * Module init: map the MDIO block, release the cpmac/EPHY resets,
+ * reset the MII bus, probe for live PHYs and register the bus plus
+ * the platform driver.  NOTE(review): module_init functions are
+ * conventionally __init, not __devinit.
+ */
+int __devinit cpmac_init(void)
+{
+ u32 mask;
+ int i, res;
+ cpmac_mii.priv =
+ ioremap_nocache(AR7_REGS_MDIO, sizeof(struct cpmac_mdio_regs));
+
+ if (!cpmac_mii.priv) {
+ printk(KERN_ERR "Can't ioremap mdio registers\n");
+ return -ENXIO;
+ }
+
+#warning FIXME: unhardcode gpio&reset bits
+ ar7_gpio_disable(26);
+ ar7_gpio_disable(27);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+ ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+ cpmac_mii.reset(&cpmac_mii);
+
+ /* poll (busy-loop, no delay) until some PHY reports alive */
+ for (i = 0; i < 300000; i++) {
+ mask = ((struct cpmac_mdio_regs *)cpmac_mii.priv)->alive;
+ if (mask)
+ break;
+ }
+
+ /* NOTE(review): the multi-PHY check is commented out, so an
+ * external switch is always assumed and the alive mask is
+ * discarded - confirm this is intentional. */
+/* mask &= 0x7fffffff;
+ if (mask & (mask - 1)) {*/
+ external_switch = 1;
+ mask = 0;
+/* }*/
+
+ cpmac_mii.phy_mask = ~(mask | 0x80000000);
+
+ res = mdiobus_register(&cpmac_mii);
+ if (res)
+ goto fail_mii;
+
+ res = platform_driver_register(&cpmac_driver);
+ if (res)
+ goto fail_cpmac;
+
+ return 0;
+
+fail_cpmac:
+ mdiobus_unregister(&cpmac_mii);
+
+fail_mii:
+ iounmap(cpmac_mii.priv);
+
+ return res;
+}
+
+/*
+ * Module exit: unregister driver and MII bus.  NOTE(review): module
+ * exit functions are conventionally __exit, not __devexit; the MDIO
+ * mapping from cpmac_init() does not appear to be iounmap'd here.
+ */
+void __devexit cpmac_exit(void)
+{
+ platform_driver_unregister(&cpmac_driver);
+ mdiobus_unregister(&cpmac_mii);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-08 0:23 ` Matteo Croce
@ 2007-09-12 16:50 ` Ralf Baechle
2007-09-13 1:42 ` Thiemo Seufer
0 siblings, 1 reply; 13+ messages in thread
From: Ralf Baechle @ 2007-09-12 16:50 UTC (permalink / raw)
To: Matteo Croce
Cc: linux-mips, Eugene Konev, netdev, davem, kuznet, pekkas, jmorris,
yoshfuji, kaber, openwrt-devel, Andrew Morton, Jeff Garzik
On Sat, Sep 08, 2007 at 02:23:00AM +0200, Matteo Croce wrote:
> Driver for the cpmac 100M ethernet driver.
> It works fine disabling napi support, enabling it gives a kernel panic
> when the first IPv6 packet has to be forwarded.
> Other than that works fine.
>
> Signed-off-by: Matteo Croce <technoboy85@gmail.com>
> Signed-off-by: Eugene Konev <ejka@imfi.kspu.ru>
>
> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
> index d9b7d9c..6f38a84 100644
> --- a/drivers/net/Kconfig
> +++ b/drivers/net/Kconfig
> @@ -1822,6 +1822,15 @@ config SC92031
> To compile this driver as a module, choose M here: the module
> will be called sc92031. This is recommended.
>
> +config CPMAC
> + tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
> + depends on NET_ETHERNET && EXPERIMENTAL && AR7
The dependency on NET_ETHERNET is not needed because this config block is
enclosed in a
if NET_ETHERNET
...
endif # NET_ETHERNET
block.
> + select PHYLIB
> + select FIXED_PHY
> + select FIXED_MII_100_FDX
> + help
> + TI AR7 CPMAC Ethernet support
> +
> config NET_POCKET
> bool "Pocket and portable adapters"
> depends on PARPORT
> diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> index 535d2a0..bb22df9 100644
> --- a/drivers/net/Makefile
> +++ b/drivers/net/Makefile
> @@ -156,6 +156,7 @@ obj-$(CONFIG_8139CP) += 8139cp.o
> obj-$(CONFIG_8139TOO) += 8139too.o
> obj-$(CONFIG_ZNET) += znet.o
> obj-$(CONFIG_LAN_SAA9730) += saa9730.o
> +obj-$(CONFIG_CPMAC) += cpmac.o
> obj-$(CONFIG_DEPCA) += depca.o
> obj-$(CONFIG_EWRK3) += ewrk3.o
> obj-$(CONFIG_ATP) += atp.o
> diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
> new file mode 100644
> index 0000000..c10ab08
> --- /dev/null
> +++ b/drivers/net/cpmac.c
> @@ -0,0 +1,1194 @@
> +/*
> + * Copyright (C) 2006, 2007 Eugene Konev
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include <linux/module.h>
> +#include <linux/init.h>
> +#include <linux/moduleparam.h>
> +
> +#include <linux/sched.h>
> +#include <linux/kernel.h>
> +#include <linux/slab.h>
> +#include <linux/errno.h>
> +#include <linux/types.h>
> +#include <linux/delay.h>
> +#include <linux/version.h>
> +
> +#include <linux/netdevice.h>
> +#include <linux/etherdevice.h>
> +#include <linux/ethtool.h>
> +#include <linux/skbuff.h>
> +#include <linux/mii.h>
> +#include <linux/phy.h>
> +#include <linux/platform_device.h>
> +#include <asm/ar7/ar7.h>
> +#include <gpio.h>
> +
> +MODULE_AUTHOR("Eugene Konev");
> +MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
> +MODULE_LICENSE("GPL");
> +
> +static int rx_ring_size = 64;
> +static int disable_napi;
> +module_param(rx_ring_size, int, 64);
> +module_param(disable_napi, int, 0);
> +MODULE_PARM_DESC(rx_ring_size, "Size of rx ring (in skbs)");
> +MODULE_PARM_DESC(disable_napi, "Disable NAPI polling");
> +
> +/* Register definitions */
> +struct cpmac_control_regs {
> + u32 revision;
> + u32 control;
> + u32 teardown;
> + u32 unused;
> +} __attribute__ ((packed));
> +
> +struct cpmac_int_regs {
> + u32 stat_raw;
> + u32 stat_masked;
> + u32 enable;
> + u32 clear;
> +} __attribute__ ((packed));
> +
> +struct cpmac_stats {
> + u32 good;
> + u32 bcast;
> + u32 mcast;
> + u32 pause;
> + u32 crc_error;
> + u32 align_error;
> + u32 oversized;
> + u32 jabber;
> + u32 undersized;
> + u32 fragment;
> + u32 filtered;
> + u32 qos_filtered;
> + u32 octets;
> +} __attribute__ ((packed));
All struct members here are sized such that there is no padding needed, so
the packed attribute doesn't buy you anything - unless of course the
entire structure is misaligned, but I don't see how that would be possible
in this driver, so the __attribute__ ((packed)) should go - it results in
somewhat larger and slower code.
In any case, the __packed attribute is preferred over __attribute__ ((packed))
for readability's sake.
> +
> +struct cpmac_regs {
> + struct cpmac_control_regs tx_ctrl;
> + struct cpmac_control_regs rx_ctrl;
> + u32 unused1[56];
> + u32 mbp;
> +/* MBP bits */
> +#define MBP_RXPASSCRC 0x40000000
> +#define MBP_RXQOS 0x20000000
> +#define MBP_RXNOCHAIN 0x10000000
> +#define MBP_RXCMF 0x01000000
> +#define MBP_RXSHORT 0x00800000
> +#define MBP_RXCEF 0x00400000
> +#define MBP_RXPROMISC 0x00200000
> +#define MBP_PROMISCCHAN(chan) (((chan) & 0x7) << 16)
> +#define MBP_RXBCAST 0x00002000
> +#define MBP_BCASTCHAN(chan) (((chan) & 0x7) << 8)
> +#define MBP_RXMCAST 0x00000020
> +#define MBP_MCASTCHAN(chan) ((chan) & 0x7)
> + u32 unicast_enable;
> + u32 unicast_clear;
> + u32 max_len;
> + u32 buffer_offset;
> + u32 filter_flow_threshold;
> + u32 unused2[2];
> + u32 flow_thre[8];
> + u32 free_buffer[8];
> + u32 mac_control;
> +#define MAC_TXPTYPE 0x00000200
> +#define MAC_TXPACE 0x00000040
> +#define MAC_MII 0x00000020
> +#define MAC_TXFLOW 0x00000010
> +#define MAC_RXFLOW 0x00000008
> +#define MAC_MTEST 0x00000004
> +#define MAC_LOOPBACK 0x00000002
> +#define MAC_FDX 0x00000001
> + u32 mac_status;
> +#define MACST_QOS 0x4
> +#define MACST_RXFLOW 0x2
> +#define MACST_TXFLOW 0x1
> + u32 emc_control;
> + u32 unused3;
> + struct cpmac_int_regs tx_int;
> + u32 mac_int_vector;
> +/* Int Status bits */
> +#define INTST_STATUS 0x80000
> +#define INTST_HOST 0x40000
> +#define INTST_RX 0x20000
> +#define INTST_TX 0x10000
> + u32 mac_eoi_vector;
> + u32 unused4[2];
> + struct cpmac_int_regs rx_int;
> + u32 mac_int_stat_raw;
> + u32 mac_int_stat_masked;
> + u32 mac_int_enable;
> + u32 mac_int_clear;
> + u32 mac_addr_low[8];
> + u32 mac_addr_mid;
> + u32 mac_addr_high;
> + u32 mac_hash_low;
> + u32 mac_hash_high;
> + u32 boff_test;
> + u32 pac_test;
> + u32 rx_pause;
> + u32 tx_pause;
> + u32 unused5[2];
> + struct cpmac_stats rx_stats;
> + struct cpmac_stats tx_stats;
> + u32 unused6[232];
> + u32 tx_ptr[8];
> + u32 rx_ptr[8];
> + u32 tx_ack[8];
> + u32 rx_ack[8];
> +
> +} __attribute__ ((packed));
> +
> +struct cpmac_mdio_regs {
> + u32 version;
> + u32 control;
> +#define MDIOC_IDLE 0x80000000
> +#define MDIOC_ENABLE 0x40000000
> +#define MDIOC_PREAMBLE 0x00100000
> +#define MDIOC_FAULT 0x00080000
> +#define MDIOC_FAULTDETECT 0x00040000
> +#define MDIOC_INTTEST 0x00020000
> +#define MDIOC_CLKDIV(div) ((div) & 0xff)
> + u32 alive;
> + u32 link;
> + struct cpmac_int_regs link_int;
> + struct cpmac_int_regs user_int;
> + u32 unused[20];
> + volatile u32 access;
> +#define MDIO_BUSY 0x80000000
> +#define MDIO_WRITE 0x40000000
> +#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
> +#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
> +#define MDIO_DATA(data) ((data) & 0xffff)
> + u32 physel;
> +} __attribute__ ((packed));
> +
> +/* Descriptor */
> +struct cpmac_desc {
> + u32 hw_next;
> + u32 hw_data;
> + u16 buflen;
> + u16 bufflags;
> + u16 datalen;
> + u16 dataflags;
> +/* Flags bits */
> +#define CPMAC_SOP 0x8000
> +#define CPMAC_EOP 0x4000
> +#define CPMAC_OWN 0x2000
> +#define CPMAC_EOQ 0x1000
> + struct sk_buff *skb;
> + struct cpmac_desc *next;
> +} __attribute__ ((packed));
> +
> +struct cpmac_priv {
> + struct net_device_stats stats;
> + spinlock_t lock; /* irq{save,restore} */
> + struct sk_buff *skb_pool;
> + int free_skbs;
> + struct cpmac_desc *rx_head;
> + int tx_head, tx_tail;
> + struct cpmac_desc *desc_ring;
> + struct cpmac_regs *regs;
> + struct mii_bus *mii_bus;
> + struct phy_device *phy;
> + char phy_name[BUS_ID_SIZE];
> + struct plat_cpmac_data *config;
> + int oldlink, oldspeed, oldduplex;
> + u32 msg_enable;
> + struct net_device *dev;
> + struct work_struct alloc_work;
> +};
> +
> +static irqreturn_t cpmac_irq(int, void *);
> +static void cpmac_reset(struct net_device *dev);
> +static void cpmac_hw_init(struct net_device *dev);
> +static int cpmac_stop(struct net_device *dev);
> +static int cpmac_open(struct net_device *dev);
> +
> +#undef CPMAC_DEBUG
> +#define CPMAC_LOW_THRESH 32
> +#define CPMAC_ALLOC_SIZE 64
> +#define CPMAC_SKB_SIZE 1518
> +#define CPMAC_TX_RING_SIZE 8
> +
> +#ifdef CPMAC_DEBUG
> +static void cpmac_dump_regs(u32 *base, int count)
> +{
> + int i;
> + for (i = 0; i < (count + 3) / 4; i++) {
> + if (i % 4 == 0) printk(KERN_DEBUG "\nCPMAC[0x%04x]:", i * 4);
> + printk(KERN_DEBUG " 0x%08x", *(base + i));
> + }
> + printk(KERN_DEBUG "\n");
> +}
> +
> +static const char *cpmac_dump_buf(const uint8_t *buf, unsigned size)
> +{
> + static char buffer[3 * 25 + 1];
> + char *p = &buffer[0];
> + if (size > 20)
> + size = 20;
> + while (size-- > 0)
> + p += sprintf(p, " %02x", *buf++);
> + return buffer;
> +}
> +#endif
> +
> +static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
> +{
> + struct cpmac_mdio_regs *regs = bus->priv;
> + u32 val;
> +
> + while ((val = regs->access) & MDIO_BUSY);
> + regs->access = MDIO_BUSY | MDIO_REG(regnum & 0x1f) |
> + MDIO_PHY(phy_id & 0x1f);
> + while ((val = regs->access) & MDIO_BUSY);
> +
> + return val & 0xffff;
> +}
> +
> +static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
> + int regnum, u16 val)
> +{
> + struct cpmac_mdio_regs *regs = bus->priv;
> +
> + while (regs->access & MDIO_BUSY);
> + regs->access = MDIO_BUSY | MDIO_WRITE |
> + MDIO_REG(regnum & 0x1f) | MDIO_PHY(phy_id & 0x1f) | val;
> +
> + return 0;
> +}
> +
> +static int cpmac_mdio_reset(struct mii_bus *bus)
> +{
> + ar7_device_reset(AR7_RESET_BIT_MDIO);
> + ((struct cpmac_mdio_regs *)bus->priv)->control = MDIOC_ENABLE |
> + MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1);
> +
> + return 0;
> +}
> +
> +static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
> +
> +static struct mii_bus cpmac_mii = {
> + .name = "cpmac-mii",
> + .read = cpmac_mdio_read,
> + .write = cpmac_mdio_write,
> + .reset = cpmac_mdio_reset,
> + .irq = mii_irqs,
> +};
> +
> +static int cpmac_config(struct net_device *dev, struct ifmap *map)
> +{
> + if (dev->flags & IFF_UP)
> + return -EBUSY;
> +
> + /* Don't allow changing the I/O address */
> + if (map->base_addr != dev->base_addr)
> + return -EOPNOTSUPP;
> +
> + /* ignore other fields */
> + return 0;
> +}
> +
> +static int cpmac_set_mac_address(struct net_device *dev, void *addr)
> +{
> + struct sockaddr *sa = addr;
> +
> + if (dev->flags & IFF_UP)
> + return -EBUSY;
> +
> + memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
> +
> + return 0;
> +}
> +
> +static void cpmac_set_multicast_list(struct net_device *dev)
> +{
> + struct dev_mc_list *iter;
> + int i;
> + int hash, tmp;
> + int hashlo = 0, hashhi = 0;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + if (dev->flags & IFF_PROMISC) {
> + priv->regs->mbp &= ~MBP_PROMISCCHAN(0); /* promisc channel 0 */
> + priv->regs->mbp |= MBP_RXPROMISC;
> + } else {
> + priv->regs->mbp &= ~MBP_RXPROMISC;
> + if (dev->flags & IFF_ALLMULTI) {
> + /* enable all multicast mode */
> + priv->regs->mac_hash_low = 0xffffffff;
> + priv->regs->mac_hash_high = 0xffffffff;
> + } else {
> + for (i = 0, iter = dev->mc_list; i < dev->mc_count;
> + i++, iter = iter->next) {
> + hash = 0;
> + tmp = iter->dmi_addr[0];
> + hash ^= (tmp >> 2) ^ (tmp << 4);
> + tmp = iter->dmi_addr[1];
> + hash ^= (tmp >> 4) ^ (tmp << 2);
> + tmp = iter->dmi_addr[2];
> + hash ^= (tmp >> 6) ^ tmp;
> + tmp = iter->dmi_addr[4];
> + hash ^= (tmp >> 2) ^ (tmp << 4);
> + tmp = iter->dmi_addr[5];
> + hash ^= (tmp >> 4) ^ (tmp << 2);
> + tmp = iter->dmi_addr[6];
> + hash ^= (tmp >> 6) ^ tmp;
> + hash &= 0x3f;
> + if (hash < 32) {
> + hashlo |= 1<<hash;
> + } else {
> + hashhi |= 1<<(hash - 32);
> + }
> + }
> +
> + priv->regs->mac_hash_low = hashlo;
> + priv->regs->mac_hash_high = hashhi;
> + }
> + }
> +}
> +
> +static struct sk_buff *cpmac_get_skb(struct net_device *dev)
> +{
> + struct sk_buff *skb;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + skb = priv->skb_pool;
> + if (likely(skb))
> + priv->skb_pool = skb->next;
> + else {
> + skb = dev_alloc_skb(CPMAC_SKB_SIZE + 2);
> + if (skb) {
> + skb->next = NULL;
> + skb_reserve(skb, 2);
> + skb->dev = priv->dev;
> + }
> + }
> +
> + if (likely(priv->free_skbs))
> + priv->free_skbs--;
> +
> + if (priv->free_skbs < CPMAC_LOW_THRESH)
> + schedule_work(&priv->alloc_work);
> +
> + return skb;
> +}
> +
> +static struct sk_buff *cpmac_rx_one(struct net_device *dev,
> + struct cpmac_priv *priv,
> + struct cpmac_desc *desc)
> +{
> + unsigned long flags;
> + char *data;
> + struct sk_buff *skb, *result = NULL;
> +
> + priv->regs->rx_ack[0] = virt_to_phys(desc);
> + if (unlikely(!desc->datalen)) {
> + if (printk_ratelimit())
> + printk(KERN_WARNING "%s: rx: spurious interrupt\n",
> + dev->name);
> + priv->stats.rx_errors++;
> + return NULL;
> + }
> +
> + spin_lock_irqsave(&priv->lock, flags);
> + skb = cpmac_get_skb(dev);
> + if (likely(skb)) {
> + data = (char *)phys_to_virt(desc->hw_data);
> + dma_cache_inv((u32)data, desc->datalen);
> + skb_put(desc->skb, desc->datalen);
> + desc->skb->protocol = eth_type_trans(desc->skb, dev);
> + desc->skb->ip_summed = CHECKSUM_NONE;
> + priv->stats.rx_packets++;
> + priv->stats.rx_bytes += desc->datalen;
> + result = desc->skb;
> + desc->skb = skb;
> + } else {
> +#ifdef CPMAC_DEBUG
> + if (printk_ratelimit())
> + printk(KERN_NOTICE "%s: low on skbs, dropping packet\n",
> + dev->name);
> +#endif
> + priv->stats.rx_dropped++;
> + }
> + spin_unlock_irqrestore(&priv->lock, flags);
> +
> + desc->hw_data = virt_to_phys(desc->skb->data);
> + desc->buflen = CPMAC_SKB_SIZE;
> + desc->dataflags = CPMAC_OWN;
> + dma_cache_wback((u32)desc, 16);
> +
> + return result;
> +}
> +
> +static void cpmac_rx(struct net_device *dev)
> +{
> + struct sk_buff *skb;
> + struct cpmac_desc *desc;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + spin_lock(&priv->lock);
> + if (unlikely(!priv->rx_head)) {
> + spin_unlock(&priv->lock);
> + return;
> + }
> +
> + desc = priv->rx_head;
> + dma_cache_inv((u32)desc, 16);
> +#ifdef CPMAC_DEBUG
> + printk(KERN_DEBUG "%s: len=%d, %s\n", __func__, pkt->datalen,
> + cpmac_dump_buf(data, pkt->datalen));
> +#endif
> +
> + while ((desc->dataflags & CPMAC_OWN) == 0) {
> + skb = cpmac_rx_one(dev, priv, desc);
> + if (likely(skb))
> + netif_rx(skb);
> + desc = desc->next;
> + dma_cache_inv((u32)desc, 16);
> + }
> +
> + priv->rx_head = desc;
> + priv->regs->rx_ptr[0] = virt_to_phys(desc);
> + spin_unlock(&priv->lock);
> +}
> +
> +static int cpmac_poll(struct net_device *dev, int *budget)
> +{
> + struct sk_buff *skb;
> + struct cpmac_desc *desc;
> + int received = 0, quota = min(dev->quota, *budget);
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + if (unlikely(!priv->rx_head)) {
> + if (printk_ratelimit())
> + printk(KERN_NOTICE "%s: rx: polling, but no queue\n",
> + dev->name);
> + netif_rx_complete(dev);
> + return 0;
> + }
> +
> + desc = priv->rx_head;
> + dma_cache_inv((u32)desc, 16);
> +
> + while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
> + skb = cpmac_rx_one(dev, priv, desc);
> + if (likely(skb)) {
> + netif_receive_skb(skb);
> + received++;
> + }
> + desc = desc->next;
> + priv->rx_head = desc;
> + dma_cache_inv((u32)desc, 16);
> + }
> +
> + *budget -= received;
> + dev->quota -= received;
> +#ifdef CPMAC_DEBUG
> + printk(KERN_DEBUG "%s: processed %d packets\n", dev->name, received);
> +#endif
> + if (desc->dataflags & CPMAC_OWN) {
> + priv->regs->rx_ptr[0] = virt_to_phys(desc);
> + netif_rx_complete(dev);
> + priv->regs->rx_int.enable = 0x1;
> + priv->regs->rx_int.clear = 0xfe;
> + return 0;
> + }
> +
> + return 1;
> +}
> +
> +static void
> +cpmac_alloc_skbs(struct work_struct *work)
> +{
> + struct cpmac_priv *priv = container_of(work, struct cpmac_priv,
> + alloc_work);
> + unsigned long flags;
> + int i, num_skbs = 0;
> + struct sk_buff *skb, *skbs = NULL;
> +
> + for (i = 0; i < CPMAC_ALLOC_SIZE; i++) {
> + skb = alloc_skb(CPMAC_SKB_SIZE + 2, GFP_KERNEL);
> + if (!skb)
> + break;
> + skb->next = skbs;
> + skb_reserve(skb, 2);
> + skb->dev = priv->dev;
> + num_skbs++;
> + skbs = skb;
> + }
> +
> + if (skbs) {
> + spin_lock_irqsave(&priv->lock, flags);
> + for (skb = priv->skb_pool; skb && skb->next; skb = skb->next);
> + if (!skb)
> + priv->skb_pool = skbs;
> + else
> + skb->next = skbs;
> + priv->free_skbs += num_skbs;
> + spin_unlock_irqrestore(&priv->lock, flags);
> +#ifdef CPMAC_DEBUG
> + printk(KERN_DEBUG "%s: allocated %d skbs\n",
> + priv->dev->name, num_skbs);
> +#endif
> + }
> +}
> +
> +static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
> +{
> + unsigned long flags;
> + int len, chan;
> + struct cpmac_desc *desc;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + len = skb->len;
> +#ifdef CPMAC_DEBUG
> + printk(KERN_DEBUG "%s: len=%d\n", __func__, len);
> + /* cpmac_dump_buf(const uint8_t * buf, unsigned size) */
> +#endif
> + if (unlikely(len < ETH_ZLEN)) {
> + if (unlikely(skb_padto(skb, ETH_ZLEN))) {
> + if (printk_ratelimit())
> + printk(KERN_NOTICE
> + "%s: padding failed, dropping\n",
> + dev->name);
> + spin_lock_irqsave(&priv->lock, flags);
> + priv->stats.tx_dropped++;
> + spin_unlock_irqrestore(&priv->lock, flags);
> + return -ENOMEM;
> + }
> + len = ETH_ZLEN;
> + }
> + spin_lock_irqsave(&priv->lock, flags);
> + chan = priv->tx_tail++;
> + priv->tx_tail %= 8;
> + if (priv->tx_tail == priv->tx_head)
> + netif_stop_queue(dev);
> +
> + desc = &priv->desc_ring[chan];
> + dma_cache_inv((u32)desc, 16);
> + if (desc->dataflags & CPMAC_OWN) {
> + printk(KERN_NOTICE "%s: tx dma ring full, dropping\n",
> + dev->name);
> + priv->stats.tx_dropped++;
> + spin_unlock_irqrestore(&priv->lock, flags);
> + return -ENOMEM;
> + }
> +
> + dev->trans_start = jiffies;
> + desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
> + desc->skb = skb;
> + desc->hw_data = virt_to_phys(skb->data);
> + dma_cache_wback((u32)skb->data, len);
> + desc->buflen = len;
> + desc->datalen = len;
> + desc->hw_next = 0;
> + dma_cache_wback((u32)desc, 16);
> + priv->regs->tx_ptr[chan] = virt_to_phys(desc);
> + spin_unlock_irqrestore(&priv->lock, flags);
> +
> + return 0;
> +}
> +
> +static void cpmac_end_xmit(struct net_device *dev, int channel)
> +{
> + struct cpmac_desc *desc;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + spin_lock(&priv->lock);
> + desc = &priv->desc_ring[channel];
> + priv->regs->tx_ack[channel] = virt_to_phys(desc);
> + if (likely(desc->skb)) {
> + priv->stats.tx_packets++;
> + priv->stats.tx_bytes += desc->skb->len;
> + dev_kfree_skb_irq(desc->skb);
> + if (netif_queue_stopped(dev))
> + netif_wake_queue(dev);
> + } else
> + if (printk_ratelimit())
> + printk(KERN_NOTICE "%s: end_xmit: spurious interrupt\n",
> + dev->name);
> + spin_unlock(&priv->lock);
> +}
> +
> +static void cpmac_reset(struct net_device *dev)
> +{
> + int i;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + ar7_device_reset(priv->config->reset_bit);
> + priv->regs->rx_ctrl.control &= ~1;
> + priv->regs->tx_ctrl.control &= ~1;
> + for (i = 0; i < 8; i++) {
> + priv->regs->tx_ptr[i] = 0;
> + priv->regs->rx_ptr[i] = 0;
> + }
> + priv->regs->mac_control &= ~MAC_MII; /* disable mii */
> +}
> +
> +static inline void cpmac_free_rx_ring(struct net_device *dev)
> +{
> + struct cpmac_desc *desc;
> + int i;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + if (unlikely(!priv->rx_head))
> + return;
> +
> + desc = priv->rx_head;
> + dma_cache_inv((u32)desc, 16);
> +
> + for (i = 0; i < rx_ring_size; i++) {
> + desc->buflen = CPMAC_SKB_SIZE;
> + if ((desc->dataflags & CPMAC_OWN) == 0) {
> + desc->dataflags = CPMAC_OWN;
> + priv->stats.rx_dropped++;
> + }
> + dma_cache_wback((u32)desc, 16);
> + desc = desc->next;
> + dma_cache_inv((u32)desc, 16);
> + }
> +}
> +
> +static irqreturn_t cpmac_irq(int irq, void *dev_id)
> +{
> + struct net_device *dev = dev_id;
> + struct cpmac_priv *priv = netdev_priv(dev);
> + u32 status;
> +
> + if (!dev)
> + return IRQ_NONE;
> +
> + status = priv->regs->mac_int_vector;
> +
> + if (status & INTST_TX)
> + cpmac_end_xmit(dev, (status & 7));
> +
> + if (status & INTST_RX) {
> + if (disable_napi)
> + cpmac_rx(dev);
> + else {
> + priv->regs->rx_int.enable = 0;
> + priv->regs->rx_int.clear = 0xff;
> + netif_rx_schedule(dev);
> + }
> + }
> +
> + priv->regs->mac_eoi_vector = 0;
> +
> + if (unlikely(status & (INTST_HOST | INTST_STATUS))) {
> + if (printk_ratelimit())
> + printk(KERN_ERR "%s: hw error, resetting...\n",
> + dev->name);
> + spin_lock(&priv->lock);
> + phy_stop(priv->phy);
> + cpmac_reset(dev);
> + cpmac_free_rx_ring(dev);
> + cpmac_hw_init(dev);
> + spin_unlock(&priv->lock);
> + }
> +
> + return IRQ_HANDLED;
> +}
> +
> +static void cpmac_tx_timeout(struct net_device *dev)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> + struct cpmac_desc *desc;
> +
> + priv->stats.tx_errors++;
> + desc = &priv->desc_ring[priv->tx_head++];
> + priv->tx_head %= 8;
> + printk(KERN_NOTICE "%s: transmit timeout\n", dev->name);
> + if (desc->skb)
> + dev_kfree_skb(desc->skb);
> + netif_wake_queue(dev);
> +}
> +
> +static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> + if (!(netif_running(dev)))
> + return -EINVAL;
> + if (!priv->phy)
> + return -EINVAL;
> + if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
> + (cmd == SIOCSMIIREG))
> + return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);
> +
> + return -EINVAL;
> +}
> +
> +static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + if (priv->phy)
> + return phy_ethtool_gset(priv->phy, cmd);
> +
> + return -EINVAL;
> +}
> +
> +static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + if (!capable(CAP_NET_ADMIN))
> + return -EPERM;
> +
> + if (priv->phy)
> + return phy_ethtool_sset(priv->phy, cmd);
> +
> + return -EINVAL;
> +}
> +
> +static void cpmac_get_drvinfo(struct net_device *dev,
> + struct ethtool_drvinfo *info)
> +{
> + strcpy(info->driver, "cpmac");
> + strcpy(info->version, "0.0.3");
> + info->fw_version[0] = '\0';
> + sprintf(info->bus_info, "%s", "cpmac");
> + info->regdump_len = 0;
> +}
> +
> +static const struct ethtool_ops cpmac_ethtool_ops = {
> + .get_settings = cpmac_get_settings,
> + .set_settings = cpmac_set_settings,
> + .get_drvinfo = cpmac_get_drvinfo,
> + .get_link = ethtool_op_get_link,
> +};
> +
> +static struct net_device_stats *cpmac_stats(struct net_device *dev)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + if (netif_device_present(dev))
> + return &priv->stats;
> +
> + return NULL;
> +}
> +
> +static int cpmac_change_mtu(struct net_device *dev, int mtu)
> +{
> + unsigned long flags;
> + struct cpmac_priv *priv = netdev_priv(dev);
> + spinlock_t *lock = &priv->lock;
> +
> + if ((mtu < 68) || (mtu > 1500))
> + return -EINVAL;
> +
> + spin_lock_irqsave(lock, flags);
> + dev->mtu = mtu;
> + spin_unlock_irqrestore(lock, flags);
> +
> + return 0;
> +}
> +
> +static void cpmac_adjust_link(struct net_device *dev)
> +{
> + struct cpmac_priv *priv = netdev_priv(dev);
> + unsigned long flags;
> + int new_state = 0;
> +
> + spin_lock_irqsave(&priv->lock, flags);
> + if (priv->phy->link) {
> + if (priv->phy->duplex != priv->oldduplex) {
> + new_state = 1;
> + priv->oldduplex = priv->phy->duplex;
> + }
> +
> + if (priv->phy->speed != priv->oldspeed) {
> + new_state = 1;
> + priv->oldspeed = priv->phy->speed;
> + }
> +
> + if (!priv->oldlink) {
> + new_state = 1;
> + priv->oldlink = 1;
> + netif_schedule(dev);
> + }
> + } else if (priv->oldlink) {
> + new_state = 1;
> + priv->oldlink = 0;
> + priv->oldspeed = 0;
> + priv->oldduplex = -1;
> + }
> +
> + if (new_state)
> + phy_print_status(priv->phy);
> +
> + spin_unlock_irqrestore(&priv->lock, flags);
> +}
> +
> +static void cpmac_hw_init(struct net_device *dev)
> +{
> + int i;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + for (i = 0; i < 8; i++)
> + priv->regs->tx_ptr[i] = 0;
> + priv->regs->rx_ptr[0] = virt_to_phys(priv->rx_head);
> +
> + priv->regs->mbp = MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST;
> + priv->regs->unicast_enable = 0x1;
> + priv->regs->unicast_clear = 0xfe;
> + priv->regs->buffer_offset = 0;
> + for (i = 0; i < 8; i++)
> + priv->regs->mac_addr_low[i] = dev->dev_addr[5];
> + priv->regs->mac_addr_mid = dev->dev_addr[4];
> + priv->regs->mac_addr_high = dev->dev_addr[0] | (dev->dev_addr[1] << 8)
> + | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
> + priv->regs->max_len = CPMAC_SKB_SIZE;
> + priv->regs->rx_int.enable = 0x1;
> + priv->regs->rx_int.clear = 0xfe;
> + priv->regs->tx_int.enable = 0xff;
> + priv->regs->tx_int.clear = 0;
> + priv->regs->mac_int_enable = 3;
> + priv->regs->mac_int_clear = 0xfc;
> +
> + priv->regs->rx_ctrl.control |= 1;
> + priv->regs->tx_ctrl.control |= 1;
> + priv->regs->mac_control |= MAC_MII | MAC_FDX;
> +
> + priv->phy->state = PHY_CHANGELINK;
> + phy_start(priv->phy);
> +}
> +
> +static int cpmac_open(struct net_device *dev)
> +{
> + int i, size, res;
> + struct cpmac_priv *priv = netdev_priv(dev);
> + struct cpmac_desc *desc;
> + struct sk_buff *skb;
> +
> + priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
> + 0, PHY_INTERFACE_MODE_MII);
> + if (IS_ERR(priv->phy)) {
> + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
> + return PTR_ERR(priv->phy);
> + }
> +
> + if (!request_mem_region(dev->mem_start, dev->mem_end -
> + dev->mem_start, dev->name)) {
> + printk(KERN_ERR "%s: failed to request registers\n",
> + dev->name);
> + res = -ENXIO;
> + goto fail_reserve;
> + }
> +
> + priv->regs = ioremap_nocache(dev->mem_start, dev->mem_end -
> + dev->mem_start);
> + if (!priv->regs) {
> + printk(KERN_ERR "%s: failed to remap registers\n", dev->name);
> + res = -ENXIO;
> + goto fail_remap;
> + }
> +
> + priv->rx_head = NULL;
> + size = sizeof(struct cpmac_desc) * (rx_ring_size +
> + CPMAC_TX_RING_SIZE);
> + priv->desc_ring = (struct cpmac_desc *)kmalloc(size, GFP_KERNEL);
kmalloc returns void * so no cast to another pointer type necessary.
> + if (!priv->desc_ring) {
> + res = -ENOMEM;
> + goto fail_alloc;
> + }
> +
> + memset((char *)priv->desc_ring, 0, size);
Use kzalloc instead of kmalloc. kzalloc is like kmalloc but returns
zero'd memory.
> + priv->skb_pool = NULL;
> + priv->free_skbs = 0;
> + priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
> +
> + INIT_WORK(&priv->alloc_work, cpmac_alloc_skbs);
> + schedule_work(&priv->alloc_work);
> + flush_scheduled_work();
> +
> + for (i = 0; i < rx_ring_size; i++) {
> + desc = &priv->rx_head[i];
> + skb = cpmac_get_skb(dev);
> + if (!skb) {
> + res = -ENOMEM;
> + goto fail_desc;
> + }
> + desc->skb = skb;
> + desc->hw_data = virt_to_phys(skb->data);
> + desc->buflen = CPMAC_SKB_SIZE;
> + desc->dataflags = CPMAC_OWN;
> + desc->next = &priv->rx_head[(i + 1) % rx_ring_size];
> + desc->hw_next = virt_to_phys(desc->next);
> + dma_cache_wback((u32)desc, 16);
> + }
> +
> + if ((res = request_irq(dev->irq, cpmac_irq, SA_INTERRUPT,
> + dev->name, dev))) {
> + printk(KERN_ERR "%s: failed to obtain irq\n", dev->name);
> + goto fail_irq;
> + }
> +
> + cpmac_reset(dev);
> + cpmac_hw_init(dev);
> +
> + netif_start_queue(dev);
> + return 0;
> +
> +fail_irq:
> +fail_desc:
> + for (i = 0; i < rx_ring_size; i++)
> + if (priv->rx_head[i].skb)
> + kfree_skb(priv->rx_head[i].skb);
> +fail_alloc:
> + kfree(priv->desc_ring);
> +
> + for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
> + priv->skb_pool = skb->next;
> + kfree_skb(skb);
> + }
> +
> + iounmap(priv->regs);
> +
> +fail_remap:
> + release_mem_region(dev->mem_start, dev->mem_end -
> + dev->mem_start);
> +
> +fail_reserve:
> + phy_disconnect(priv->phy);
> +
> + return res;
> +}
> +
> +static int cpmac_stop(struct net_device *dev)
> +{
> + int i;
> + struct sk_buff *skb;
> + struct cpmac_priv *priv = netdev_priv(dev);
> +
> + netif_stop_queue(dev);
> +
> + phy_stop(priv->phy);
> + phy_disconnect(priv->phy);
> + priv->phy = NULL;
> +
> + cpmac_reset(dev);
> +
> + for (i = 0; i < 8; i++) {
> + priv->regs->rx_ptr[i] = 0;
> + priv->regs->tx_ptr[i] = 0;
> + priv->regs->mbp = 0;
> + }
> +
> + free_irq(dev->irq, dev);
> + release_mem_region(dev->mem_start, dev->mem_end -
> + dev->mem_start);
> +
> + cancel_delayed_work(&priv->alloc_work);
> + flush_scheduled_work();
> +
> + priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
> + for (i = 0; i < rx_ring_size; i++)
> + if (priv->rx_head[i].skb)
> + kfree_skb(priv->rx_head[i].skb);
> +
> + kfree(priv->desc_ring);
> +
> + for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
> + priv->skb_pool = skb->next;
> + kfree_skb(skb);
> + }
> +
> + return 0;
> +}
> +
> +static int external_switch;
> +
> +static int __devinit cpmac_probe(struct platform_device *pdev)
> +{
> + int i, rc, phy_id;
> + struct resource *res;
> + struct cpmac_priv *priv;
> + struct net_device *dev;
> + struct plat_cpmac_data *pdata;
> +
> + pdata = pdev->dev.platform_data;
> +
> + for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
> + if (!(pdata->phy_mask & (1 << phy_id)))
> + continue;
> + if (!cpmac_mii.phy_map[phy_id])
> + continue;
> + break;
> + }
> +
> + if (phy_id == PHY_MAX_ADDR) {
> + if (external_switch)
> + phy_id = 0;
> + else {
> + printk(KERN_ERR "cpmac: no PHY present\n");
> + return -ENODEV;
> + }
> + }
> +
> + dev = alloc_etherdev(sizeof(struct cpmac_priv));
> +
> + if (!dev) {
> + printk(KERN_ERR
> + "cpmac: Unable to allocate net_device structure!\n");
> + return -ENOMEM;
> + }
> +
> + SET_MODULE_OWNER(dev);
SET_MODULE_OWNER is a useless no-op which only exists in 2.6 for
driver source compatibility with 2.4. So you can remove this call.
I used the opportunity to send out a patch to remove SET_MODULE_OWNER
from the kernel entirely.
> + platform_set_drvdata(pdev, dev);
> + priv = netdev_priv(dev);
> +
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
> + if (!res) {
> + rc = -ENODEV;
> + goto fail;
> + }
> +
> + dev->mem_start = res->start;
> + dev->mem_end = res->end;
> + dev->irq = platform_get_irq_byname(pdev, "irq");
> +
> + dev->mtu = 1500;
Initializing this field is redundant; alloc_etherdev has already done it,
so remove this line.
> + dev->open = cpmac_open;
> + dev->stop = cpmac_stop;
> + dev->set_config = cpmac_config;
> + dev->hard_start_xmit = cpmac_start_xmit;
> + dev->do_ioctl = cpmac_ioctl;
> + dev->get_stats = cpmac_stats;
> + dev->change_mtu = cpmac_change_mtu;
> + dev->set_mac_address = cpmac_set_mac_address;
> + dev->set_multicast_list = cpmac_set_multicast_list;
> + dev->tx_timeout = cpmac_tx_timeout;
> + dev->ethtool_ops = &cpmac_ethtool_ops;
> + if (!disable_napi) {
> + dev->poll = cpmac_poll;
> + dev->weight = min(rx_ring_size, 64);
> + }
> +
> + memset(priv, 0, sizeof(struct cpmac_priv));
Useless, alloc_etherdev does that already.
> + spin_lock_init(&priv->lock);
> + priv->msg_enable = netif_msg_init(NETIF_MSG_WOL, 0x3fff);
> + priv->config = pdata;
> + priv->dev = dev;
> + memcpy(dev->dev_addr, priv->config->dev_addr, sizeof(dev->dev_addr));
> + if (phy_id == 31)
> + snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
> + cpmac_mii.id, phy_id);
> + else
> + snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
> +
> + if ((rc = register_netdev(dev))) {
> + printk(KERN_ERR "cpmac: error %i registering device %s\n",
> + rc, dev->name);
> + goto fail;
> + }
> +
> + printk(KERN_INFO "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
> + dev->name, (u32 *)dev->mem_start, dev->irq,
> + priv->phy_name);
> + for (i = 0; i < 6; i++)
> + printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");
> +
> + return 0;
> +
> +fail:
> + free_netdev(dev);
> + return rc;
> +}
> +
> +static int __devexit cpmac_remove(struct platform_device *pdev)
> +{
> + struct net_device *dev = platform_get_drvdata(pdev);
> + unregister_netdev(dev);
> + free_netdev(dev);
> + return 0;
> +}
> +
> +static struct platform_driver cpmac_driver = {
> + .driver.name = "cpmac",
> + .probe = cpmac_probe,
> + .remove = cpmac_remove,
This should be:
.remove = __devexit_p(cpmac_remove),
to avoid the final link from blowing up when the driver is built into the
kernel.
> +};
> +
> +int __devinit cpmac_init(void)
Make this function static; no need to export.
> +{
> + u32 mask;
> + int i, res;
> + cpmac_mii.priv =
> + ioremap_nocache(AR7_REGS_MDIO, sizeof(struct cpmac_mdio_regs));
> +
> + if (!cpmac_mii.priv) {
> + printk(KERN_ERR "Can't ioremap mdio registers\n");
> + return -ENXIO;
> + }
> +
> +#warning FIXME: unhardcode gpio&reset bits
Seeing such warnings always gives me a warm fuzzy feeling ;-)
> + ar7_gpio_disable(26);
> + ar7_gpio_disable(27);
> + ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
> + ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
> + ar7_device_reset(AR7_RESET_BIT_EPHY);
> +
> + cpmac_mii.reset(&cpmac_mii);
> +
> + for (i = 0; i < 300000; i++) {
> + mask = ((struct cpmac_mdio_regs *)cpmac_mii.priv)->alive;
> + if (mask)
> + break;
> + }
> +
> +/* mask &= 0x7fffffff;
> + if (mask & (mask - 1)) {*/
> + external_switch = 1;
> + mask = 0;
> +/* }*/
> +
> + cpmac_mii.phy_mask = ~(mask | 0x80000000);
> +
> + res = mdiobus_register(&cpmac_mii);
> + if (res)
> + goto fail_mii;
> +
> + res = platform_driver_register(&cpmac_driver);
> + if (res)
> + goto fail_cpmac;
> +
> + return 0;
> +
> +fail_cpmac:
> + mdiobus_unregister(&cpmac_mii);
> +
> +fail_mii:
> + iounmap(cpmac_mii.priv);
> +
> + return res;
> +}
> +
> +void __devexit cpmac_exit(void)
> +{
> + platform_driver_unregister(&cpmac_driver);
> + mdiobus_unregister(&cpmac_mii);
> +}
> +
> +module_init(cpmac_init);
> +module_exit(cpmac_exit);
Time to run ...
Ralf
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-12 16:50 ` Ralf Baechle
@ 2007-09-13 1:42 ` Thiemo Seufer
2007-09-13 11:35 ` Ralf Baechle
0 siblings, 1 reply; 13+ messages in thread
From: Thiemo Seufer @ 2007-09-13 1:42 UTC (permalink / raw)
To: Ralf Baechle
Cc: Matteo Croce, linux-mips, Eugene Konev, netdev, davem, kuznet,
pekkas, jmorris, yoshfuji, kaber, openwrt-devel, Andrew Morton,
Jeff Garzik
Ralf Baechle wrote:
> On Sat, Sep 08, 2007 at 02:23:00AM +0200, Matteo Croce wrote:
[snip]
> > +/* Register definitions */
> > +struct cpmac_control_regs {
> > + u32 revision;
> > + u32 control;
> > + u32 teardown;
> > + u32 unused;
> > +} __attribute__ ((packed));
> > +
> > +struct cpmac_int_regs {
> > + u32 stat_raw;
> > + u32 stat_masked;
> > + u32 enable;
> > + u32 clear;
> > +} __attribute__ ((packed));
> > +
> > +struct cpmac_stats {
> > + u32 good;
> > + u32 bcast;
> > + u32 mcast;
> > + u32 pause;
> > + u32 crc_error;
> > + u32 align_error;
> > + u32 oversized;
> > + u32 jabber;
> > + u32 undersized;
> > + u32 fragment;
> > + u32 filtered;
> > + u32 qos_filtered;
> > + u32 octets;
> > +} __attribute__ ((packed));
>
> All struct members here are sized such that there is no padding needed, so
> the packed attribute doesn't buy you anything - unless of course the
> entire structure is misaligned but I don't see how that would be possible
> in this driver so the __attribute__ ((packed)) should go - it results in
> somewhat larger and slower code.
FWIW, a modern gcc will warn about such superfluous packed attributes,
that's another reason to remove those.
Thiemo
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-13 1:42 ` Thiemo Seufer
@ 2007-09-13 11:35 ` Ralf Baechle
0 siblings, 0 replies; 13+ messages in thread
From: Ralf Baechle @ 2007-09-13 11:35 UTC (permalink / raw)
To: Thiemo Seufer
Cc: Matteo Croce, linux-mips, Eugene Konev, netdev, davem, kuznet,
pekkas, jmorris, yoshfuji, kaber, openwrt-devel, Andrew Morton,
Jeff Garzik
On Thu, Sep 13, 2007 at 02:42:46AM +0100, Thiemo Seufer wrote:
> > All struct members here are sized such that there is no padding needed, so
> > the packed attribute doesn't buy you anything - unless of course the
> > entire structure is misaligned but I don't see how that would be possible
> > in this driver so the __attribute__ ((packed)) should go - it results in
> > somewhat larger and slower code.
>
> FWIW, a modern gcc will warn about such superfluous packed attributes,
> that's another reason to remove those.
I doubt it will in this case; the packed structure is dereferenced by a
pointer so no way for gcc to know the alignment.
Ralf
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH][MIPS][7/7] AR7: ethernet
[not found] <200709201728.10866.technoboy85@gmail.com>
@ 2007-09-20 16:13 ` Matteo Croce
2007-09-29 5:39 ` Jeff Garzik
0 siblings, 1 reply; 13+ messages in thread
From: Matteo Croce @ 2007-09-20 16:13 UTC (permalink / raw)
To: linux-mips
Cc: Eugene Konev, netdev, davem, kuznet, pekkas, jmorris, yoshfuji,
kaber, Andrew Morton, Jeff Garzik
Driver for the cpmac 100M ethernet driver.
Jeff, here is the meat ;)
Signed-off-by: Matteo Croce <technoboy85@gmail.com>
Signed-off-by: Eugene Konev <ejka@imfi.kspu.ru>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6a0863e..28ba0dc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1822,6 +1822,15 @@ config SC92031
To compile this driver as a module, choose M here: the module
will be called sc92031. This is recommended.
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+ depends on NET_ETHERNET && EXPERIMENTAL && AR7
+ select PHYLIB
+ select FIXED_PHY
+ select FIXED_MII_100_FDX
+ help
+ TI AR7 CPMAC Ethernet support
+
config NET_POCKET
bool "Pocket and portable adapters"
depends on PARPORT
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 9501d64..b536934 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -157,6 +157,7 @@ obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ZNET) += znet.o
obj-$(CONFIG_LAN_SAA9730) += saa9730.o
+obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_ATP) += atp.o
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
new file mode 100644
index 0000000..50aad94
--- /dev/null
+++ b/drivers/net/cpmac.c
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/gpio.h>
+
+MODULE_AUTHOR("Eugene Konev");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+
+static int rx_ring_size = 64;
+static int disable_napi;
+static int debug_level = 8;
+static int dumb_switch;
+
+module_param(rx_ring_size, int, 0644);
+module_param(disable_napi, int, 0644);
+/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
+module_param(debug_level, int, 0444);
+module_param(dumb_switch, int, 0444);
+
+MODULE_PARM_DESC(rx_ring_size, "Size of rx ring (in skbs)");
+MODULE_PARM_DESC(disable_napi, "Disable NAPI polling");
+MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
+MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
+
+/* frame size + 802.1q tag */
+#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
+#define CPMAC_TX_RING_SIZE 8
+
+/* Ethernet registers */
+#define CPMAC_TX_CONTROL 0x0004
+#define CPMAC_TX_TEARDOWN 0x0008
+#define CPMAC_RX_CONTROL 0x0014
+#define CPMAC_RX_TEARDOWN 0x0018
+#define CPMAC_MBP 0x0100
+# define MBP_RXPASSCRC 0x40000000
+# define MBP_RXQOS 0x20000000
+# define MBP_RXNOCHAIN 0x10000000
+# define MBP_RXCMF 0x01000000
+# define MBP_RXSHORT 0x00800000
+# define MBP_RXCEF 0x00400000
+# define MBP_RXPROMISC 0x00200000
+# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
+# define MBP_RXBCAST 0x00002000
+# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
+# define MBP_RXMCAST 0x00000020
+# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
+#define CPMAC_UNICAST_ENABLE 0x0104
+#define CPMAC_UNICAST_CLEAR 0x0108
+#define CPMAC_MAX_LENGTH 0x010c
+#define CPMAC_BUFFER_OFFSET 0x0110
+#define CPMAC_MAC_CONTROL 0x0160
+# define MAC_TXPTYPE 0x00000200
+# define MAC_TXPACE 0x00000040
+# define MAC_MII 0x00000020
+# define MAC_TXFLOW 0x00000010
+# define MAC_RXFLOW 0x00000008
+# define MAC_MTEST 0x00000004
+# define MAC_LOOPBACK 0x00000002
+# define MAC_FDX 0x00000001
+#define CPMAC_MAC_STATUS 0x0164
+# define MAC_STATUS_QOS 0x00000004
+# define MAC_STATUS_RXFLOW 0x00000002
+# define MAC_STATUS_TXFLOW 0x00000001
+#define CPMAC_TX_INT_ENABLE 0x0178
+#define CPMAC_TX_INT_CLEAR 0x017c
+#define CPMAC_MAC_INT_VECTOR 0x0180
+# define MAC_INT_STATUS 0x00080000
+# define MAC_INT_HOST 0x00040000
+# define MAC_INT_RX 0x00020000
+# define MAC_INT_TX 0x00010000
+#define CPMAC_MAC_EOI_VECTOR 0x0184
+#define CPMAC_RX_INT_ENABLE 0x0198
+#define CPMAC_RX_INT_CLEAR 0x019c
+#define CPMAC_MAC_INT_ENABLE 0x01a8
+#define CPMAC_MAC_INT_CLEAR 0x01ac
+#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_MID 0x01d0
+#define CPMAC_MAC_ADDR_HI 0x01d4
+#define CPMAC_MAC_HASH_LO 0x01d8
+#define CPMAC_MAC_HASH_HI 0x01dc
+#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
+#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
+#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
+#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
+#define CPMAC_REG_END 0x0680
+/*
+ * Rx/Tx statistics
+ * TODO: use some of them to fill stats in cpmac_stats()
+ */
+#define CPMAC_STATS_RX_GOOD 0x0200
+#define CPMAC_STATS_RX_BCAST 0x0204
+#define CPMAC_STATS_RX_MCAST 0x0208
+#define CPMAC_STATS_RX_PAUSE 0x020c
+#define CPMAC_STATS_RX_CRC 0x0210
+#define CPMAC_STATS_RX_ALIGN 0x0214
+#define CPMAC_STATS_RX_OVER 0x0218
+#define CPMAC_STATS_RX_JABBER 0x021c
+#define CPMAC_STATS_RX_UNDER 0x0220
+#define CPMAC_STATS_RX_FRAG 0x0224
+#define CPMAC_STATS_RX_FILTER 0x0228
+#define CPMAC_STATS_RX_QOSFILTER 0x022c
+#define CPMAC_STATS_RX_OCTETS 0x0230
+
+#define CPMAC_STATS_TX_GOOD 0x0234
+#define CPMAC_STATS_TX_BCAST 0x0238
+#define CPMAC_STATS_TX_MCAST 0x023c
+#define CPMAC_STATS_TX_PAUSE 0x0240
+#define CPMAC_STATS_TX_DEFER 0x0244
+#define CPMAC_STATS_TX_COLLISION 0x0248
+#define CPMAC_STATS_TX_SINGLECOLL 0x024c
+#define CPMAC_STATS_TX_MULTICOLL 0x0250
+#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
+#define CPMAC_STATS_TX_LATECOLL 0x0258
+#define CPMAC_STATS_TX_UNDERRUN 0x025c
+#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
+#define CPMAC_STATS_TX_OCTETS 0x0264
+
+#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
+#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
+ (reg)))
+
+/* MDIO bus */
+#define CPMAC_MDIO_VERSION 0x0000
+#define CPMAC_MDIO_CONTROL 0x0004
+# define MDIOC_IDLE 0x80000000
+# define MDIOC_ENABLE 0x40000000
+# define MDIOC_PREAMBLE 0x00100000
+# define MDIOC_FAULT 0x00080000
+# define MDIOC_FAULTDETECT 0x00040000
+# define MDIOC_INTTEST 0x00020000
+# define MDIOC_CLKDIV(div) ((div) & 0xff)
+#define CPMAC_MDIO_ALIVE 0x0008
+#define CPMAC_MDIO_LINK 0x000c
+#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
+# define MDIO_BUSY 0x80000000
+# define MDIO_WRITE 0x40000000
+# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
+# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
+# define MDIO_DATA(data) ((data) & 0xffff)
+#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
+# define PHYSEL_LINKSEL 0x00000040
+# define PHYSEL_LINKINT 0x00000020
+
+/*
+ * Hardware DMA descriptor.  The first four 32-bit words (hw_next, hw_data,
+ * buflen/bufflags, datalen/dataflags) are read and written by the CPMAC
+ * DMA engine; the remaining members are driver-side bookkeeping only.
+ */
+struct cpmac_desc {
+ u32 hw_next; /* physical address of the next descriptor in the chain */
+ u32 hw_data; /* physical address of the data buffer */
+ u16 buflen; /* size of the buffer handed to the hardware */
+ u16 bufflags;
+ u16 datalen; /* actual packet length, filled in by hardware on rx */
+ u16 dataflags;
+/* dataflags bits */
+#define CPMAC_SOP 0x8000 /* start of packet */
+#define CPMAC_EOP 0x4000 /* end of packet */
+#define CPMAC_OWN 0x2000 /* descriptor owned by the hardware */
+#define CPMAC_EOQ 0x1000 /* end of queue */
+ struct sk_buff *skb; /* skb backing hw_data, if any */
+ struct cpmac_desc *next; /* virtual-address counterpart of hw_next */
+ dma_addr_t mapping; /* DMA address of this descriptor itself */
+ dma_addr_t data_mapping; /* DMA mapping of skb->data */
+};
+
+/* Per-device driver state, reached via netdev_priv(). */
+struct cpmac_priv {
+ struct net_device_stats stats;
+ spinlock_t lock; /* protects the rings and stats */
+ struct cpmac_desc *rx_head; /* next rx descriptor to reap */
+ int tx_head, tx_tail; /* tx ring reap / insert positions */
+ struct cpmac_desc *desc_ring; /* tx descriptors followed by rx ring */
+ dma_addr_t dma_ring; /* DMA address of desc_ring */
+ void __iomem *regs;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy;
+ char phy_name[BUS_ID_SIZE];
+ struct plat_cpmac_data *config;
+ int oldlink, oldspeed, oldduplex; /* cached PHY state for link changes */
+ u32 msg_enable; /* NETIF_MSG_* bitmask */
+ struct net_device *dev;
+ struct work_struct alloc_work;
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_reset(struct net_device *dev);
+static void cpmac_hw_init(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+/* Debug helper: hex-dump the whole CPMAC register block, 4 words per line. */
+static void cpmac_dump_regs(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ for (i = 0; i < CPMAC_REG_END; i += 4) {
+ if (i % 16 == 0) {
+ if (i)
+ printk("\n");
+ printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
+ priv->regs + i);
+ }
+ printk(" %08x", cpmac_read(priv->regs, i));
+ }
+ printk("\n");
+}
+
+/* Debug helper: dump one DMA descriptor as raw 32-bit words. */
+static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
+{
+ int i;
+ printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
+ for (i = 0; i < sizeof(*desc) / 4; i++)
+ printk(" %08x", ((u32 *)desc)[i]);
+ printk("\n");
+}
+
+/* Debug helper: hex-dump an skb's payload, 16 bytes per line. */
+static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ int i;
+ printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+ for (i = 0; i < skb->len; i++) {
+ if (i % 16 == 0) {
+ if (i)
+ printk("\n");
+ printk(KERN_DEBUG "%s: data[%p]:", dev->name,
+ skb->data + i);
+ }
+ printk(" %02x", ((u8 *)skb->data)[i]);
+ }
+ printk("\n");
+}
+
+/*
+ * Read a PHY register over the MDIO bus.  Busy-waits for the access
+ * channel to go idle before and after issuing the read.
+ * NOTE(review): no timeout on the busy-wait — a stuck bus would hang here.
+ */
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ u32 val;
+
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
+ MDIO_PHY(phy_id));
+ while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
+ cpu_relax();
+ return MDIO_DATA(val);
+}
+
+/* Write a PHY register over the MDIO bus; fire-and-forget after busy-wait. */
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
+ MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+ return 0;
+}
+
+/* Reset the MDIO block and re-enable it with a clock divider derived
+ * from the CPMAC bus frequency (target MDIO clock ~2.2 MHz). */
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+ ar7_device_reset(AR7_RESET_BIT_MDIO);
+ cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
+ MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));
+ return 0;
+}
+
+static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
+
+static struct mii_bus cpmac_mii = {
+ .name = "cpmac-mii",
+ .read = cpmac_mdio_read,
+ .write = cpmac_mdio_write,
+ .reset = cpmac_mdio_reset,
+ .irq = mii_irqs,
+};
+
+/*
+ * SIOCSIFMAP handler.  The device must be down, and the I/O address is
+ * fixed; every other ifmap field is accepted and ignored.
+ */
+static int cpmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (map->base_addr == dev->base_addr)
+ return 0;
+
+ /* Relocating the registers is not supported */
+ return -EOPNOTSUPP;
+}
+
+/*
+ * Install a new station address.  Only permitted while the interface is
+ * down; the hardware registers pick it up in cpmac_hw_init().
+ */
+static int cpmac_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sock = addr;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+ return 0;
+}
+
+/*
+ * Program the receive filters: promiscuous mode, all-multicast, or the
+ * hardware's 64-bit multicast hash (a CPMAC-specific XOR fold of the
+ * address bytes, not crc32).
+ */
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *iter;
+ int i;
+ u8 tmp;
+ u32 mbp, bit, hash[2] = { 0, };
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ mbp = cpmac_read(priv->regs, CPMAC_MBP);
+ if (dev->flags & IFF_PROMISC) {
+ /* promiscuous traffic goes to channel 0 */
+ cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
+ MBP_RXPROMISC);
+ } else {
+ cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
+ if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
+ } else {
+ /*
+ * cpmac uses some strange mac address hashing
+ * (not crc32)
+ */
+ for (i = 0, iter = dev->mc_list; i < dev->mc_count;
+ i++, iter = iter->next) {
+ bit = 0;
+ tmp = iter->dmi_addr[0];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = iter->dmi_addr[1];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = iter->dmi_addr[2];
+ bit ^= (tmp >> 6) ^ tmp;
+ tmp = iter->dmi_addr[3];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = iter->dmi_addr[4];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = iter->dmi_addr[5];
+ bit ^= (tmp >> 6) ^ tmp;
+ bit &= 0x3f; /* 64-bit hash table */
+ hash[bit / 32] |= 1 << (bit % 32);
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
+ }
+ }
+}
+
+/*
+ * Reap one rx descriptor: hand the received skb up and refill the
+ * descriptor with a freshly allocated one.  If allocation fails the
+ * packet is dropped and the old buffer is reused.  Returns the received
+ * skb on success, NULL on drop or spurious completion.  The descriptor
+ * is always re-armed (CPMAC_OWN) before returning.
+ */
+static struct sk_buff *cpmac_rx_one(struct net_device *dev,
+ struct cpmac_priv *priv,
+ struct cpmac_desc *desc)
+{
+ unsigned long flags;
+ struct sk_buff *skb, *result = NULL;
+
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ /* acknowledge the completion to the hardware */
+ cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
+ if (unlikely(!desc->datalen)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx: spurious interrupt\n",
+ dev->name);
+ return NULL;
+ }
+
+ skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (likely(skb)) {
+ skb_reserve(skb, 2); /* align IP header */
+ skb_put(desc->skb, desc->datalen);
+ desc->skb->protocol = eth_type_trans(desc->skb, dev);
+ desc->skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += desc->datalen;
+ result = desc->skb;
+ dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ /* swap the new skb into the descriptor */
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ if (unlikely(netif_msg_pktdata(priv))) {
+ printk(KERN_DEBUG "%s: received packet:\n", dev->name);
+ cpmac_dump_skb(dev, result);
+ }
+ } else {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING
+ "%s: low on skbs, dropping packet\n", dev->name);
+ priv->stats.rx_dropped++;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* re-arm the descriptor for the hardware */
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+
+ return result;
+}
+
+/*
+ * Non-NAPI receive path (disable_napi=1): reap every completed
+ * descriptor in interrupt context and feed it to netif_rx().
+ */
+static void cpmac_rx(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ if (unlikely(!priv->rx_head)) {
+ spin_unlock(&priv->lock);
+ return;
+ }
+
+ desc = priv->rx_head;
+
+ /* walk the ring until we hit a descriptor still owned by hardware */
+ while ((desc->dataflags & CPMAC_OWN) == 0) {
+ skb = cpmac_rx_one(dev, priv, desc);
+ if (likely(skb))
+ netif_rx(skb);
+ desc = desc->next;
+ }
+
+ priv->rx_head = desc;
+ /* restart rx DMA at the first hardware-owned descriptor */
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
+ spin_unlock(&priv->lock);
+}
+
+/*
+ * NAPI poll handler.  Reaps up to the budget/quota of completed rx
+ * descriptors; returns 0 (done) after re-enabling rx interrupts, or 1
+ * to stay on the poll list.
+ * NOTE(review): this path walks the ring without taking priv->lock,
+ * unlike cpmac_rx(); the cover letter reports a panic with NAPI enabled
+ * when forwarding IPv6 — suspect a race here, to be confirmed.
+ */
+static int cpmac_poll(struct net_device *dev, int *budget)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc;
+ int received = 0, quota = min(dev->quota, *budget);
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!priv->rx_head)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx: polling, but no queue\n",
+ dev->name);
+ netif_rx_complete(dev);
+ return 0;
+ }
+
+ desc = priv->rx_head;
+
+ while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
+ skb = cpmac_rx_one(dev, priv, desc);
+ if (likely(skb)) {
+ netif_receive_skb(skb);
+ received++;
+ }
+ desc = desc->next;
+ }
+
+ priv->rx_head = desc;
+ *budget -= received;
+ dev->quota -= received;
+ if (unlikely(netif_msg_rx_status(priv)))
+ printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name,
+ received);
+ /* ring drained: leave polling mode and re-enable rx interrupts */
+ if (desc->dataflags & CPMAC_OWN) {
+ netif_rx_complete(dev);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * hard_start_xmit handler.  Queues the skb on the next free tx channel.
+ * Both drop paths must return 0 (NETDEV_TX_OK): skb_padto() frees the
+ * skb on failure, and the ring-full path frees it explicitly, so
+ * returning non-zero (NETDEV_TX_BUSY) would make the core requeue an
+ * already-freed skb.
+ */
+static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int channel, len;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(skb_padto(skb, ETH_ZLEN))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING"%s: tx: padding failed, dropping\n",
+ dev->name);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /* skb already freed by skb_padto(); do not requeue */
+ return 0;
+ }
+
+ len = max(skb->len, ETH_ZLEN);
+ spin_lock_irqsave(&priv->lock, flags);
+ channel = priv->tx_tail++;
+ priv->tx_tail %= CPMAC_TX_RING_SIZE;
+ if (priv->tx_tail == priv->tx_head)
+ netif_stop_queue(dev);
+
+ desc = &priv->desc_ring[channel];
+ if (desc->dataflags & CPMAC_OWN) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: tx dma ring full, dropping\n",
+ dev->name);
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_kfree_skb_any(skb);
+ /* skb consumed (dropped); report success to the stack */
+ return 0;
+ }
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->datalen = len;
+ desc->buflen = len;
+ if (unlikely(netif_msg_tx_queued(priv)))
+ printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
+ skb->len);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ if (unlikely(netif_msg_pktdata(priv)))
+ cpmac_dump_skb(dev, skb);
+ /* kick the DMA engine for this channel */
+ cpmac_write(priv->regs, CPMAC_TX_PTR(channel), (u32)desc->mapping);
+
+ return 0;
+}
+
+/*
+ * TX completion for one channel: account the packet, unmap and free the
+ * skb, and wake the queue.  desc->skb is cleared after the free so a
+ * spurious completion on the same channel cannot free it twice.
+ */
+static void cpmac_end_xmit(struct net_device *dev, int channel)
+{
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ desc = &priv->desc_ring[channel];
+ cpmac_write(priv->regs, CPMAC_TX_ACK(channel), (u32)desc->mapping);
+ if (likely(desc->skb)) {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += desc->skb->len;
+ dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(netif_msg_tx_done(priv)))
+ printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
+ desc->skb, desc->skb->len);
+
+ dev_kfree_skb_irq(desc->skb);
+ /* drop the stale pointer to prevent a double free */
+ desc->skb = NULL;
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } else
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING
+ "%s: end_xmit: spurious interrupt\n", dev->name);
+ spin_unlock(&priv->lock);
+}
+
+/*
+ * Hard-reset the MAC: pulse the AR7 reset line, stop rx/tx DMA, clear
+ * all channel pointers and take the MII interface down.
+ */
+static void cpmac_reset(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ ar7_device_reset(priv->config->reset_bit);
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
+}
+
+/*
+ * Re-arm the whole rx ring after a hardware error: any descriptor the
+ * hardware completed but we have not reaped is counted as dropped and
+ * handed back to the hardware.  Despite the name, no memory is freed.
+ */
+static inline void cpmac_free_rx_ring(struct net_device *dev)
+{
+ struct cpmac_desc *desc;
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!priv->rx_head))
+ return;
+
+ desc = priv->rx_head;
+
+ for (i = 0; i < rx_ring_size; i++) {
+ desc->buflen = CPMAC_SKB_SIZE;
+ if ((desc->dataflags & CPMAC_OWN) == 0) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: packet dropped\n",
+ dev->name);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ desc->dataflags = CPMAC_OWN;
+ priv->stats.rx_dropped++;
+ }
+ desc = desc->next;
+ }
+}
+
+/*
+ * Shared interrupt handler.  Dispatches tx completions, rx (direct or
+ * via NAPI scheduling), acknowledges the interrupt, and on a host/status
+ * error resets and reinitializes the whole MAC.
+ */
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cpmac_priv *priv;
+ u32 status;
+
+ if (!dev)
+ return IRQ_NONE;
+
+ priv = netdev_priv(dev);
+
+ status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
+
+ if (unlikely(netif_msg_intr(priv)))
+ printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
+ status);
+
+ /* low 3 bits of the vector identify the completed tx channel */
+ if (status & MAC_INT_TX)
+ cpmac_end_xmit(dev, (status & 7));
+
+ if (status & MAC_INT_RX) {
+ if (disable_napi)
+ cpmac_rx(dev);
+ else {
+ /* mask rx until the poll handler has drained the ring */
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1);
+ netif_rx_schedule(dev);
+ }
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
+
+ if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) {
+ if (netif_msg_drv(priv) && net_ratelimit())
+ printk(KERN_ERR "%s: hw error, resetting...\n",
+ dev->name);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_regs(dev);
+ spin_lock(&priv->lock);
+ phy_stop(priv->phy);
+ cpmac_reset(dev);
+ cpmac_free_rx_ring(dev);
+ cpmac_hw_init(dev);
+ spin_unlock(&priv->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Transmit watchdog.  Reclaims the oldest tx descriptor and wakes the
+ * queue.  The skb pointer and ownership flags are cleared after the
+ * free so a late TX-complete interrupt on this channel cannot free the
+ * skb a second time or still see the descriptor as hardware-owned.
+ */
+static void cpmac_tx_timeout(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+
+ priv->stats.tx_errors++;
+ desc = &priv->desc_ring[priv->tx_head++];
+ priv->tx_head %= CPMAC_TX_RING_SIZE; /* was a magic 8 */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
+ if (desc->skb) {
+ dev_kfree_skb_any(desc->skb);
+ desc->skb = NULL;
+ desc->dataflags = 0;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * ioctl handler: forwards the MII ioctls to the PHY layer; everything
+ * else, or a device that is down / has no PHY, is rejected.
+ */
+static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev) || !priv->phy)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* ethtool get_settings: delegated entirely to the PHY layer. */
+static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->phy)
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phy, cmd);
+}
+
+/* ethtool set_settings: requires CAP_NET_ADMIN, then delegates to the PHY. */
+static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!priv->phy)
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phy, cmd);
+}
+
+/*
+ * ethtool get_drvinfo: fill the driver identification strings.  Bounded
+ * strlcpy() is used instead of strcpy()/sprintf() so the fixed-size
+ * ethtool_drvinfo fields can never be overrun.
+ */
+static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "cpmac", sizeof(info->driver));
+ strlcpy(info->version, "0.0.3", sizeof(info->version));
+ info->fw_version[0] = '\0';
+ strlcpy(info->bus_info, "cpmac", sizeof(info->bus_info));
+ info->regdump_len = 0;
+}
+
+static const struct ethtool_ops cpmac_ethtool_ops = {
+ .get_settings = cpmac_get_settings,
+ .set_settings = cpmac_set_settings,
+ .get_drvinfo = cpmac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/* get_stats: hand out our counters only while the device is present. */
+static struct net_device_stats *cpmac_stats(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ return netif_device_present(dev) ? &priv->stats : NULL;
+}
+
+/*
+ * change_mtu: accept only standard Ethernet MTUs (68..1500).  The write
+ * is done under the lock to serialize against the data path.
+ */
+static int cpmac_change_mtu(struct net_device *dev, int mtu)
+{
+ unsigned long flags;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ spinlock_t *lock = &priv->lock;
+
+ if ((mtu < 68) || (mtu > 1500))
+ return -EINVAL;
+
+ spin_lock_irqsave(lock, flags);
+ dev->mtu = mtu;
+ spin_unlock_irqrestore(lock, flags);
+
+ return 0;
+}
+
+/*
+ * PHY-layer link-change callback.  Tracks link/speed/duplex against the
+ * cached old values and logs a transition; kicks the tx queue when the
+ * link comes up.
+ */
+static void cpmac_adjust_link(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->phy->link) {
+ if (priv->phy->duplex != priv->oldduplex) {
+ new_state = 1;
+ priv->oldduplex = priv->phy->duplex;
+ }
+
+ if (priv->phy->speed != priv->oldspeed) {
+ new_state = 1;
+ priv->oldspeed = priv->phy->speed;
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ netif_schedule(dev);
+ }
+ } else if (priv->oldlink) {
+ /* link went down: invalidate the cached settings */
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv) && net_ratelimit())
+ phy_print_status(priv->phy);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*
+ * Program the MAC from scratch: channel pointers, rx filters, station
+ * address, interrupt masks, then enable rx/tx/MII and start the PHY.
+ * Called from open and from the error-recovery path in cpmac_irq().
+ */
+static void cpmac_hw_init(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
+
+ cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
+ MBP_RXMCAST);
+ cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xfe);
+ cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
+ /* same station address on all 8 channels */
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
+ (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[3] << 24));
+ cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
+ MAC_FDX);
+
+ /* force the PHY state machine to re-evaluate the link */
+ priv->phy->state = PHY_CHANGELINK;
+ phy_start(priv->phy);
+}
+
+/*
+ * open handler: attach the PHY, map the register window, allocate the
+ * combined tx+rx descriptor ring (DMA-coherent), populate the rx ring
+ * with skbs, request the irq and bring the hardware up.  Error paths
+ * unwind in reverse order; note the ring must be released with
+ * dma_free_coherent() (it was allocated with dma_alloc_coherent(), so
+ * kfree() would corrupt the allocator).
+ */
+static int cpmac_open(struct net_device *dev)
+{
+ int i, size, res;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+ struct sk_buff *skb;
+
+ priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(priv->phy)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: Could not attach to PHY\n",
+ dev->name);
+ return PTR_ERR(priv->phy);
+ }
+
+ if (!request_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start, dev->name)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to request registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_reserve;
+ }
+
+ priv->regs = ioremap(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+ if (!priv->regs) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to remap registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_remap;
+ }
+
+ priv->rx_head = NULL;
+ size = rx_ring_size + CPMAC_TX_RING_SIZE;
+ priv->desc_ring = dma_alloc_coherent(&dev->dev,
+ sizeof(struct cpmac_desc) * size,
+ &priv->dma_ring,
+ GFP_KERNEL);
+ if (!priv->desc_ring) {
+ res = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ /* Zero the ring so the cleanup loop below can tell initialized
+ * descriptors (non-NULL skb) from never-touched ones. */
+ memset(priv->desc_ring, 0, sizeof(struct cpmac_desc) * size);
+
+ priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
+ for (i = 0; i < size; i++)
+ priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
+
+ for (i = 0, desc = &priv->rx_head[i]; i < rx_ring_size; i++, desc++) {
+ skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+ if (unlikely(!skb)) {
+ res = -ENOMEM;
+ goto fail_desc;
+ }
+ skb_reserve(skb, 2); /* align IP header */
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ desc->next = &priv->rx_head[(i + 1) % rx_ring_size];
+ desc->hw_next = (u32)desc->next->mapping;
+ }
+
+ if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
+ dev->name, dev))) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to obtain irq\n",
+ dev->name);
+ goto fail_irq;
+ }
+
+ cpmac_reset(dev);
+ cpmac_hw_init(dev);
+
+ netif_start_queue(dev);
+ return 0;
+
+fail_irq:
+fail_desc:
+ for (i = 0; i < rx_ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+ /* was kfree(): the ring is DMA-coherent memory */
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+ priv->desc_ring, priv->dma_ring);
+
+fail_alloc:
+ iounmap(priv->regs);
+
+fail_remap:
+ release_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+
+fail_reserve:
+ phy_disconnect(priv->phy);
+
+ return res;
+}
+
+/*
+ * stop handler: quiesce the queue and PHY, reset the MAC, release the
+ * irq and I/O resources, free the rx skbs and the descriptor ring.
+ * iounmap() is the missing counterpart of the ioremap() in cpmac_open();
+ * without it every open/stop cycle leaked a register mapping.
+ */
+static int cpmac_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ phy_stop(priv->phy);
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ cpmac_reset(dev);
+
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
+ cpmac_write(priv->regs, CPMAC_MBP, 0);
+
+ free_irq(dev->irq, dev);
+ iounmap(priv->regs);
+ release_mem_region(dev->mem_start, dev->mem_end -
+ dev->mem_start);
+ priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
+ for (i = 0; i < rx_ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ kfree_skb(priv->rx_head[i].skb);
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
+ (CPMAC_TX_RING_SIZE + rx_ring_size),
+ priv->desc_ring, priv->dma_ring);
+ return 0;
+}
+
+static int external_switch;
+
+/*
+ * Platform probe: pick a PHY from the platform mask (or fall back to a
+ * fixed PHY for external/dumb switches), allocate and register the
+ * net_device.  The station address copy is bounded to ETH_ALEN: the old
+ * sizeof(dev->dev_addr) copied MAX_ADDR_LEN (32) bytes and read past
+ * the 6-byte address in the platform data.
+ */
+static int __devinit cpmac_probe(struct platform_device *pdev)
+{
+ int i, rc, phy_id;
+ struct resource *res;
+ struct cpmac_priv *priv;
+ struct net_device *dev;
+ struct plat_cpmac_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ if (!(pdata->phy_mask & (1 << phy_id)))
+ continue;
+ if (!cpmac_mii.phy_map[phy_id])
+ continue;
+ break;
+ }
+
+ if (phy_id == PHY_MAX_ADDR) {
+ if (external_switch || dumb_switch)
+ phy_id = 0;
+ else {
+ printk(KERN_ERR "cpmac: no PHY present\n");
+ return -ENODEV;
+ }
+ }
+
+ dev = alloc_etherdev(sizeof(struct cpmac_priv));
+
+ if (!dev) {
+ printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ platform_set_drvdata(pdev, dev);
+ priv = netdev_priv(dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dev->mem_start = res->start;
+ dev->mem_end = res->end;
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+
+ dev->open = cpmac_open;
+ dev->stop = cpmac_stop;
+ dev->set_config = cpmac_config;
+ dev->hard_start_xmit = cpmac_start_xmit;
+ dev->do_ioctl = cpmac_ioctl;
+ dev->get_stats = cpmac_stats;
+ dev->change_mtu = cpmac_change_mtu;
+ dev->set_mac_address = cpmac_set_mac_address;
+ dev->set_multicast_list = cpmac_set_multicast_list;
+ dev->tx_timeout = cpmac_tx_timeout;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
+ if (!disable_napi) {
+ dev->poll = cpmac_poll;
+ dev->weight = min(rx_ring_size, 64);
+ }
+
+ spin_lock_init(&priv->lock);
+ priv->msg_enable = netif_msg_init(debug_level, 0xff);
+ priv->config = pdata;
+ priv->dev = dev;
+ /* assumes the platform data holds a 6-byte Ethernet address —
+ * confirm against struct plat_cpmac_data */
+ memcpy(dev->dev_addr, priv->config->dev_addr, ETH_ALEN);
+ if (phy_id == 31) {
+ snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
+ cpmac_mii.id, phy_id);
+/* cpmac_write(cpmac_mii.priv, CPMAC_MDIO_PHYSEL(0), PHYSEL_LINKSEL
+ | PHYSEL_LINKINT | phy_id);*/
+ } else
+ snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
+
+ if ((rc = register_netdev(dev))) {
+ printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
+ dev->name);
+ goto fail;
+ }
+
+ if (netif_msg_probe(priv)) {
+ printk(KERN_INFO
+ "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
+ dev->name, (u32 *)dev->mem_start, dev->irq,
+ priv->phy_name);
+ for (i = 0; i < 6; i++)
+ printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");
+ }
+ return 0;
+
+fail:
+ free_netdev(dev);
+ return rc;
+}
+
+/* Platform remove: unregister first so no new operations can reach the
+ * device, then release the net_device itself. */
+static int __devexit cpmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver cpmac_driver = {
+ .driver.name = "cpmac",
+ .probe = cpmac_probe,
+ .remove = cpmac_remove,
+};
+
+/*
+ * Module init: map the MDIO block, release the EPHY from reset, scan
+ * the ALIVE mask for attached PHYs (more than one alive bit is taken to
+ * mean an external switch), then register the MII bus and the platform
+ * driver.
+ */
+int __devinit cpmac_init(void)
+{
+ u32 mask;
+ int i, res;
+
+ cpmac_mii.priv = ioremap(AR7_REGS_MDIO, 256);
+
+ if (!cpmac_mii.priv) {
+ printk(KERN_ERR "Can't ioremap mdio registers\n");
+ return -ENXIO;
+ }
+
+#warning FIXME: unhardcode gpio&reset bits
+ ar7_gpio_disable(26);
+ ar7_gpio_disable(27);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+ ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+ cpmac_mii.reset(&cpmac_mii);
+
+ /* bounded poll for any PHY to report alive */
+ for (i = 0; i < 300000; i++)
+ if ((mask = cpmac_read(cpmac_mii.priv, CPMAC_MDIO_ALIVE)))
+ break;
+ else
+ cpu_relax();
+
+ mask &= 0x7fffffff;
+ if (mask & (mask - 1)) {
+ /* more than one bit set: an external switch answers on
+ * multiple addresses */
+ external_switch = 1;
+ mask = 0;
+ }
+
+ cpmac_mii.phy_mask = ~(mask | 0x80000000);
+
+ res = mdiobus_register(&cpmac_mii);
+ if (res)
+ goto fail_mii;
+
+ res = platform_driver_register(&cpmac_driver);
+ if (res)
+ goto fail_cpmac;
+
+ return 0;
+
+fail_cpmac:
+ mdiobus_unregister(&cpmac_mii);
+
+fail_mii:
+ iounmap(cpmac_mii.priv);
+
+ return res;
+}
+
+/* Module exit: tear down in reverse order of cpmac_init(). */
+void __devexit cpmac_exit(void)
+{
+ platform_driver_unregister(&cpmac_driver);
+ mdiobus_unregister(&cpmac_mii);
+ iounmap(cpmac_mii.priv);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH][MIPS][7/7] AR7: ethernet
2007-09-20 16:13 ` [PATCH][MIPS][7/7] AR7: ethernet Matteo Croce
@ 2007-09-29 5:39 ` Jeff Garzik
0 siblings, 0 replies; 13+ messages in thread
From: Jeff Garzik @ 2007-09-29 5:39 UTC (permalink / raw)
To: Matteo Croce
Cc: linux-mips, Eugene Konev, netdev, davem, kuznet, pekkas, jmorris,
yoshfuji, kaber, Andrew Morton
Overall, looks pretty clean, good job!
Comments:
1) [major issue] Don't take and release a heavy lock on every single RX
packet.
2) remove net_device_stats from private structure, and use net_device::stats
3) rx_ring_size should not be a module param, since that should be
supported via ethtool
4) cpmac_tx_timeout() doesn't really do anything to alleviate the condition
5) don't set dev->mem_start and dev->mem_end, those are fields that are
going away, and that's not their intended purpose. just store a pointer
to the resource info.
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2007-09-29 5:39 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <200709201728.10866.technoboy85@gmail.com>
2007-09-20 16:13 ` [PATCH][MIPS][7/7] AR7: ethernet Matteo Croce
2007-09-29 5:39 ` Jeff Garzik
[not found] <200709080143.12345.technoboy85@gmail.com>
2007-09-08 0:23 ` Matteo Croce
2007-09-12 16:50 ` Ralf Baechle
2007-09-13 1:42 ` Thiemo Seufer
2007-09-13 11:35 ` Ralf Baechle
[not found] <200708201704.11529.technoboy85@gmail.com>
2007-09-06 15:34 ` Matteo Croce
2007-09-06 22:30 ` Andrew Morton
2007-09-06 23:04 ` Randy Dunlap
2007-09-06 23:21 ` Matteo Croce
2007-09-07 0:41 ` Andrew Morton
2007-09-07 23:04 ` Jeff Garzik
2007-09-07 7:10 ` Geert Uytterhoeven
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).