From: "David H. Lynch Jr." <dhlii@dlasys.net>
To: linuxppc-embedded <linuxppc-embedded@ozlabs.org>, netdev@vger.kernel.org
Subject: [PATCH] Linux Device Driver for Xilinx LL TEMAC 10/100/1000 Ethernet NIC
Date: Tue, 19 Aug 2008 05:34:04 -0400 [thread overview]
Message-ID: <48AA938C.4060309@dlasys.net> (raw)
[-- Attachment #1: Type: text/plain, Size: 8 bytes --]
Pass II
[-- Attachment #2: lltemac --]
[-- Type: text/plain, Size: 45237 bytes --]
Linux Device Driver for Xilinx LL TEMAC 10/100/1000 Ethernet NIC
Original Author Yoshio Kashiwagi
Updated and Maintained by David Lynch
Signed-off-by: David H. Lynch Jr. <dhlii@dlasys.net>
---
drivers/net/Kconfig | 5
drivers/net/Makefile | 1
drivers/net/xps_lltemac.c | 1562 ++++++++++++++++++++++++++++++++++++++++
include/linux/xilinx_devices.h | 2
4 files changed, 1569 insertions(+), 1 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 033e13f..71b4c17 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2332,6 +2332,11 @@ config MV643XX_ETH
Some boards that use the Discovery chipset are the Momenco
Ocelot C and Jaguar ATX and Pegasos II.
+config XPS_LLTEMAC
+ tristate "Xilinx LLTEMAC 10/100/1000 Ethernet MAC driver"
+ help
+ This driver supports the Xilinx 10/100/1000 LLTEMAC found in Virtex 4 FPGAs
+
config QLA3XXX
tristate "QLogic QLA3XXX Network Driver Support"
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1f09934..9196bab 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
obj-$(CONFIG_PICO_TEMAC) += pico_temac.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_XPS_LLTEMAC) += xps_lltemac.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_PPP) += ppp_generic.o
diff --git a/drivers/net/xps_lltemac.c b/drivers/net/xps_lltemac.c
new file mode 100644
index 0000000..8af4e7a
--- /dev/null
+++ b/drivers/net/xps_lltemac.c
@@ -0,0 +1,1562 @@
+/*======================================================================
+
+ Driver for Xilinx temac ethernet NIC's
+
+ Author: Yoshio Kashiwagi
+ Copyright (c) 2008 Nissin Systems Co.,Ltd.
+
+   Revisions: David H. Lynch Jr. <dhlii@dlasys.net>
+ Copyright (C) 2005-2008 DLA Systems
+
+======================================================================*/
+/* DRV_NAME is for compatibility with existing xilinx ll temac driver
+ * also with of/dtc object names */
+#define DRV_NAME "xilinx_lltemac"
+#define DRV_AUTHOR "Yoshio Kashiwagi"
+#define DRV_EMAIL ""
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <linux/mii.h>
+#include <linux/in.h>
+#include <linux/pci.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h> /* just needed for sizeof(tcphdr) */
+#include <linux/udp.h> /* needed for sizeof(udphdr) */
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#define MII_ANI 0x10
+#define PHY_NUM 0
+#define PHY_TIMEOUT 10000
+
+#define MII_SSR 0x11
+#define MII_SSR_LINK (1 << 10)
+#define MII_SSR_SPDMASK 0xC000
+#define MII_SSR_SPD1000 (1 << 15)
+#define MII_SSR_SPD100 (1 << 14)
+#define MII_SSR_SPD10 0
+#define MII_SSR_FD (1 << 13)
+
+#define MII_ISR 0x13
+#define MII_REG_MAX 0xff
+#define TEMAC_REG_MAX 0x3ff
+#define PHY_ADDR_INVALID 0xff
+
+/* packet size info */
+#define XTE_MTU 1500 /* max MTU size of Ethernet frame */
+#define XTE_HDR_SIZE 14 /* size of Ethernet header */
+#define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
+#define XTE_MAX_FRAME_SIZE (XTE_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+#define XTE_JUMBO_MTU 9000
+#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+
+/* Configuration options */
+
+/* Accept all incoming packets.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_PROMISC (1 << 0)
+/* Jumbo frame support for Tx & Rx.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_JUMBO (1 << 1)
+/* VLAN Rx & Tx frame support.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_VLAN (1 << 2)
+/* Enable recognition of flow control frames on Rx
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_FLOW_CONTROL (1 << 4)
+/* Strip FCS and PAD from incoming frames.
+ * Note: PAD from VLAN frames is not stripped.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_FCS_STRIP (1 << 5)
+/* Generate FCS field and add PAD automatically for outgoing frames.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_FCS_INSERT (1 << 6)
+/* Enable Length/Type error checking for incoming frames. When this option is
+set, the MAC will filter frames that have a mismatched type/length field
+and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
+types of frames are encountered. When this option is cleared, the MAC will
+allow these types of frames to be received.
+This option defaults to enabled (set) */
+#define XTE_OPTION_LENTYPE_ERR (1 << 7)
+/* Enable the transmitter.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_TXEN (1 << 11)
+/* Enable the receiver
+* This option defaults to enabled (set) */
+#define XTE_OPTION_RXEN (1 << 12)
+
+/* Default options set when device is initialized or reset */
+#define XTE_OPTION_DEFAULTS \
+ (XTE_OPTION_TXEN | \
+ XTE_OPTION_FLOW_CONTROL | \
+ XTE_OPTION_RXEN)
+
+/* XPS_LL_TEMAC SDMA registers definition */
+
+#define TX_NXTDESC_PTR 0x00 /* r */
+#define TX_CURBUF_ADDR 0x04 /* r */
+#define TX_CURBUF_LENGTH 0x08 /* r */
+#define TX_CURDESC_PTR 0x0c /* rw */
+#define TX_TAILDESC_PTR 0x10 /* rw */
+#define TX_CHNL_CTRL 0x14 /* rw */
+/*
+ 0:7 24:31 IRQTimeout
+ 8:15 16:23 IRQCount
+ 16:20 11:15 Reserved
+ 21 10 0
+ 22 9 UseIntOnEnd
+ 23 8 LdIRQCnt
+ 24 7 IRQEn
+ 25:28 3:6 Reserved
+ 29 2 IrqErrEn
+ 30 1 IrqDlyEn
+ 31 0 IrqCoalEn
+*/
+#define CHNL_CTRL_IRQ_IOE (1 << 9)
+#define CHNL_CTRL_IRQ_EN (1 << 7)
+#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
+#define CHNL_CTRL_IRQ_DLY_EN (1 << 1)
+#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
+#define TX_IRQ_REG 0x18 /* rw */
+/*
+ 0:7 24:31 DltTmrValue
+ 8:15 16:23 ClscCntrValue
+ 16:17 14:15 Reserved
+ 18:21 10:13 ClscCnt
+ 22:23 8:9 DlyCnt
+ 24:28 3::7 Reserved
+ 29 2 ErrIrq
+ 30 1 DlyIrq
+ 31 0 CoalIrq
+ */
+#define TX_CHNL_STS 0x1c /* r */
+/*
+ 0:9 22:31 Reserved
+ 10 21 TailPErr
+ 11 20 CmpErr
+ 12 19 AddrErr
+ 13 18 NxtPErr
+ 14 17 CurPErr
+ 15 16 BsyWr
+ 16:23 8:15 Reserved
+ 24 7 Error
+ 25 6 IOE
+ 26 5 SOE
+ 27 4 Cmplt
+ 28 3 SOP
+ 29 2 EOP
+ 30 1 EngBusy
+ 31 0 Reserved
+*/
+
+#define RX_NXTDESC_PTR 0x20 /* r */
+#define RX_CURBUF_ADDR 0x24 /* r */
+#define RX_CURBUF_LENGTH 0x28 /* r */
+#define RX_CURDESC_PTR 0x2c /* rw */
+#define RX_TAILDESC_PTR 0x30 /* rw */
+#define RX_CHNL_CTRL 0x34 /* rw */
+/*
+ 0:7 24:31 IRQTimeout
+ 8:15 16:23 IRQCount
+ 16:20 11:15 Reserved
+ 21 10 0
+ 22 9 UseIntOnEnd
+ 23 8 LdIRQCnt
+ 24 7 IRQEn
+ 25:28 3:6 Reserved
+ 29 2 IrqErrEn
+ 30 1 IrqDlyEn
+ 31 0 IrqCoalEn
+ */
+#define RX_IRQ_REG 0x38 /* rw */
+#define IRQ_COAL (1 << 0)
+#define IRQ_DLY (1 << 1)
+#define IRQ_ERR (1 << 2)
+#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
+/*
+ 0:7 24:31 DltTmrValue
+ 8:15 16:23 ClscCntrValue
+ 16:17 14:15 Reserved
+ 18:21 10:13 ClscCnt
+ 22:23 8:9 DlyCnt
+ 24:28 3::7 Reserved
+*/
+#define RX_CHNL_STS 0x3c /* r */
+#define CHNL_STS_ENGBUSY (1 << 1)
+#define CHNL_STS_EOP (1 << 2)
+#define CHNL_STS_SOP (1 << 3)
+#define CHNL_STS_CMPLT (1 << 4)
+#define CHNL_STS_SOE (1 << 5)
+#define CHNL_STS_IOE (1 << 6)
+#define CHNL_STS_ERR (1 << 7)
+
+#define CHNL_STS_BSYWR (1 << 16)
+#define CHNL_STS_CURPERR (1 << 17)
+#define CHNL_STS_NXTPERR (1 << 18)
+#define CHNL_STS_ADDRERR (1 << 19)
+#define CHNL_STS_CMPERR (1 << 20)
+#define CHNL_STS_TAILERR (1 << 21)
+/*
+ 0:9 22:31 Reserved
+ 10 21 TailPErr
+ 11 20 CmpErr
+ 12 19 AddrErr
+ 13 18 NxtPErr
+ 14 17 CurPErr
+ 15 16 BsyWr
+ 16:23 8:15 Reserved
+ 24 7 Error
+ 25 6 IOE
+ 26 5 SOE
+ 27 4 Cmplt
+ 28 3 SOP
+ 29 2 EOP
+ 30 1 EngBusy
+ 31 0 Reserved
+*/
+
+#define DMA_CONTROL_REG 0x40 /* rw */
+#define DMA_CONTROL_RST (1 << 0)
+
+/* XPS_LL_TEMAC direct registers definition */
+
+#define XTE_RAF0_OFFSET 0x00
+#define RAF0_RST (1 << 0)
+#define RAF0_MCSTREJ (1 << 1)
+#define RAF0_BCSTREJ (1 << 2)
+#define XTE_TPF0_OFFSET 0x04
+#define XTE_IFGP0_OFFSET 0x08
+#define XTE_ISR0_OFFSET 0x0c
+#define ISR0_HARDACSCMPLT (1 << 0)
+#define ISR0_AUTONEG (1 << 1)
+#define ISR0_RXCMPLT (1 << 2)
+#define ISR0_RXREJ (1 << 3)
+#define ISR0_RXFIFOOVR (1 << 4)
+#define ISR0_TXCMPLT (1 << 5)
+#define ISR0_RXDCMLCK (1 << 6)
+
+#define XTE_IPR0_OFFSET 0x10
+#define XTE_IER0_OFFSET 0x14
+
+#define XTE_MSW0_OFFSET 0x20
+#define XTE_LSW0_OFFSET 0x24
+#define XTE_CTL0_OFFSET 0x28
+#define XTE_RDY0_OFFSET 0x2c
+
+#define XTE_RSE_MIIM_RR_MASK 0x0002
+#define XTE_RSE_MIIM_WR_MASK 0x0004
+#define XTE_RSE_CFG_RR_MASK 0x0020
+#define XTE_RSE_CFG_WR_MASK 0x0040
+
+/* XPS_LL_TEMAC indirect registers offset definition */
+
+/* Rx configuration word 0 */
+#define XTE_RXC0_OFFSET 0x00000200
+/* Rx configuration word 1 */
+#define XTE_RXC1_OFFSET 0x00000240
+/* Receiver reset */
+#define XTE_RXC1_RXRST_MASK (1 << 31)
+/* Jumbo frame enable */
+#define XTE_RXC1_RXJMBO_MASK (1 << 30)
+/* FCS not stripped */
+#define XTE_RXC1_RXFCS_MASK (1 << 29)
+/* Receiver enable */
+#define XTE_RXC1_RXEN_MASK (1 << 28)
+/* VLAN enable */
+#define XTE_RXC1_RXVLAN_MASK (1 << 27)
+/* Half duplex */
+#define XTE_RXC1_RXHD_MASK (1 << 26)
+/* Length/type check disable */
+#define XTE_RXC1_RXLT_MASK (1 << 25)
+
+/* Tx configuration */
+#define XTE_TXC_OFFSET 0x00000280
+/* Transmitter reset */
+#define XTE_TXC_TXRST_MASK (1 << 31)
+/* Jumbo frame enable */
+#define XTE_TXC_TXJMBO_MASK (1 << 30)
+/* Generate FCS */
+#define XTE_TXC_TXFCS_MASK (1 << 29)
+/* Transmitter enable */
+#define XTE_TXC_TXEN_MASK (1 << 28)
+/* VLAN enable */
+#define XTE_TXC_TXVLAN_MASK (1 << 27)
+/* Half duplex */
+#define XTE_TXC_TXHD_MASK (1 << 26)
+/* Flow control configuration */
+#define XTE_FCC_OFFSET 0x000002C0
+/* Rx flow control enable */
+#define XTE_FCC_RXFLO_MASK (1 << 29)
+/* Tx flow control enable */
+#define XTE_FCC_TXFLO_MASK (1 << 30)
+
+/* EMAC configuration */
+#define XTE_EMCFG_OFFSET 0x00000300
+/* Link speed */
+#define XTE_EMCFG_LINKSPD_MASK 0xC0000000
+/* Host interface enable */
+#define XTE_EMCFG_HOSTEN_MASK (1 << 26)
+/* XTE_EMCFG_LINKSPD_MASK for 10 Mbit */
+#define XTE_EMCFG_LINKSPD_10 0x00000000
+/* XTE_EMCFG_LINKSPD_MASK for 100 Mbit */
+#define XTE_EMCFG_LINKSPD_100 (1 << 30)
+/* XTE_EMCFG_LINKSPD_MASK for 1000 Mbit */
+#define XTE_EMCFG_LINKSPD_1000 (1 << 31)
+
+/* RGMII/SGMII configuration */
+#define XTE_GMIC_OFFSET 0x00000320
+/* Management configuration */
+#define XTE_MC_OFFSET 0x00000340
+/* MII management enable */
+#define XTE_MC_MDIO_MASK (1 << 6)
+/* 100 MHz host clock */
+#define XTE_MDIO_CLOCK_DIV_100MHz 0x28
+/* Default MDIO clock divisor */
+#define XTE_MDIO_DIV_DFT 29
+/* Unicast address word 0 */
+#define XTE_UAW0_OFFSET 0x00000380
+/* Unicast address word 1 */
+#define XTE_UAW1_OFFSET 0x00000384
+
+/* Multicast address word 0 */
+#define XTE_MAW0_OFFSET 0x00000388
+/* Multicast address word 1 */
+#define XTE_MAW1_OFFSET 0x0000038C
+/* Promiscuous mode */
+#define XTE_AFM_OFFSET 0x00000390
+/* Promiscuous mode enable */
+#define XTE_AFM_EPPRM_MASK (1 << 31)
+
+/* Interrupt Request status */
+#define XTE_TIS_OFFSET 0x000003A0
+#define TIS_FRIS (1 << 0)
+#define TIS_MRIS (1 << 1)
+#define TIS_MWIS (1 << 2)
+#define TIS_ARIS (1 << 3)
+#define TIS_AWIS (1 << 4)
+#define TIS_CRIS (1 << 5)
+#define TIS_CWIS (1 << 6)
+/* Interrupt Request enable */
+#define XTE_TIE_OFFSET 0x000003A4
+
+/** MII Management Control register (MGTCR) */
+
+/* MII data */
+#define XTE_MGTDR_OFFSET 0x000003B0
+/* MII control */
+#define XTE_MIIMAI_OFFSET 0x000003B4
+
+#define CNTLREG_WRITE_ENABLE_MASK 0x8000
+#define CNTLREG_EMAC1SEL_MASK 0x0400
+#define CNTLREG_ADDRESSCODE_MASK 0x03ff
+
+/* CDMAC descriptor status bit definitions */
+
+#define STS_CTRL_APP0_ERR (1 << 31)
+#define STS_CTRL_APP0_IRQONEND (1 << 30)
+/* undocumented */
+#define STS_CTRL_APP0_STOPONEND (1 << 29)
+#define STS_CTRL_APP0_CMPLT (1 << 28)
+#define STS_CTRL_APP0_SOP (1 << 27)
+#define STS_CTRL_APP0_EOP (1 << 26)
+#define STS_CTRL_APP0_ENGBUSY (1 << 25)
+/* undocumented */
+#define STS_CTRL_APP0_ENGRST (1 << 24)
+
+#define TX_CONTROL_CALC_CSUM_MASK 1
+
+#define XTE_ALIGN 32
+#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
+
+#define MULTICAST_CAM_TABLE_NUM 4
+
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+#define XILINX_GSRD3_NAPI
+
+
+/* TX/RX CURDESC_PTR points to first descriptor */
+/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
+
+/* CDMAC buffer descriptor.  The layout matches the hardware DMA
+ * engine's descriptor format, so the pointer-typed fields actually
+ * hold *physical* addresses as seen by the DMA engine, not kernel
+ * virtual addresses (see temac_bd_init()). */
+struct cdmac_bd {
+	struct cdmac_bd *next;	/* physical address of next descriptor */
+	unsigned char *phys;	/* physical address of the data buffer */
+	u32 len;		/* buffer length in bytes */
+	u32 app0;		/* status/control bits, STS_CTRL_APP0_* */
+	u32 app1; /* TX start << 16 | insert */
+	u32 app2; /* TX csum */
+	u32 app3; /* unused ? */
+	u32 app4; /* skb for TX length for RX */
+} ;
+/* APP0 bits
+ 0 Error
+ 1 IrqOnEnd generate an interrupt at completion of DMA op
+ 2 reserved
+ 3 completed Current descriptor completed
+   4	SOP	TX - marks first desc / RX marks first desc
+ 5 EOP TX marks last desc/RX marks last desc
+ 6 EngBusy DMA is processing
+ 7 reserved
+ 8:31 application specific
+ */
+
+/* One memory-mapped register window of the device. */
+struct temac_region {
+	void __iomem *addr;	/* mapped address used for register access */
+	void __iomem *base;	/* NOTE(review): unmapped/raw base? probe code not visible — confirm */
+	unsigned long len;	/* size of the region in bytes */
+};
+
+/* Per-device driver state, stored in netdev_priv(ndev). */
+struct temac_local {
+	/* Statistics for this device */
+	struct net_device_stats stats;
+	struct net_device *dev;		/* back-pointer to our net_device */
+	struct temac_region regs;	/* TEMAC direct/indirect registers */
+	struct temac_region sdma;	/* SDMA channel registers */
+	int tx_irq;
+	int rx_irq;
+
+	int emac_num;			/* EMAC select, shifted into CTL0 writes */
+	u16 phy_addr;
+	/* Speed of link 10/100/1000 */
+	int LinkSpeed;
+	/* Current options word */
+	u32 options;
+	spinlock_t lock;		/* serializes MDIO accesses */
+	spinlock_t rx_lock;		/* protects the RX ring */
+	struct cdmac_bd *tx_bd_v;	/* TX ring, kernel virtual address */
+	struct cdmac_bd *tx_bd_p;	/* TX ring, DMA (physical) address */
+	struct cdmac_bd *rx_bd_v;	/* RX ring, kernel virtual address */
+	struct cdmac_bd *rx_bd_p;	/* RX ring, DMA (physical) address */
+	int tx_bd_ci;			/* TX completion (reclaim) index */
+	int tx_bd_next;
+	int tx_bd_tail;			/* next free TX descriptor index */
+	int rx_bd_ci;			/* next RX descriptor to reap */
+	struct sk_buff **rx_skb;	/* skb attached to each RX descriptor */
+};
+
+
+/* Read a 32-bit big-endian TEMAC direct register at @offset. */
+static u32
+tior(struct net_device *ndev, int offset)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	void __iomem *reg = lp->regs.addr + offset;
+
+	return in_be32((u32 *)reg);
+}
+
+/* Write @value to the 32-bit big-endian TEMAC direct register at @offset. */
+static void
+tiow(struct net_device *ndev, int offset, u32 value)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	void __iomem *reg = lp->regs.addr + offset;
+
+	out_be32((u32 *)reg, value);
+}
+
+/* Set (@flg != 0) or clear (@flg == 0) the bits @val of direct
+ * register @reg_num using read-modify-write.  Always returns 0. */
+static u32
+tio_setclr(struct net_device *ndev, u32 reg_num, u32 val, int flg)
+{
+	u32 cur = tior(ndev, reg_num);
+
+	cur &= ~val;
+	if (flg)
+		cur |= val;
+	tiow(ndev, reg_num, cur);
+	return 0;
+}
+
+/* Read a 32-bit big-endian SDMA channel register at @offset. */
+static u32
+sd_ior(struct net_device *ndev, int offset)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	void __iomem *reg = lp->sdma.addr + offset;
+
+	return in_be32((u32 *)reg);
+}
+
+/* Write @value to the 32-bit big-endian SDMA channel register at @offset. */
+static void
+sd_iow(struct net_device *ndev, int offset, u32 value)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	void __iomem *reg = lp->sdma.addr + offset;
+
+	out_be32((u32 *)reg, value);
+}
+
+/* Read PHY register @reg_num of @phy_id through the TEMAC MII
+ * management interface.  Returns the register value, or (unsigned)-1
+ * on invalid arguments or busy-wait timeout.
+ * Fix vs. original: the timeout path returned with lp->lock still
+ * held, deadlocking every subsequent MDIO access. */
+static unsigned int
+mdio_read(struct net_device *ndev, int phy_id, int reg_num)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	u32 timeout = PHY_TIMEOUT;
+	u32 rv = 0;
+	unsigned long flags;
+
+	if ((reg_num > MII_REG_MAX) ||
+	    (phy_id == PHY_ADDR_INVALID)) {
+		dev_err(&ndev->dev,
+			"mdio_read(%x, %x) invalid reg_num or invalid phy_id\n",
+			reg_num, phy_id);
+		return -1;
+	}
+
+	spin_lock_irqsave(&lp->lock, flags);
+
+	/* Latch phy/reg into LSW0, then trigger the MII read via CTL0. */
+	tiow(ndev, XTE_LSW0_OFFSET,
+	     ((phy_id << 5) | (reg_num)));
+	tiow(ndev, XTE_CTL0_OFFSET,
+	     XTE_MIIMAI_OFFSET | (lp->emac_num << 10));
+	/* Busy-wait for the MII read-ready bit. */
+	while (!(tior(ndev, XTE_RDY0_OFFSET) & XTE_RSE_MIIM_RR_MASK)) {
+		udelay(1);
+		if (--timeout == 0) {
+			spin_unlock_irqrestore(&lp->lock, flags);
+			dev_err(&ndev->dev, "read_MII busy timeout!!\n");
+			return -1;
+		}
+	}
+	rv = tior(ndev, XTE_LSW0_OFFSET);
+
+	spin_unlock_irqrestore(&lp->lock, flags);
+	return rv;
+}
+
+/* Write @reg_val to PHY register @reg_num of @phy_id through the
+ * TEMAC MII management interface.
+ * Fixes vs. original:
+ *  - "return -1" in a void function (invalid C, rejected by gcc);
+ *  - the timeout path returned with lp->lock still held. */
+static void
+mdio_write(struct net_device *ndev, int phy_id, int reg_num, int reg_val)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	u32 timeout = PHY_TIMEOUT, status;
+	unsigned long flags;
+
+	if ((reg_num > MII_REG_MAX) ||
+	    (phy_id == PHY_ADDR_INVALID)) {
+		dev_err(&ndev->dev,
+			"mdio_write(%x, %x)invalid reg_num or invalid phy_id\n",
+			reg_num, phy_id);
+		return;
+	}
+
+	spin_lock_irqsave(&lp->lock, flags);
+
+	/* Stage the data word in the MII data register... */
+	tiow(ndev, XTE_LSW0_OFFSET,
+	     reg_val);
+	tiow(ndev, XTE_CTL0_OFFSET,
+	     CNTLREG_WRITE_ENABLE_MASK | XTE_MGTDR_OFFSET);
+	/* ...then trigger the write toward phy/reg via CTL0. */
+	tiow(ndev, XTE_LSW0_OFFSET,
+	     ((phy_id << 5) | (reg_num)));
+	tiow(ndev, XTE_CTL0_OFFSET,
+	     CNTLREG_WRITE_ENABLE_MASK
+	     | XTE_MIIMAI_OFFSET
+	     | (lp->emac_num << 10));
+	/* Busy-wait for the MII write-ready bit. */
+	while (!(status = tior(ndev, XTE_RDY0_OFFSET) & XTE_RSE_MIIM_WR_MASK)) {
+		udelay(1);
+		if (--timeout == 0) {
+			spin_unlock_irqrestore(&lp->lock, flags);
+			dev_err(&ndev->dev, "write_MII busy timeout!!\n");
+			return;
+		}
+	}
+
+	spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* Read indirect (host-interface) register @reg_num via CTL0/LSW0.
+ * Returns the register value, or (u32)-1 on an invalid register
+ * number or busy-wait timeout.
+ * NOTE(review): unlike mdio_read(), no lock is taken here — callers
+ * are presumably expected to serialize; confirm against all callers. */
+static u32
+emac_cfg_read(struct net_device *ndev, u16 reg_num)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	u32 timeout = PHY_TIMEOUT;
+
+	if (reg_num > TEMAC_REG_MAX) {
+		dev_err(&ndev->dev,
+			"emac_cfg_read(%x) invalid reg_num\n",
+			reg_num);
+		return -1;
+	}
+
+	/* Trigger the indirect read, then poll the config-read-ready bit. */
+	tiow(ndev, XTE_CTL0_OFFSET, (lp->emac_num << 10) | reg_num);
+	while (!(tior(ndev, XTE_RDY0_OFFSET) & XTE_RSE_CFG_RR_MASK)) {
+		udelay(1);
+		if (--timeout == 0) {
+			dev_err(&ndev->dev, "read temac busy timeout!!\n");
+			return -1;
+		}
+	}
+	return (u32) tior(ndev, XTE_LSW0_OFFSET);
+
+}
+
+/* Write @val to indirect (host-interface) register @reg_num via
+ * LSW0/CTL0 and wait for the config-write-ready bit.
+ * Fix vs. original: "return -1" in a void function (invalid C).
+ * NOTE(review): no lock is taken here; callers must serialize. */
+static void
+emac_cfg_write(struct net_device *ndev, u32 reg_num, u32 val)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	u32 timeout = PHY_TIMEOUT;
+
+	if (reg_num > TEMAC_REG_MAX) {
+		dev_err(&ndev->dev,
+			"emac_cfg_write(%x) invalid reg_num\n",
+			reg_num);
+		return;
+	}
+
+	tiow(ndev, XTE_LSW0_OFFSET, val);
+	tiow(ndev,
+	     XTE_CTL0_OFFSET,
+	     (CNTLREG_WRITE_ENABLE_MASK
+	      | (lp->emac_num << 10)
+	      | reg_num));
+	while (!(tior(ndev, XTE_RDY0_OFFSET) & XTE_RSE_CFG_WR_MASK)) {
+		udelay(1);
+		if (--timeout == 0) {
+			dev_err(&ndev->dev, "write temac busy timeout!!\n");
+			return;
+		}
+	}
+}
+
+/* Set (@flg != 0) or clear (@flg == 0) the bits @val of indirect
+ * register @reg_num via read-modify-write.  Returns 0 on success,
+ * (u32)-1 when @reg_num is out of range. */
+static u32
+emac_cfg_setclr(struct net_device *ndev, u32 reg_num, u32 val, int flg)
+{
+	u32 cur;
+
+	if (reg_num > TEMAC_REG_MAX) {
+		dev_err(&ndev->dev,
+			"emac_cfg_setclr(%x) invalid reg_num\n",
+			reg_num);
+		return -1;
+	}
+
+	cur = emac_cfg_read(ndev, reg_num);
+	cur &= ~val;
+	if (flg)
+		cur |= val;
+	emac_cfg_write(ndev, reg_num, cur);
+	return 0;
+}
+
+/* Set the interface MAC address and program it into the unicast
+ * address filter words UAW0/UAW1.  If @address is NULL the current
+ * dev_addr is kept; an invalid address is replaced with a random
+ * locally-administered one.  Always returns 0. */
+static int
+temac_set_mac_address(struct net_device *ndev, void *address)
+{
+	if (address)
+		memcpy(ndev->dev_addr, address, ETH_ALEN);
+
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		random_ether_addr(ndev->dev_addr);
+
+	/* set up unicast MAC address filter set its mac address */
+	emac_cfg_write(ndev, XTE_UAW0_OFFSET,
+		       ((ndev->dev_addr[0]) |
+			(ndev->dev_addr[1] << 8) |
+			(ndev->dev_addr[2] << 16) |
+			(ndev->dev_addr[3] << 24)));
+	/* There are reserved bits in EUAW1
+	 * so don't affect them Set MAC bits [47:32] in EUAW1 */
+	emac_cfg_write(ndev, XTE_UAW1_OFFSET,
+		       (ndev->dev_addr[4] & 0x000000ff) |
+		       (ndev->dev_addr[5] << 8));
+
+	return 0;
+}
+
+/* Program the receive address filter from ndev's flags and multicast
+ * list.  Three cases:
+ *  - promiscuous/allmulti requested, or more multicast addresses than
+ *    CAM entries: enable promiscuous mode;
+ *  - 1..MULTICAST_CAM_TABLE_NUM multicast addresses: load them into
+ *    the CAM via the MAW0/MAW1 register pair;
+ *  - none: clear promiscuous mode and the CAM. */
+static void
+temac_set_multicast_list(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	u32 multi_addr_msw, multi_addr_lsw;
+	int i;
+
+	spin_lock(&lp->lock);
+
+	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)
+	    || ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+		/*
+		 * We must make the kernel realise we had to move
+		 * into promisc mode or we start all out war on
+		 * the cable. If it was a promisc request the
+		 * flag is already set. If not we assert it.
+		 */
+		ndev->flags |= IFF_PROMISC;
+		dev_info(&ndev->dev, "%s: Promiscuous mode enabled.\n",
+			 ndev->name);
+		emac_cfg_write(ndev, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
+	} else if (ndev->mc_count) {
+		struct dev_mc_list *mclist;
+		for (i = 0, mclist = ndev->mc_list;
+		     mclist && i < ndev->mc_count;
+		     i++, mclist = mclist->next) {
+
+			if (i >= MULTICAST_CAM_TABLE_NUM)
+				break;
+			/* MAW0 must be written before MAW1; the MAW1
+			 * write (with the CAM index in bits 16+)
+			 * commits the entry. */
+			multi_addr_msw = ((mclist->dmi_addr[3] << 24)
+					  | (mclist->dmi_addr[2] << 16)
+					  | (mclist->dmi_addr[1] << 8)
+					  | mclist->dmi_addr[0]);
+			emac_cfg_write(ndev, XTE_MAW0_OFFSET, multi_addr_msw);
+			multi_addr_lsw = ((mclist->dmi_addr[5] << 8)
+					  | mclist->dmi_addr[4]);
+			multi_addr_lsw |= (i << 16);
+			emac_cfg_write(ndev, XTE_MAW1_OFFSET, multi_addr_lsw);
+		}
+	} else {
+		dev_info(&ndev->dev, "%s: Promiscuous mode disabled.\n",
+			 ndev->name);
+		emac_cfg_write(ndev,
+			       XTE_AFM_OFFSET,
+			       emac_cfg_read(ndev, XTE_AFM_OFFSET)
+			       & ~XTE_AFM_EPPRM_MASK);
+		/* Clear CAM entry 0; entries 1..3 are left as-is. */
+		emac_cfg_write(ndev, XTE_MAW0_OFFSET, 0);
+		emac_cfg_write(ndev, XTE_MAW1_OFFSET, 0);
+	}
+	spin_unlock(&lp->lock);
+}
+
+/* Bring up the PHY: enable MDIO, advertise full-duplex modes only,
+ * soft-reset/restart auto-negotiation, wait up to ~32 s for link,
+ * then program the TEMAC link speed and duplex to match.
+ * NOTE(review): MII_SSR (0x11) is a vendor-specific PHY status
+ * register, not an IEEE-standard one — presumably the board's PHY
+ * (Marvell-style specific status); confirm against the board design. */
+static void
+temac_phy_init(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	unsigned int ret, Reg;
+	int ii;
+
+	/* Set default MDIO divisor */
+	/* Set up MII management registers to write to PHY */
+	emac_cfg_write(ndev,
+		       XTE_MC_OFFSET,
+		       XTE_MC_MDIO_MASK | XTE_MDIO_DIV_DFT);
+
+	/*
+	   Set A-N Advertisement Regs for Full Duplex modes ONLY
+	 */
+	mdio_write(ndev,
+		   PHY_NUM,
+		   MII_ADVERTISE,
+		   mdio_read(ndev,
+			     PHY_NUM,
+			     MII_ADVERTISE)
+		   | ADVERTISE_10FULL
+		   | ADVERTISE_100FULL
+		   | ADVERTISE_CSMA);
+	mdio_write(ndev, PHY_NUM, MII_CTRL1000, ADVERTISE_1000FULL);
+
+	/*
+	   Soft reset the PHY
+	 */
+	mdio_write(ndev,
+		   PHY_NUM,
+		   MII_BMCR,
+		   mdio_read(ndev, PHY_NUM, MII_BMCR)
+		   | BMCR_RESET
+		   | BMCR_ANENABLE
+		   | BMCR_ANRESTART);
+
+	/* Wait for a PHY Link (auto-negotiation to complete)... */
+	/* polls BMSR up to 64 times at 500 ms intervals */
+	ret = mdio_read(ndev, PHY_NUM, MII_BMSR);
+	ii = 64;
+	while (((ret & BMSR_LSTATUS) != BMSR_LSTATUS) && ii--) {
+		mdelay(500);
+		ret = mdio_read(ndev, PHY_NUM, MII_BMSR);
+	}
+	/* Read resolved speed/duplex from the PHY-specific status reg. */
+	ret = mdio_read(ndev, PHY_NUM, MII_SSR);
+
+	Reg = emac_cfg_read(ndev, XTE_EMCFG_OFFSET) & ~XTE_EMCFG_LINKSPD_MASK;
+	if (ret & MII_SSR_LINK) {
+		switch (ret & MII_SSR_SPDMASK) {
+		case MII_SSR_SPD1000: /* 1000Base-T */
+			lp->LinkSpeed = 1000;
+			emac_cfg_write(ndev,
+				       XTE_EMCFG_OFFSET,
+				       Reg | (u32) XTE_EMCFG_LINKSPD_1000);
+			break;
+		case MII_SSR_SPD100: /* 100Base-T */
+			lp->LinkSpeed = 100;
+			emac_cfg_write(ndev,
+				       XTE_EMCFG_OFFSET,
+				       Reg | XTE_EMCFG_LINKSPD_100);
+			break;
+		case MII_SSR_SPD10: /* 10Base-T */
+			/* LINKSPD_10 is 0, so EMCFG is not rewritten here */
+			lp->LinkSpeed = 10;
+			break;
+		};
+		if ((ret & MII_SSR_FD) == 0x0) {
+			/* set up Tx/Rx config reg for half duplex */
+			ret = emac_cfg_read(ndev, XTE_TXC_OFFSET);
+			emac_cfg_write(ndev,
+				       XTE_TXC_OFFSET,
+				       ret | XTE_TXC_TXHD_MASK);
+			ret = emac_cfg_read(ndev, XTE_RXC1_OFFSET);
+			emac_cfg_write(ndev,
+				       XTE_RXC1_OFFSET,
+				       ret | XTE_RXC1_RXHD_MASK);
+		}
+	}
+}
+
+/* -----------------------------------------------------------------------------
+----------------------------------------------------------------------------- */
+/* Allocate and initialize the TX/RX descriptor rings, attach a
+ * freshly-mapped skb to every RX descriptor, and program the SDMA
+ * channel control and RX descriptor pointers.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ * Fixes vs. original:
+ *  - rx_skb is an array of sk_buff *pointers*; the original allocated
+ *    RX_BD_NUM full struct sk_buff objects for it;
+ *  - kzalloc/dma_alloc_coherent results were used unchecked.
+ * NOTE(review): allocations made before a failure are not unwound
+ * here — confirm the caller's error path. */
+static int
+temac_bd_init(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	struct sk_buff *skb;
+	int ii;
+
+	lp->rx_skb = kzalloc(sizeof(struct sk_buff *) * RX_BD_NUM,
+			     GFP_KERNEL);
+	if (!lp->rx_skb)
+		return -ENOMEM;
+	/* allocate the tx and rx ring buffer descriptors. */
+	/* returns a virtual address and a physical address. */
+	lp->tx_bd_v = dma_alloc_coherent(NULL,
+					 sizeof(struct cdmac_bd) * TX_BD_NUM,
+					 (dma_addr_t *)&lp->tx_bd_p,
+					 GFP_KERNEL);
+	if (!lp->tx_bd_v)
+		return -ENOMEM;
+	lp->rx_bd_v = dma_alloc_coherent(NULL,
+					 sizeof(struct cdmac_bd) * RX_BD_NUM,
+					 (dma_addr_t *)&lp->rx_bd_p,
+					 GFP_KERNEL);
+	if (!lp->rx_bd_v)
+		return -ENOMEM;
+
+	/* Link the TX descriptors into a ring (next holds the
+	 * *physical* address of the following descriptor). */
+	for (ii = 0; ii < TX_BD_NUM; ii++) {
+		memset((char *)&lp->tx_bd_v[ii], 0, sizeof(struct cdmac_bd));
+		if (ii == (TX_BD_NUM - 1))
+			lp->tx_bd_v[ii].next = &lp->tx_bd_p[0];
+		else
+			lp->tx_bd_v[ii].next = &lp->tx_bd_p[ii + 1];
+
+	}
+	/* Link the RX ring and give each descriptor a mapped skb. */
+	for (ii = 0; ii < RX_BD_NUM; ii++) {
+		memset((char *)&lp->rx_bd_v[ii], 0, sizeof(struct cdmac_bd));
+		if (ii == (RX_BD_NUM - 1))
+			lp->rx_bd_v[ii].next = &lp->rx_bd_p[0];
+		else
+			lp->rx_bd_v[ii].next = &lp->rx_bd_p[ii + 1];
+
+		skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
+				+ XTE_ALIGN, GFP_ATOMIC);
+		if (!skb) {
+			dev_err(&ndev->dev, "alloc_skb error %d\n", ii);
+			return -ENOMEM;
+		}
+		lp->rx_skb[ii] = skb;
+		skb_reserve(skb, BUFFER_ALIGN(skb->data));
+		/* returns physical address of skb->data */
+		lp->rx_bd_v[ii].phys = (unsigned char *)pci_map_single(NULL,
+					skb->data,
+					XTE_MAX_JUMBO_FRAME_SIZE,
+					PCI_DMA_FROMDEVICE);
+		lp->rx_bd_v[ii].len = XTE_MAX_JUMBO_FRAME_SIZE;
+		lp->rx_bd_v[ii].app0 = STS_CTRL_APP0_IRQONEND;
+	}
+
+	/* Enable TX channel interrupts with coalescing/delay timers. */
+	sd_iow(ndev,
+	       TX_CHNL_CTRL,
+	       0x10220400
+	       | CHNL_CTRL_IRQ_EN
+	       | CHNL_CTRL_IRQ_DLY_EN
+	       | CHNL_CTRL_IRQ_COAL_EN);
+	/* 0x10220483 */
+	/* 0x00100483 */
+	sd_iow(ndev,
+	       RX_CHNL_CTRL,
+	       0xff010000
+	       | CHNL_CTRL_IRQ_EN
+	       | CHNL_CTRL_IRQ_DLY_EN
+	       | CHNL_CTRL_IRQ_COAL_EN
+	       | CHNL_CTRL_IRQ_IOE);
+	/* 0xff010283 */
+
+	/* Hand the whole RX ring to the DMA engine. */
+	sd_iow(ndev, RX_CURDESC_PTR, (uintptr_t)&lp->rx_bd_p[0]);
+	sd_iow(ndev, RX_TAILDESC_PTR, (uintptr_t)&lp->rx_bd_p[RX_BD_NUM - 1]);
+
+	return 0;
+}
+
+/* One entry of the option table consumed by temac_setoptions():
+ * maps an XTE_OPTION_* bit (@opt) to the register (@reg) and mask
+ * (@m_or) that implement it.  @flg selects direct (tio_setclr) vs
+ * indirect (emac_cfg_setclr) register access.  @m_and is currently
+ * unused by temac_setoptions(). */
+struct temac_option {
+	int flg;
+	u32 opt;
+	u32 reg;
+	u32 m_or;
+	u32 m_and;
+
+};
+
+/* Option table: maps each XTE_OPTION_* bit to the register and mask
+ * that implement it.  Terminated by an all-zero entry.
+ * Fix vs. original: the table is file-local, so declare it static
+ * (it was a global symbol, polluting the kernel namespace). */
+static struct temac_option temac_options[] = {
+	/* Turn on jumbo packet support for both Rx and Tx */
+	{ 0, XTE_OPTION_JUMBO, XTE_TXC_OFFSET, XTE_TXC_TXJMBO_MASK, 0 },
+	{ 0, XTE_OPTION_JUMBO, XTE_RXC1_OFFSET, XTE_RXC1_RXJMBO_MASK, 0 },
+	/* Turn on VLAN packet support for both Rx and Tx */
+	{ 0, XTE_OPTION_VLAN, XTE_TXC_OFFSET, XTE_TXC_TXVLAN_MASK, 0 },
+	{ 0, XTE_OPTION_VLAN, XTE_RXC1_OFFSET, XTE_RXC1_RXVLAN_MASK, 0 },
+	/* Turn on FCS stripping on receive packets */
+	{ 0, XTE_OPTION_FCS_STRIP, XTE_RXC1_OFFSET, XTE_RXC1_RXFCS_MASK, 0 },
+	/* Turn on FCS insertion on transmit packets */
+	{ 0, XTE_OPTION_FCS_INSERT, XTE_TXC_OFFSET, XTE_TXC_TXFCS_MASK, 0 },
+	/* Turn on length/type field checking on receive packets */
+	{ 0, XTE_OPTION_LENTYPE_ERR, XTE_RXC1_OFFSET, XTE_RXC1_RXLT_MASK, 0 },
+	/* Turn on flow control */
+	{ 0, XTE_OPTION_FLOW_CONTROL, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK, 0 },
+	/* Turn on flow control */
+	{ 0, XTE_OPTION_FLOW_CONTROL, XTE_FCC_OFFSET, XTE_FCC_TXFLO_MASK, 0 },
+	/* Turn on promiscuous frame filtering (all frames are received ) */
+	{ 0, XTE_OPTION_PROMISC, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK, 0 },
+	/* Enable transmitter if not already enabled */
+	{ 0, XTE_OPTION_TXEN, XTE_TXC_OFFSET, XTE_TXC_TXEN_MASK, 0 },
+	/* Enable receiver */
+	{ 0, XTE_OPTION_RXEN, XTE_RXC1_OFFSET, XTE_RXC1_RXEN_MASK, 0 },
+	/* terminator */
+	{ 0, 0, 0, 0, 0 }
+};
+
+/* Apply the option bits in @Options to the hardware by walking the
+ * option table, then OR them into lp->options.  Direct-register
+ * options go through tio_setclr(), indirect ones through
+ * emac_cfg_setclr().  Always returns 0. */
+static u32
+temac_setoptions(struct net_device *ndev, u32 Options) {
+	struct temac_local *lp = netdev_priv(ndev);
+	struct temac_option *tp;
+
+	for (tp = &temac_options[0]; tp->opt; tp++) {
+		if (tp->flg)
+			tio_setclr(ndev, tp->reg, tp->m_or,
+				   (Options & tp->opt));
+		else
+			emac_cfg_setclr(ndev, tp->reg, tp->m_or,
+					(Options & tp->opt));
+	}
+	lp->options |= Options;
+
+	return 0;
+}
+
+/* Initialize temac */
+/* Full device reset and re-initialization: reset the receiver,
+ * transmitter, address filter and DMA engine (each with a bounded
+ * busy-wait), rebuild the descriptor rings, restore MAC/PHY/filter
+ * configuration and re-apply the current option set. */
+static void
+temac_device_reset(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	u32 timeout = 1000;
+
+	/* Perform a software reset */
+
+	/* 0x300 host enable bit ? */
+	/* reset PHY through control register ?:1 */
+
+	/* Reset the device */
+	emac_cfg_write(ndev, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
+	/* Wait for the receiver to finish reset */
+	while (emac_cfg_read(ndev, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
+		udelay(1);
+		if (--timeout == 0) {
+			dev_err(&ndev->dev,
+				"temac_device_reset RX reset timeout!!\n");
+			break;
+		}
+	}
+
+	emac_cfg_write(ndev, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
+	/* Wait for the transmitter to finish reset */
+	timeout = 1000;
+	while (emac_cfg_read(ndev, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
+		udelay(1);
+		if (--timeout == 0) {
+			dev_err(&ndev->dev,
+				"temac_device_reset TX reset timeout!!\n");
+			break;
+		}
+	}
+
+	/* Disable the receiver */
+	emac_cfg_write(ndev,
+		       XTE_RXC1_OFFSET,
+		       emac_cfg_read(ndev, XTE_RXC1_OFFSET)
+		       & ~XTE_RXC1_RXEN_MASK);
+
+	/* reset (RAF0 bit 0 is self-clearing) */
+	tiow(ndev, XTE_RAF0_OFFSET, 1);
+	/* wait for reset */
+	timeout = 1000;
+	while (tior(ndev, XTE_RAF0_OFFSET) & 1) {
+		udelay(1);
+		if (--timeout == 0) {
+			dev_err(&ndev->dev,
+				"temac_device_reset RAF reset timeout!!\n");
+			break;
+		}
+	}
+
+	/* ISR0/IER0/IPR0 bits */
+	/* b1 autoneg complete */
+	/* b2 receive complete */
+	/* b5 transmit complete */
+	/* b0 = interrupts from TIS/TIE registers */
+
+
+	/* Reset the SDMA engine (DMA_CONTROL_RST is self-clearing) */
+	sd_iow(ndev, DMA_CONTROL_REG, DMA_CONTROL_RST);
+	timeout = 1000;
+	while (sd_ior(ndev, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+		udelay(1);
+		if (--timeout == 0) {
+			dev_err(&ndev->dev,
+				"temac_device_reset DMA reset timeout!!\n");
+			break;
+		}
+	}
+
+	dev_info(&ndev->dev,
+		 "%s: Xilinx Embedded Tri-Mode Ethernet MAC %s %s\n",
+		 ndev->name,
+		 __DATE__,
+		 __TIME__);
+	dev_info(&ndev->dev, "temac %08x sdma %08x\n",
+		 lp->regs.addr,
+		 lp->sdma.addr);
+
+	/* NOTE(review): temac_bd_init() return value is ignored — an
+	 * allocation failure leaves the device half-initialized. */
+	temac_bd_init(ndev);
+
+	emac_cfg_write(ndev, XTE_RXC0_OFFSET, 0);
+	emac_cfg_write(ndev, XTE_RXC1_OFFSET, 0);
+	emac_cfg_write(ndev, XTE_TXC_OFFSET, 0);
+	emac_cfg_write(ndev, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+
+	/* Sync default options with HW
+	 * but leave receiver and transmitter disabled. */
+	temac_setoptions(ndev,
+			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
+
+	temac_phy_init(ndev);
+
+	temac_set_mac_address(ndev, 0);
+	/* Set address filter table */
+	temac_set_multicast_list(ndev);
+	if (temac_setoptions(ndev, lp->options))
+		dev_err(&ndev->dev, "Error setting TEMAC options\n");
+
+	/* Init Driver variable */
+	/* NOTE(review): re-running spin_lock_init() on every reset is
+	 * suspect if a reset can race with lock holders — confirm. */
+	ndev->trans_start = 0;
+	spin_lock_init(&lp->lock);
+	spin_lock_init(&lp->rx_lock);
+}
+
+/* Reclaim completed TX descriptors: walk the ring from tx_bd_ci while
+ * the hardware has set STS_CTRL_APP0_CMPLT, unmap each buffer, free
+ * the skb stashed in app4, update stats, then wake the TX queue.
+ * Called from the TX interrupt path (uses dev_kfree_skb_irq). */
+static void
+temac_hard_start_xmit_done(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	struct cdmac_bd *cur_p;
+	unsigned int stat = 0;
+
+	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+	stat = cur_p->app0;
+
+	while (stat & STS_CTRL_APP0_CMPLT) {
+		pci_unmap_single(NULL,
+				 (unsigned long)cur_p->phys,
+				 cur_p->len,
+				 PCI_DMA_TODEVICE);
+		/* app4 holds the skb pointer only on the first (SOP)
+		 * descriptor of a frame; fragment descriptors leave it 0. */
+		if (cur_p->app4)
+			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+		/* Mark the descriptor free for temac_hard_start_xmit(). */
+		cur_p->app0 = 0;
+
+		lp->stats.tx_packets++;
+		lp->stats.tx_bytes += cur_p->len;
+
+		lp->tx_bd_ci++;
+		if (lp->tx_bd_ci >= TX_BD_NUM)
+			lp->tx_bd_ci = 0;
+
+		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+		stat = cur_p->app0;
+	}
+
+	netif_wake_queue(ndev);
+}
+
+/* Queue @skb for transmission on the SDMA TX channel: one descriptor
+ * for the linear head plus one per page fragment, with SOP/EOP
+ * marking the frame boundaries.  Returns NETDEV_TX_OK, or
+ * NETDEV_TX_BUSY (after stopping the queue) when the ring is full.
+ * Fixes vs. original:
+ *  - ip->tot_len is network byte order; convert with ntohs() before
+ *    arithmetic so the checksum length is correct on any endianness;
+ *  - the head descriptor mapped skb->len bytes from skb->data, which
+ *    overruns the linear buffer for fragmented skbs — map only
+ *    skb_headlen(skb);
+ *  - 'start' was read uninitialized when the protocol was neither
+ *    TCP nor UDP (undefined behavior). */
+static int
+temac_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	struct cdmac_bd *cur_p, *start_p, *tail_p;
+	int ii;
+	unsigned long num_frag;
+	skb_frag_t *frag;
+
+	num_frag = skb_shinfo(skb)->nr_frags;
+	frag = &skb_shinfo(skb)->frags[0];
+	start_p = &lp->tx_bd_p[lp->tx_bd_tail];
+	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+
+	/* Ring full: the next descriptor has not been reclaimed yet. */
+	if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
+		if (!netif_queue_stopped(ndev))
+			netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	cur_p->app0 = 0;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		const struct iphdr *ip = ip_hdr(skb);
+		int length = 0, start = 0, insert = 0;
+
+		switch (ip->protocol) {
+		case IPPROTO_TCP:
+			start = sizeof(struct iphdr) + ETH_HLEN;
+			insert = sizeof(struct iphdr) + ETH_HLEN + 16;
+			length = ntohs(ip->tot_len) - sizeof(struct iphdr);
+			break;
+		case IPPROTO_UDP:
+			start = sizeof(struct iphdr) + ETH_HLEN;
+			insert = sizeof(struct iphdr) + ETH_HLEN + 6;
+			length = ntohs(ip->tot_len) - sizeof(struct iphdr);
+			break;
+		default:
+			break;
+		}
+		/* app1 tells the engine where the L4 data starts and where
+		 * to insert the checksum; app2 seeds it with the
+		 * pseudo-header sum. */
+		cur_p->app1 = ((start << 16) | insert);
+		cur_p->app2 = csum_tcpudp_magic(ip->saddr,
+						ip->daddr,
+						length,
+						ip->protocol,
+						0);
+		/* Zero the checksum field before the hardware fills it. */
+		skb->data[insert] = 0;
+		skb->data[insert + 1] = 0;
+	}
+	cur_p->app0 |= STS_CTRL_APP0_SOP;
+	cur_p->len = skb_headlen(skb);
+	cur_p->phys = (unsigned char *)pci_map_single(NULL,
+						      skb->data,
+						      skb_headlen(skb),
+						      PCI_DMA_TODEVICE);
+	cur_p->app4 = (unsigned long)skb;	/* freed in xmit_done */
+
+	for (ii = 0; ii < num_frag; ii++) {
+		lp->tx_bd_tail++;
+		if (lp->tx_bd_tail >= TX_BD_NUM)
+			lp->tx_bd_tail = 0;
+
+		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+		cur_p->phys = (unsigned char *)pci_map_single(NULL,
+					(void *)page_address(frag->page)
+					+ frag->page_offset,
+					frag->size,
+					PCI_DMA_TODEVICE);
+		cur_p->len = frag->size;
+		cur_p->app0 = 0;
+		frag++;
+	}
+	cur_p->app0 |= STS_CTRL_APP0_EOP;
+
+	tail_p = &lp->tx_bd_p[lp->tx_bd_tail];
+	lp->tx_bd_tail++;
+	if (lp->tx_bd_tail >= TX_BD_NUM)
+		lp->tx_bd_tail = 0;
+
+	/* Kick the DMA engine unless it is already busy on this ring. */
+	if (!(sd_ior(ndev, TX_CHNL_STS) & CHNL_STS_ENGBUSY)) {
+		sd_iow(ndev, TX_CURDESC_PTR, (uintptr_t)start_p);
+		/* DMA start */
+		sd_iow(ndev, TX_TAILDESC_PTR, (uintptr_t)tail_p);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+
+/*
+ * Drain completed RX descriptors: pass each finished sk_buff up the
+ * stack, re-arm the ring slot with a freshly allocated buffer, and
+ * finally advance the hardware tail pointer past every re-armed slot.
+ * Called from the RX interrupt with interrupts enabled; serialized by
+ * lp->rx_lock.
+ */
+static void
+ll_temac_recv(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	struct sk_buff *skb, *new_skb;
+	unsigned int bdstat;
+	struct cdmac_bd *cur_p, *tail_p;
+	int length;
+	unsigned long skb_vaddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->rx_lock, flags);
+
+	tail_p = &lp->rx_bd_p[lp->rx_bd_ci];
+	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+
+	bdstat = cur_p->app0;
+	while ((bdstat & STS_CTRL_APP0_CMPLT)) {
+
+		skb = lp->rx_skb[lp->rx_bd_ci];
+		length = cur_p->app4;	/* byte count written back by DMA */
+
+		/* Unmap with the same size the buffer was mapped with
+		 * (XTE_MAX_JUMBO_FRAME_SIZE, see the re-arm below);
+		 * map/unmap size mismatch violates the DMA API. */
+		skb_vaddr = virt_to_bus(skb->data);
+		pci_unmap_single(NULL, skb_vaddr, XTE_MAX_JUMBO_FRAME_SIZE,
+				 PCI_DMA_FROMDEVICE);
+
+		skb_put(skb, length);
+		skb->dev = ndev;
+		skb->protocol = eth_type_trans(skb, ndev);
+		skb->ip_summed = CHECKSUM_NONE;	/* no hw csum on RX */
+
+		netif_rx(skb);
+
+		lp->stats.rx_packets++;
+		lp->stats.rx_bytes += length;
+
+		new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
+				    GFP_ATOMIC);
+		if (new_skb == 0) {
+			dev_err(&ndev->dev, "no memory for new sk_buff\n");
+			spin_unlock_irqrestore(&lp->rx_lock, flags);
+			return;
+		}
+
+		skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
+
+		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
+		cur_p->phys = (unsigned char *)
+			pci_map_single(NULL, new_skb->data,
+				       XTE_MAX_JUMBO_FRAME_SIZE,
+				       PCI_DMA_FROMDEVICE);
+		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
+		lp->rx_skb[lp->rx_bd_ci] = new_skb;
+
+		/* Track the LAST descriptor re-armed so the tail pointer
+		 * written below covers every processed slot, not just the
+		 * first one as before. */
+		tail_p = &lp->rx_bd_p[lp->rx_bd_ci];
+
+		lp->rx_bd_ci++;
+		if (lp->rx_bd_ci >= RX_BD_NUM)
+			lp->rx_bd_ci = 0;
+
+		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+		bdstat = cur_p->app0;
+	}
+	sd_iow(ndev, RX_TAILDESC_PTR, (uintptr_t)tail_p);
+
+	spin_unlock_irqrestore(&lp->rx_lock, flags);
+}
+
+/* TX channel interrupt: acknowledge the cause and reap finished frames. */
+static irqreturn_t
+ll_temac_tx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	unsigned int status;
+
+	/* Read the pending causes and write them back to acknowledge. */
+	status = sd_ior(ndev, TX_IRQ_REG);
+	sd_iow(ndev, TX_IRQ_REG, status);
+
+	if (status & (IRQ_COAL | IRQ_DLY))
+		temac_hard_start_xmit_done(ndev);
+	/* NOTE(review): 0x080 looks like a DMA-error status bit — confirm
+	 * against the SDMA register spec and use a named constant. */
+	if (status & 0x080)
+		dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+
+	return IRQ_HANDLED;
+}
+
+/* RX channel interrupt: acknowledge the cause and drain the RX ring. */
+static irqreturn_t
+ll_temac_rx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	unsigned int status;
+
+	/* Read the pending causes and write them back to acknowledge. */
+	status = sd_ior(ndev, RX_IRQ_REG);
+	sd_iow(ndev, RX_IRQ_REG, status);
+
+	/* Service the ring on coalesce or delay timeout interrupts. */
+	if (status & (IRQ_COAL | IRQ_DLY))
+		ll_temac_recv(ndev);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Return the interface statistics.  The original returned the whole
+ * private structure pointer (netdev_priv) as if it were the stats —
+ * only correct by accident if 'stats' happens to be the first member.
+ * Return the address of the embedded stats member explicitly (the
+ * member is demonstrably there: see lp->stats updates in the RX path).
+ */
+static struct net_device_stats *
+temac_get_stats(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	return &lp->stats;
+}
+
+/*
+ * Bring the interface up: claim both DMA channel interrupts, then reset
+ * and (re)initialize the hardware.  The original ignored both
+ * request_irq() return values (the second silently overwrote the first)
+ * and always returned 0; failures are now propagated and the TX IRQ is
+ * rolled back if the RX IRQ cannot be claimed.
+ */
+static int
+temac_open(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+	int erC;
+
+	erC = request_irq(lp->tx_irq, ll_temac_tx_interrupt,
+			  0, ndev->name, ndev);
+	if (erC)
+		return erC;
+	erC = request_irq(lp->rx_irq, ll_temac_rx_interrupt,
+			  0, ndev->name, ndev);
+	if (erC) {
+		free_irq(lp->tx_irq, ndev);
+		return erC;
+	}
+	temac_device_reset(ndev);
+	return 0;
+}
+
+/* Bring the interface down: release both interrupt lines. */
+static int
+temac_stop(struct net_device *ndev)
+{
+	struct temac_local *priv = netdev_priv(ndev);
+
+	/* Release the lines claimed in temac_open().
+	 * NOTE(review): ring buffers are left allocated here — confirm
+	 * they are reclaimed elsewhere before unload. */
+	free_irq(priv->tx_irq, ndev);
+	free_irq(priv->rx_irq, ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Netpoll hook: service both DMA channels with their interrupts masked.
+ * The original called the two-argument handlers with three arguments
+ * (would not compile with netpoll enabled) and crossed the RX/TX irq
+ * numbers; both are fixed here.
+ */
+static void
+temac_poll_controller(struct net_device *ndev)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	disable_irq(lp->tx_irq);
+	disable_irq(lp->rx_irq);
+
+	ll_temac_rx_interrupt(lp->rx_irq, ndev);
+	ll_temac_tx_interrupt(lp->tx_irq, ndev);
+
+	enable_irq(lp->tx_irq);
+	enable_irq(lp->rx_irq);
+}
+#endif
+
+/*
+ * Map one memory resource of the platform device into 'reg': reserve
+ * the physical range, then ioremap it.  Fixes two defects: the resource
+ * size was computed as end - start (one byte short — resource ranges
+ * are inclusive), and the error path released the ioremap cookie with
+ * release_region() instead of the physical range with
+ * release_mem_region().
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int __init
+temac_device_map(struct platform_device *pdev, struct net_device *ndev,
+		 int num, struct temac_region *reg)
+{
+	struct resource *res;
+	int erC = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, num);
+	if (res == 0) {
+		erC = -ENODEV;
+		goto fail;
+	}
+	reg->base = (uintptr_t)res->start;
+	reg->len = res->end - res->start + 1;	/* inclusive range */
+	if (request_mem_region(res->start, reg->len, pdev->name) == 0) {
+		dev_err(&pdev->dev,
+			"%s: failed to request registers\n",
+			pdev->name);
+		erC = -ENXIO;
+		goto fail;
+	}
+
+	reg->addr = ioremap_nocache((uintptr_t)reg->base, reg->len);
+	if (reg->addr == 0) {
+		dev_err(&pdev->dev,
+			"%s: failed to remap registers\n", pdev->name);
+		erC = -ENXIO;
+		goto fail_remap;
+	}
+	return 0;
+fail_remap:
+	release_mem_region(res->start, reg->len);
+fail:
+	return erC;
+}
+/*
+ * Probe: allocate the net_device, map the TEMAC core and SDMA register
+ * regions, fetch the RX/TX interrupt lines, wire up the net_device
+ * operations and register the interface with the network stack.
+ */
+static int __init
+temac_device_probe(struct platform_device *pdev)
+{
+	struct net_device *ndev = alloc_etherdev(sizeof(struct temac_local));
+	struct temac_local *lp;
+	/* NOTE(review): hard-coded MAC address — should come from
+	 * platform data or board EEPROM; confirm before production. */
+	u8 addr[] = { 0x0, 0x50, 0xc2, 0x44, 0x2f, 0xff };
+	int erC = 0;
+
+	/* Init network device */
+	if (!ndev) {
+		dev_err(&pdev->dev, "could not allocate device.\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	/* Private area lives inside the net_device; start from zero. */
+	lp = netdev_priv(ndev);
+	memset(lp, 0, sizeof(struct temac_local));
+
+	/* Resource 0: TEMAC core registers. */
+	erC = temac_device_map(pdev, ndev, 0, &lp->regs);
+	if (erC) {
+		dev_err(&pdev->dev, "could not allocate temac regs.\n");
+		goto nodev;
+	}
+	ndev->base_addr = (u32) lp->regs.addr;
+
+	/* Resource 1: SDMA (LocalLink DMA) registers. */
+	erC = temac_device_map(pdev, ndev, 1, &lp->sdma);
+	if (erC) {
+		dev_err(&pdev->dev, "could not allocate sdma regs.\n");
+		goto nodev;
+	}
+	lp->rx_irq = platform_get_irq(pdev, 0);
+	if (lp->rx_irq < 0) {
+		dev_err(&pdev->dev, "could not allocate rx irq.\n");
+		erC = lp->rx_irq;	/* propagate real errno, not -ENOMEM */
+		goto nodev;
+	}
+	lp->tx_irq = platform_get_irq(pdev, 1);
+	if (lp->tx_irq < 0) {
+		dev_err(&pdev->dev, "could not allocate tx irq.\n");
+		erC = lp->tx_irq;	/* propagate real errno, not -ENOMEM */
+		goto nodev;
+	}
+
+	dev_info(&pdev->dev, DRV_NAME ".c:v " __DATE__ " "
+		 DRV_AUTHOR " " DRV_EMAIL "\n");
+
+	lp->dev = ndev;
+	lp->emac_num = 0;
+	lp->options = XTE_OPTION_DEFAULTS;
+
+	temac_set_mac_address(ndev, addr);
+	/* from this point we assume that we have found a TEMAC */
+	ether_setup(ndev);
+	/* The TEMAC-specific entries in the device structure. */
+	ndev->open = &temac_open;
+	ndev->stop = &temac_stop;
+	ndev->get_stats = &temac_get_stats;
+	ndev->set_mac_address = &temac_set_mac_address;
+	ndev->hard_start_xmit = &temac_hard_start_xmit;
+	ndev->set_multicast_list = &temac_set_multicast_list;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	ndev->poll_controller = &temac_poll_controller;
+#endif
+	ndev->flags &= ~IFF_MULTICAST;	/* clear multicast */
+
+	ndev->features = NETIF_F_SG;		/* Scatter/gather IO. */
+	ndev->features |= NETIF_F_FRAGLIST;	/* Fragment lists. */
+#if 0	/* candidate features, disabled (semicolons fixed vs. original) */
+	ndev->features |= NETIF_F_IP_CSUM;	/* TCP/UDP csum over IPv4 */
+	ndev->features |= NETIF_F_HW_CSUM;	/* csum all packets */
+	ndev->features |= NETIF_F_IPV6_CSUM;	/* TCP/UDP csum over IPv6 */
+	ndev->features |= NETIF_F_HIGHDMA;	/* DMA to high memory */
+	ndev->features |= NETIF_F_HW_VLAN_TX;	/* TX VLAN acceleration */
+	ndev->features |= NETIF_F_HW_VLAN_RX;	/* RX VLAN acceleration */
+	ndev->features |= NETIF_F_HW_VLAN_FILTER; /* RX VLAN filtering */
+	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* no VLAN packets */
+	ndev->features |= NETIF_F_GSO;		/* software GSO */
+	ndev->features |= NETIF_F_MULTI_QUEUE;	/* multiple TX/RX queues */
+	ndev->features |= NETIF_F_LRO;		/* large receive offload */
+#endif
+
+	erC = register_netdev(ndev);
+	if (!erC)
+		return 0;
+
+nodev:
+	/* NOTE(review): regions mapped by temac_device_map() are not
+	 * unmapped/released on this failure path — confirm and add
+	 * iounmap()/release_mem_region() teardown. */
+	free_netdev(ndev);
+	ndev = NULL;
+	return erC;
+}
+
+/* Platform-bus teardown: detach from the stack, then free the device. */
+static int __devexit temac_device_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+
+	unregister_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(ndev);
+	return 0;
+}
+/* Platform-bus glue; PM callbacks are not implemented (explicit NULLs
+ * dropped — static storage is zero-initialized anyway). */
+static struct platform_driver temac_device_driver = {
+	.probe = temac_device_probe,
+	.remove = __devexit_p(temac_device_remove),
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module entry point: register the platform driver with the kernel. */
+static int __init
+temac_init_module(void)
+{
+	return platform_driver_register(&temac_device_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit
+temac_cleanup_module(void)
+{
+	platform_driver_unregister(&temac_device_driver);
+}
+
+module_init(temac_init_module);
+module_exit(temac_cleanup_module);
+
+/* Module metadata exposed via modinfo. */
+MODULE_DESCRIPTION("Xilinx Tri-Mode Eth MAC driver");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+/* vim:set ts=4 sts=4 sw=4 noet list nowrap: */
diff --git a/include/linux/xilinx_devices.h b/include/linux/xilinx_devices.h
index 41ad421..40512d5 100755
--- a/include/linux/xilinx_devices.h
+++ b/include/linux/xilinx_devices.h
@@ -94,7 +94,7 @@ struct xtemac_platform_data {
#define XTEMAC_DMA_SGDMA 3 /* scatter gather DMA */
#endif
-#if defined(CONFIG_XILINX_LLTEMAC)
+#if defined(CONFIG_XILINX_LLTEMAC) || defined(CONFIG_XPS_LLTEMAC)
/* LLTEMAC platform data */
struct xlltemac_platform_data {
u8 tx_csum;
next reply other threads:[~2008-08-19 9:41 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-08-19 9:34 David H. Lynch Jr. [this message]
[not found] ` <20080819201445.F39181468062@mail206-wa4.bigfish.com>
[not found] ` <48AB2E58.2050307@dlasys.net>
[not found] ` <20080819205148.82EAC50060@mail88-dub.bigfish.com>
[not found] ` <48AB38AF.6010007@dlasys.net>
2008-08-19 21:42 ` [PATCH] Linux Device Driver for Xilinx LL TEMAC 10/100/1000 EthernetNIC John Linn
2008-08-19 22:12 ` David H. Lynch Jr.
2008-08-20 17:39 ` John Linn
2008-08-22 16:10 ` [PATCH] Linux Device Driver for Xilinx LL TEMAC 10/100/1000 Ethernet NIC Sergey Temerkhanov
2008-08-22 16:14 ` Sergey Temerkhanov
2008-09-14 0:13 ` Jeff Garzik
-- strict thread matches above, loose matches on Subject: below --
2008-08-17 4:59 David H. Lynch Jr.
2008-08-18 12:30 ` Ben Hutchings
2008-08-18 14:36 ` Ben Hutchings
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=48AA938C.4060309@dlasys.net \
--to=dhlii@dlasys.net \
--cc=linuxppc-embedded@ozlabs.org \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).