netdev.vger.kernel.org archive mirror
* [PATCH] via-rhine: hardware VLAN support
@ 2010-11-05 10:43 Roger Luethi
  2010-11-05 18:31 ` Jesse Gross
  0 siblings, 1 reply; 7+ messages in thread
From: Roger Luethi @ 2010-11-05 10:43 UTC (permalink / raw)
  To: netdev; +Cc: David S. Miller

This patch adds VLAN hardware support for Rhine chips.

The driver uses up to 3 additional bytes of buffer space when extracting
802.1Q headers; PKT_BUF_SZ should still be sufficient.
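
A rough sketch of the Rx buffer layout behind that claim (illustration
only; it mirrors the rhine_get_vlan_tci() helper added below and assumes
the chip has already stripped the tag from the frame itself, so the net
cost is just the 0-3 bytes of alignment padding):

	/*
	 *   [ frame data | CRC | pad (0-3) | TPID | TCI ]
	 *     <------ data_size ----->
	 *
	 * e.g. data_size = 65: the extracted header starts at
	 * ((65 + 3) & ~3) = 68, i.e. 3 bytes of padding -- the worst case.
	 */
	u8 *tag = skb->data + ((data_size + 3) & ~3);	/* TPID, 4-byte aligned */
	u16 tci = ntohs(*(u16 *)(tag + 2));		/* TCI follows the TPID */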

The initial code was provided by David Lv. I reworked it to use standard
kernel facilities. Coding style clean up mostly follows via-velocity.

Signed-off-by: David Lv <DavidLv@viatech.com.cn>
Signed-off-by: Roger Luethi <rl@hellgate.ch>

 drivers/net/via-rhine.c |  351 +++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 337 insertions(+), 14 deletions(-)

diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9d..67afc48 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME	"via-rhine"
-#define DRV_VERSION	"1.4.3"
-#define DRV_RELDATE	"2007-03-06"
+#define DRV_VERSION	"1.5.0"
+#define DRV_RELDATE	"2010-10-09"
 
 
 /* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
+#include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/workqueue.h>
 #include <asm/processor.h>	/* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
+#define MCAM_SIZE	32
+#define VCAM_SIZE	32
+
 /*
 		Theory of Operation
 
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 /* Offsets to the device registers. */
 enum register_offsets {
 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
-	ChipCmd1=0x09,
+	ChipCmd1=0x09, TQWake=0x0A,
 	IntrStatus=0x0C, IntrEnable=0x0E,
 	MulticastFilter0=0x10, MulticastFilter1=0x14,
 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
-	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 	StickyHW=0x83, IntrStatus2=0x84,
+	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
 	BackCaptureEffect=0x04, BackRandom=0x08
 };
 
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+	TCR_PQEN=0x01,
+	TCR_LB0=0x02,		/* loopback[0] */
+	TCR_LB1=0x04,		/* loopback[1] */
+	TCR_OFSET=0x08,
+	TCR_RTGOPT=0x10,
+	TCR_RTFT0=0x20,
+	TCR_RTFT1=0x40,
+	TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+	CAMC_CAMEN=0x01,
+	CAMC_VCAMSL=0x02,
+	CAMC_CAMWR=0x04,
+	CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+	BCR1_POT0=0x01,
+	BCR1_POT1=0x02,
+	BCR1_POT2=0x04,
+	BCR1_CTFT0=0x08,
+	BCR1_CTFT1=0x10,
+	BCR1_CTSF=0x20,
+	BCR1_TXQNOBK=0x40,	/* for VT6105 */
+	BCR1_VIDFR=0x80,	/* for VT6105 */
+	BCR1_MED0=0x40,		/* for VT6102 */
+	BCR1_MED1=0x80,		/* for VT6102 */
+};
+
 #ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
 	DescOwn=0x80000000
 };
 
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+	DescTag=0x00010000
+};
+
 /* Bits in ChipCmd. */
 enum chip_cmd_bits {
 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,7 @@ enum chip_cmd_bits {
 };
 
 struct rhine_private {
+	struct vlan_group *vlgrp;
 	/* Descriptor rings */
 	struct rx_desc *rx_ring;
 	struct tx_desc *tx_ring;
@@ -391,6 +436,9 @@ struct rhine_private {
 	spinlock_t lock;
 	struct work_struct reset_task;
 
+	u32 mCAMmask;			/* 32 CAMs each (6105M and better) */
+	u32 vCAMmask;
+
 	/* Frequently used values: keep some adjacent for cache effect. */
 	u32 quirks;
 	struct rx_desc *rx_head_desc;
@@ -405,6 +453,22 @@ struct rhine_private {
 	void __iomem *base;
 };
 
+#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
@@ -422,6 +486,15 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct rhine_private *rp);
 
 #define RHINE_WAIT_FOR(condition) do {					\
 	int i=1024;							\
@@ -629,6 +702,9 @@ static const struct net_device_ops rhine_netdev_ops = {
 	.ndo_set_mac_address 	 = eth_mac_addr,
 	.ndo_do_ioctl		 = netdev_ioctl,
 	.ndo_tx_timeout 	 = rhine_tx_timeout,
+	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
+	.ndo_vlan_rx_register	 = rhine_vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	 = rhine_poll,
 #endif
@@ -795,6 +871,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
+	if (pdev->revision >= VT6105M)
+		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1040,6 +1120,190 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		       netif_carrier_ok(mii->dev));
 }
 
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	int i;
+
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (MCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	for (i = 0; i < 6; i++, addr++)
+		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (VCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+
+	/* Disable all CAMs */
+	rp->vCAMmask = 0;
+	rp->mCAMmask = 0;
+	rhine_set_vlan_cam_mask(ioaddr, rp->vCAMmask);
+	rhine_set_cam_mask(ioaddr, rp->mCAMmask);
+
+	/* disable hardware VLAN support */
+	BYTE_REG_BITS_OFF(TCR_PQEN, ioaddr + TxConfig);
+	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @rp: rhine_private data of this Rhine
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct rhine_private *rp)
+{
+	void __iomem *ioaddr = rp->base;
+
+	rp->vCAMmask = 0;
+
+	if (rp->vlgrp) {
+		unsigned int vid, i = 0;
+
+		for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+			if (vlan_group_get_device(rp->vlgrp, vid)) {
+				rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+				rp->vCAMmask |= 1 << i;
+				if (++i >= VCAM_SIZE)
+					break;
+			}
+		}
+		rhine_set_vlan_cam_mask(ioaddr, rp->vCAMmask);
+	}
+}
+
+static void rhine_vlan_rx_register(struct net_device *dev,
+				      struct vlan_group *grp)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+
+	rp->vlgrp = grp;
+	if (grp) {
+		BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+		/* enable VLAN receive filtering */
+		if (!(dev->flags & IFF_PROMISC))
+			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+	} else {
+		BYTE_REG_BITS_OFF(TCR_PQEN, ioaddr + TxConfig);
+		BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+	}
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	rhine_update_vcam(rp);
+	spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	vlan_group_set_device(rp->vlgrp, vid, NULL);
+	rhine_update_vcam(rp);
+	spin_unlock_irq(&rp->lock);
+}
+
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1325,9 @@ static void init_registers(struct net_device *dev)
 
 	rhine_set_rx_mode(dev);
 
+	if (rp->pdev->revision >= VT6105M)
+		rhine_init_cam_filter(dev);
+
 	napi_enable(&rp->napi);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1543,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	rp->tx_ring[entry].desc_length =
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
+	if (unlikely(rp->vlgrp && vlan_tx_tag_present(skb))) {
+		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+		/* request tagging */
+		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+	}
+	else
+		rp->tx_ring[entry].tx_status = 0;
+
 	/* lock eth irq */
 	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
-	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
 
 	rp->cur_tx++;
 
 	/* Non-x86 Todo: explicitly flush cache lines here. */
 
+	if (rp->vlgrp && vlan_tx_tag_present(skb))
+		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 	/* Wake the potentially-idle transmit channel */
 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 	       ioaddr + ChipCmd1);
@@ -1437,6 +1716,21 @@ static void rhine_tx(struct net_device *dev)
 	spin_unlock(&rp->lock);
 }
 
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+	return ntohs(*(u16 *)trailer);
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
@@ -1454,6 +1748,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 	for (count = 0; count < limit; ++count) {
 		struct rx_desc *desc = rp->rx_head_desc;
 		u32 desc_status = le32_to_cpu(desc->rx_status);
+		u32 desc_length = le32_to_cpu(desc->desc_length);
 		int data_size = desc_status >> 16;
 
 		if (desc_status & DescOwn)
@@ -1498,6 +1793,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
+			u16 vlan_tci = 0;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
@@ -1532,8 +1828,16 @@ static int rhine_rx(struct net_device *dev, int limit)
 						 rp->rx_buf_sz,
 						 PCI_DMA_FROMDEVICE);
 			}
+
+			if (unlikely(rp->vlgrp && (desc_length & DescTag)))
+				vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
 			skb->protocol = eth_type_trans(skb, dev);
-			netif_receive_skb(skb);
+
+			if (unlikely(rp->vlgrp && (desc_length & DescTag)))
+				vlan_hwaccel_receive_skb(skb, rp->vlgrp, vlan_tci);
+			else
+				netif_receive_skb(skb);
 			dev->stats.rx_bytes += pkt_len;
 			dev->stats.rx_packets++;
 		}
@@ -1596,6 +1900,11 @@ static void rhine_restart_tx(struct net_device *dev) {
 
 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
 		       ioaddr + ChipCmd);
+
+		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 		       ioaddr + ChipCmd1);
 		IOSYNC;
@@ -1631,7 +1940,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	}
 	if (intr_status & IntrTxUnderrun) {
 		if (rp->tx_thresh < 0xE0)
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		if (debug > 1)
 			printk(KERN_INFO "%s: Transmitter underrun, Tx "
 			       "threshold now %2.2x.\n",
@@ -1646,7 +1955,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	    (intr_status & (IntrTxAborted |
 	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
 		if (rp->tx_thresh < 0xE0) {
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		}
 		if (debug > 1)
 			printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1997,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	u32 mc_filter[2];	/* Multicast hash filter */
-	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */
+	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
+	struct netdev_hw_addr *ha;
 
 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
 		rx_mode = 0x1C;
@@ -1699,10 +2009,17 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
+	} else if (rp->pdev->revision >= VT6105M) {
+		int i = 0;
+		netdev_for_each_mc_addr(ha, dev) {
+			if (i == MCAM_SIZE)
+				break;
+			rhine_set_cam(ioaddr, i, ha->addr);
+			rp->mCAMmask |= 1 << i;
+			i++;
+		}
+		rhine_set_cam_mask(ioaddr, rp->mCAMmask);
 	} else {
-		struct netdev_hw_addr *ha;
-
 		memset(mc_filter, 0, sizeof(mc_filter));
 		netdev_for_each_mc_addr(ha, dev) {
 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2028,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		}
 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
 	}
-	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+	/* enable/disable VLAN receive filtering */
+	if (rp->pdev->revision >= VT6105M) {
+		if (dev->flags & IFF_PROMISC || !rp->vlgrp)
+			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+		else
+			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+	}
+	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-- 
1.7.2.2



* Re: [PATCH] via-rhine: hardware VLAN support
  2010-11-05 10:43 [PATCH] via-rhine: hardware VLAN support Roger Luethi
@ 2010-11-05 18:31 ` Jesse Gross
  2010-11-08 16:21   ` Roger Luethi
  0 siblings, 1 reply; 7+ messages in thread
From: Jesse Gross @ 2010-11-05 18:31 UTC (permalink / raw)
  To: Roger Luethi; +Cc: netdev, David S. Miller

On Fri, Nov 5, 2010 at 3:43 AM, Roger Luethi <rl@hellgate.ch> wrote:
> This patch adds VLAN hardware support for Rhine chips.
>
> The driver uses up to 3 additional bytes of buffer space when extracting
> 802.1Q headers; PKT_BUF_SZ should still be sufficient.
>
> The initial code was provided by David Lv. I reworked it to use standard
> kernel facilities. Coding style clean up mostly follows via-velocity.

This uses the old interfaces for vlan acceleration.  We're working to
switch drivers over to use the new methods and the old ones will be
going away in the future.  It would be great if we can avoid adding
more code that uses those interfaces.
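
In concrete terms, the receive-path difference looks roughly like this
(sketch only; two-argument __vlan_hwaccel_put_tag() as of the 2.6.37-era
API):

	/* old model, as in the patch above: the driver must keep a
	 * struct vlan_group pointer (rp->vlgrp) around */
	vlan_hwaccel_receive_skb(skb, rp->vlgrp, vlan_tci);

	/* newer model: attach the tag to the skb and hand it up as
	 * usual -- no vlan_group bookkeeping in the driver */
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	netif_receive_skb(skb);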


* Re: [PATCH] via-rhine: hardware VLAN support
  2010-11-05 18:31 ` Jesse Gross
@ 2010-11-08 16:21   ` Roger Luethi
  2010-11-08 20:53     ` Jesse Gross
  0 siblings, 1 reply; 7+ messages in thread
From: Roger Luethi @ 2010-11-08 16:21 UTC (permalink / raw)
  To: Jesse Gross; +Cc: netdev, David S. Miller

On Fri, 05 Nov 2010 11:31:56 -0700, Jesse Gross wrote:
> On Fri, Nov 5, 2010 at 3:43 AM, Roger Luethi <rl@hellgate.ch> wrote:
> > This patch adds VLAN hardware support for Rhine chips.
> 
> This uses the old interfaces for vlan acceleration.  We're working to
> switch drivers over to use the new methods and the old ones will be
> going away in the future.  It would be great if we can avoid adding
> more code that uses those interfaces.

Can you point me to a driver that has been switched to use the new methods
already? Is there some other form of documentation?


* Re: [PATCH] via-rhine: hardware VLAN support
  2010-11-08 16:21   ` Roger Luethi
@ 2010-11-08 20:53     ` Jesse Gross
  2010-11-09  6:18       ` Roger Luethi
  2010-11-21 13:17       ` Roger Luethi
  0 siblings, 2 replies; 7+ messages in thread
From: Jesse Gross @ 2010-11-08 20:53 UTC (permalink / raw)
  To: Roger Luethi; +Cc: netdev, David S. Miller

On Mon, Nov 8, 2010 at 8:21 AM, Roger Luethi <rl@hellgate.ch> wrote:
> On Fri, 05 Nov 2010 11:31:56 -0700, Jesse Gross wrote:
>> On Fri, Nov 5, 2010 at 3:43 AM, Roger Luethi <rl@hellgate.ch> wrote:
>> > This patch adds VLAN hardware support for Rhine chips.
>>
>> This uses the old interfaces for vlan acceleration.  We're working to
>> switch drivers over to use the new methods and the old ones will be
>> going away in the future.  It would be great if we can avoid adding
>> more code that uses those interfaces.
>
> Can you point me to a driver that has been switched to use the new methods
> already? Is there some other form of documentation?

bnx2 is an example of a driver that has been converted.  The commit
that actually made the change was
7d0fd2117e3d0550d7987b3aff2bfbc0244cf7c6, which should highlight the
differences.  A key point is that drivers should no longer reference
vlan groups at all.
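
Applied to this driver, the filter callbacks would end up looking
roughly like this (sketch only; assumes a driver-private active_vlans
bitmap in rhine_private instead of the stack's vlan_group):

static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_irq(&rp->lock);
	set_bit(vid, rp->active_vlans);		/* private bookkeeping */
	rhine_update_vcam(rp);
	spin_unlock_irq(&rp->lock);
}

static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_irq(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(rp);
	spin_unlock_irq(&rp->lock);
}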


* Re: [PATCH] via-rhine: hardware VLAN support
  2010-11-08 20:53     ` Jesse Gross
@ 2010-11-09  6:18       ` Roger Luethi
  2010-11-21 13:17       ` Roger Luethi
  1 sibling, 0 replies; 7+ messages in thread
From: Roger Luethi @ 2010-11-09  6:18 UTC (permalink / raw)
  To: Jesse Gross; +Cc: netdev, David S. Miller

On Mon, 08 Nov 2010 12:53:57 -0800, Jesse Gross wrote:
> On Mon, Nov 8, 2010 at 8:21 AM, Roger Luethi <rl@hellgate.ch> wrote:
> > On Fri, 05 Nov 2010 11:31:56 -0700, Jesse Gross wrote:
> >> On Fri, Nov 5, 2010 at 3:43 AM, Roger Luethi <rl@hellgate.ch> wrote:
> >> > This patch adds VLAN hardware support for Rhine chips.
> >>
> >> This uses the old interfaces for vlan acceleration.  We're working to
> >> switch drivers over to use the new methods and the old ones will be
> >> going away in the future.  It would be great if we can avoid adding
> >> more code that uses those interfaces.
> >
> > Can you point me to a driver that has been switched to use the new methods
> > already? Is there some other form of documentation?
> 
> bnx2 is an example of a driver that has been converted.  The commit
> that actually made the change was
> 7d0fd2117e3d0550d7987b3aff2bfbc0244cf7c6, which should highlight the
> differences.  A key point is that drivers should no longer reference
> vlan groups at all.

Thank you. I will take a look and submit a revised patch.


* Re: [PATCH] via-rhine: hardware VLAN support
  2010-11-08 20:53     ` Jesse Gross
  2010-11-09  6:18       ` Roger Luethi
@ 2010-11-21 13:17       ` Roger Luethi
  2010-11-23  2:02         ` Jesse Gross
  1 sibling, 1 reply; 7+ messages in thread
From: Roger Luethi @ 2010-11-21 13:17 UTC (permalink / raw)
  To: Jesse Gross; +Cc: netdev, David S. Miller

On Mon, 08 Nov 2010 12:53:57 -0800, Jesse Gross wrote:
> On Mon, Nov 8, 2010 at 8:21 AM, Roger Luethi <rl@hellgate.ch> wrote:
> > On Fri, 05 Nov 2010 11:31:56 -0700, Jesse Gross wrote:
> > Can you point me to a driver that has been switched to use the new methods
> > already? Is there some other form of documentation?
> 
> bnx2 is an example of a driver that has been converted.  The commit
> that actually made the change was
> 7d0fd2117e3d0550d7987b3aff2bfbc0244cf7c6, which should highlight the
> differences.  A key point is that drivers should no longer reference
> vlan groups at all.

bnx2 does not support hardware VLAN filters, but ixgbe does (converted by
commit f62bbb5e62c6e4a91fb222d22bc46e8d4d7e59ef). ixgbe keeps a list of
configured VLANs in a device private data structure (active_vlans). Is that
the model to follow?



* Re: [PATCH] via-rhine: hardware VLAN support
  2010-11-21 13:17       ` Roger Luethi
@ 2010-11-23  2:02         ` Jesse Gross
  0 siblings, 0 replies; 7+ messages in thread
From: Jesse Gross @ 2010-11-23  2:02 UTC (permalink / raw)
  To: Roger Luethi; +Cc: netdev, David S. Miller

On Sun, Nov 21, 2010 at 5:17 AM, Roger Luethi <rl@hellgate.ch> wrote:
> On Mon, 08 Nov 2010 12:53:57 -0800, Jesse Gross wrote:
>> On Mon, Nov 8, 2010 at 8:21 AM, Roger Luethi <rl@hellgate.ch> wrote:
>> > On Fri, 05 Nov 2010 11:31:56 -0700, Jesse Gross wrote:
>> > Can you point me to a driver that has been switched to use the new methods
>> > already? Is there some other form of documentation?
>>
>> bnx2 is an example of a driver that has been converted.  The commit
>> that actually made the change was
>> 7d0fd2117e3d0550d7987b3aff2bfbc0244cf7c6, which should highlight the
>> differences.  A key point is that drivers should no longer reference
>> vlan groups at all.
>
> bnx2 does not support hardware VLAN filters, but ixgbe does (converted by
> commit f62bbb5e62c6e4a91fb222d22bc46e8d4d7e59ef). ixgbe keeps a list of
> configured VLANs in a device private data structure (active_vlans). Is that
> the model to follow?

Yes, that's right.  The driver should store whatever information it
requires to manage the CAM or restore the state after a board reset.
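
For the VLAN CAM that might look roughly like the following sketch
(assumes rhine_private gains an active_vlans bitmap sized
BITS_TO_LONGS(VLAN_N_VID), as ixgbe does; the CAM helpers are the ones
from the patch above):

/* Rebuild the VLAN CAM from driver-private state, e.g. after a board
 * reset or whenever a VLAN is added or removed. */
static void rhine_update_vcam(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	unsigned int i = 0;
	u16 vid;

	rp->vCAMmask = 0;
	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		rp->vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, rp->vCAMmask);
}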

