* [PATCH v2 net-next 0/2] qlge: feature update
@ 2013-08-29 4:51 Jitendra Kalsaria
2013-08-29 4:51 ` [PATCH v2 net-next 1/2] qlge: Enhance nested VLAN (Q-in-Q) handling Jitendra Kalsaria
2013-08-29 4:51 ` [PATCH v2 net-next 2/2] qlge: Update version to 1.0.0.33 Jitendra Kalsaria
0 siblings, 2 replies; 4+ messages in thread
From: Jitendra Kalsaria @ 2013-08-29 4:51 UTC (permalink / raw)
To: davem; +Cc: netdev, ron.mercer, Dept_NX_Linux_NIC_Driver, Jitendra Kalsaria
From: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
This patch series enhances the handling of nested VLAN tags in the Rx path.
V2 changes:
* Removed the module parameter.
Please apply the series to net-next.
Jitendra Kalsaria (2):
qlge: Enhance nested VLAN (Q-in-Q) handling.
qlge: Update version to 1.0.0.33
drivers/net/ethernet/qlogic/qlge/qlge.h | 2 +-
drivers/net/ethernet/qlogic/qlge/qlge_main.c | 142 ++++++++++----------------
2 files changed, 54 insertions(+), 90 deletions(-)
* [PATCH v2 net-next 1/2] qlge: Enhance nested VLAN (Q-in-Q) handling.
2013-08-29 4:51 [PATCH v2 net-next 0/2] qlge: feature update Jitendra Kalsaria
@ 2013-08-29 4:51 ` Jitendra Kalsaria
2013-08-29 5:31 ` David Miller
2013-08-29 4:51 ` [PATCH v2 net-next 2/2] qlge: Update version to 1.0.0.33 Jitendra Kalsaria
1 sibling, 1 reply; 4+ messages in thread
From: Jitendra Kalsaria @ 2013-08-29 4:51 UTC (permalink / raw)
To: davem; +Cc: netdev, ron.mercer, Dept_NX_Linux_NIC_Driver, Jitendra Kalsaria
From: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
The adapter does not handle packets with nested VLAN tags in the
Rx path. Turn off VLAN tag stripping in the hardware and let the
stack strip the VLAN tags in the Rx path.
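For illustration, a minimal userspace sketch (not part of the patch) of the
layout check the new ql_update_mac_hdr_len() helper performs: the first
ethertype field sits at byte offset 12, and in a Q-in-Q frame a second 802.1Q
ethertype follows 4 bytes later, so the MAC header grows by VLAN_HLEN per tag.
The constants are redefined locally, the sample frame bytes are made up, and
this simplified variant checks the outer ethertype directly rather than the
adapter's IB_MAC_IOCB_RSP_V flag.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN     6
#define ETH_HLEN     14            /* dst MAC + src MAC + ethertype */
#define VLAN_HLEN    4             /* TPID + TCI */
#define ETH_P_8021Q  0x8100

/* Read a big-endian 16-bit field without worrying about alignment. */
static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Return the MAC header length for zero, one or two stacked VLAN tags. */
static size_t mac_hdr_len(const uint8_t *frame)
{
	const uint8_t *etype = frame + 2 * ETH_ALEN;    /* first ethertype */
	size_t len = ETH_HLEN;

	if (get_be16(etype) == ETH_P_8021Q) {
		len += VLAN_HLEN;                       /* outer 802.1Q tag */
		if (get_be16(etype + VLAN_HLEN) == ETH_P_8021Q)
			len += VLAN_HLEN;               /* inner (Q-in-Q) tag */
	}
	return len;
}

int main(void)
{
	uint8_t qinq[] = {
		0,0,0,0,0,1,  0,0,0,0,0,2,     /* dst MAC, src MAC */
		0x81,0x00, 0x00,0x64,          /* outer TPID + TCI (VLAN 100) */
		0x81,0x00, 0x00,0xc8,          /* inner TPID + TCI (VLAN 200) */
		0x08,0x00,                     /* IPv4 ethertype */
	};

	printf("MAC header length: %zu\n", mac_hdr_len(qinq)); /* prints 22 */
	return 0;
}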
Signed-off-by: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
---
drivers/net/ethernet/qlogic/qlge/qlge_main.c | 142 ++++++++++----------------
1 files changed, 53 insertions(+), 89 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf4..332cdc9 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -409,8 +409,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(qdev->
func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
/* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
break;
@@ -1464,12 +1462,31 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
}
}
+/*
+ * This routine will update the mac header length based on
+ * single or nested vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
+ if (tags[6] == htons(ETH_P_8021Q) &&
+ tags[8] == htons(ETH_P_8021Q))
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ u32 length)
{
struct sk_buff *skb;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
@@ -1506,8 +1523,6 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
rx_ring->rx_bytes += length;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
napi_gro_frags(napi);
}
@@ -1515,14 +1530,14 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ u32 length)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
@@ -1540,25 +1555,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
goto err_out;
}
+ /* Update the MAC header length*/
+ ql_update_mac_hdr_len(ib_mac_rsp, addr, &hlen);
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
- if (skb->len > ndev->mtu + ETH_HLEN) {
+ if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset+ETH_HLEN,
- length-ETH_HLEN);
- skb->len += length-ETH_HLEN;
- skb->data_len += length-ETH_HLEN;
- skb->truesize += length-ETH_HLEN;
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1594,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
- (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+ (struct iphdr *) ((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1588,8 +1606,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
}
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(napi, skb);
else
@@ -1604,8 +1620,7 @@ err_out:
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ u32 length)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
@@ -1691,8 +1706,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
@@ -1726,7 +1739,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
/*
* Handle the header buffer if present.
@@ -1853,9 +1867,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->data_len += length;
skb->truesize += length;
length -= length;
- __pskb_pull_tail(skb,
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
} else {
/*
@@ -1910,8 +1925,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
}
- __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
return skb;
}
@@ -1919,8 +1935,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
@@ -2003,8 +2018,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
@@ -2017,9 +2030,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- ((le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
@@ -2027,35 +2037,30 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
/* The data and headers are split into
* separate buffers.
*/
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
/* The data fit in a single small buffer.
* Allocate a new skb, copy the data and
* return the buffer to the free pool.
*/
- ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length);
} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
/* TCP packet in a page chunk that's been checksummed.
* Tack it on to our GRO skb and let it go.
*/
- ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
/* Non-TCP packet in a page chunk. Allocate an
* skb, tack it on frags, and send it up.
*/
- ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
* can be processed corrrectly by the split frame logic.
*/
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp);
}
return (unsigned long)length;
@@ -2298,44 +2303,6 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
return work_done;
}
-static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
- NIC_RCV_CFG_VLAN_MATCH_AND_NON);
- } else {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
- }
-}
-
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
- netdev_features_t features)
-{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
- */
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- features |= NETIF_F_HW_VLAN_CTAG_TX;
- else
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-
- return features;
-}
-
-static int qlge_set_features(struct net_device *ndev,
- netdev_features_t features)
-{
- netdev_features_t changed = ndev->features ^ features;
-
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
- qlge_vlan_mode(ndev, features);
-
- return 0;
-}
-
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
@@ -3704,8 +3671,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
- mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
@@ -4651,8 +4618,6 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
- .ndo_fix_features = qlge_fix_features,
- .ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
@@ -4695,8 +4660,7 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))
--
1.7.1
* [PATCH v2 net-next 2/2] qlge: Update version to 1.0.0.33
2013-08-29 4:51 [PATCH v2 net-next 0/2] qlge: feature update Jitendra Kalsaria
2013-08-29 4:51 ` [PATCH v2 net-next 1/2] qlge: Enhance nested VLAN (Q-in-Q) handling Jitendra Kalsaria
@ 2013-08-29 4:51 ` Jitendra Kalsaria
1 sibling, 0 replies; 4+ messages in thread
From: Jitendra Kalsaria @ 2013-08-29 4:51 UTC (permalink / raw)
To: davem; +Cc: netdev, ron.mercer, Dept_NX_Linux_NIC_Driver, Jitendra Kalsaria
From: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
Signed-off-by: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
---
drivers/net/ethernet/qlogic/qlge/qlge.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8994337..1a36b9c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "1.0.0.33"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
--
1.7.1
* Re: [PATCH v2 net-next 1/2] qlge: Enhance nested VLAN (Q-in-Q) handling.
2013-08-29 4:51 ` [PATCH v2 net-next 1/2] qlge: Enhance nested VLAN (Q-in-Q) handling Jitendra Kalsaria
@ 2013-08-29 5:31 ` David Miller
0 siblings, 0 replies; 4+ messages in thread
From: David Miller @ 2013-08-29 5:31 UTC (permalink / raw)
To: jitendra.kalsaria; +Cc: netdev, ron.mercer, Dept_NX_Linux_NIC_Driver
From: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
Date: Thu, 29 Aug 2013 00:51:07 -0400
> + u16 *tags;
> +
> + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
> + tags = (u16 *)page;
> + /* Look for stacked vlan tags in ethertype field */
> + if (tags[6] == htons(ETH_P_8021Q) &&
> + tags[8] == htons(ETH_P_8021Q))
htons() returns a __be16; there is no way this code doesn't
generate sparse endianness errors.
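A sparse-clean variant would keep the pointer typed as __be16 so both sides
of the comparison carry the same endianness annotation. A rough, hypothetical
reworking of the helper (not the posted patch) might look like:

static void ql_update_mac_hdr_len(struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	/* Hypothetical rework: __be16 matches what htons() returns,
	 * so sparse sees like compared with like.
	 */
	const __be16 *tags = page;

	if (!(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V))
		return;

	/* Ethertype fields sit at u16 indexes 6 and 8 (byte offsets 12 and 16). */
	if (tags[6] == htons(ETH_P_8021Q) &&
	    tags[8] == htons(ETH_P_8021Q))
		*len += 2 * VLAN_HLEN;
	else
		*len += VLAN_HLEN;
}

The byte-order conversion stays on the constant side, which the compiler folds
at build time, so this costs nothing at runtime relative to the posted version.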
Also, your change sacrifices single-tagged VLAN performance
for the sake of Q-in-Q, as far as I can tell. I don't think that's
a very wise tradeoff if so.