From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, gospo@redhat.com,
Alexander Duyck <alexander.h.duyck@intel.com>,
Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Subject: [net-next-2.6 PATCH 09/20] igb: move rx_buffer_len into the ring structure
Date: Tue, 27 Oct 2009 18:52:07 -0700
Message-ID: <20091028015206.12470.50840.stgit@localhost.localdomain>
In-Reply-To: <20091028014858.12470.99520.stgit@localhost.localdomain>
From: Alexander Duyck <alexander.h.duyck@intel.com>
This patch moves the rx_buffer_len value into the ring structure. This allows
greater flexibility and the option of doing things such as supporting packet
split only on some queues, or enabling virtualization.
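
For readers skimming the hunks below, the net effect is roughly the
following (a trimmed sketch of just the moved field and one representative
caller; the real definitions live in drivers/net/igb/igb.h and
drivers/net/igb/igb_main.c):

	/* sketch only -- stand-in typedef so the snippet is self-contained */
	typedef unsigned int u32;

	struct igb_adapter {            /* before: one adapter-wide setting */
		u32 rx_buffer_len;      /* shared by every RX queue */
		/* ... remaining members omitted ... */
	};

	struct igb_ring {               /* after: each ring carries its own */
		u32 rx_buffer_len;      /* per-queue RX buffer size */
		/* ... remaining members omitted ... */
	};

	/* Callers switch from the adapter to the ring, e.g. when unmapping
	 * an RX buffer:
	 *	- adapter->rx_buffer_len
	 *	+ rx_ring->rx_buffer_len
	 */

With the field stored per ring, a later change can size buffers differently
on individual queues (for example, packet split on some rings only) without
touching adapter-wide state.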
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/igb/igb.h | 3 +--
drivers/net/igb/igb_main.c | 41 ++++++++++++++++++++++-------------------
2 files changed, 23 insertions(+), 21 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index de26862..00ff274 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -198,7 +198,7 @@ struct igb_ring {
/* RX */
struct {
struct igb_rx_queue_stats rx_stats;
- u64 rx_queue_drops;
+ u32 rx_buffer_len;
};
};
};
@@ -218,7 +218,6 @@ struct igb_adapter {
struct vlan_group *vlgrp;
u16 mng_vlan_id;
u32 bd_number;
- u32 rx_buffer_len;
u32 wol;
u32 en_mng_pt;
u16 link_speed;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ff16b7a..04e860d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -443,6 +443,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
+ ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
}
igb_cache_ring_register(adapter);
@@ -1863,7 +1864,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
adapter->tx_ring_count = IGB_DEFAULT_TXD;
adapter->rx_ring_count = IGB_DEFAULT_RXD;
- adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
@@ -2358,8 +2358,8 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
writel(0, ring->tail);
/* set descriptor configuration */
- if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
- srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
+ if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+ srrctl = ALIGN(ring->rx_buffer_len, 64) <<
E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
srrctl |= IGB_RXBUFFER_16384 >>
@@ -2370,7 +2370,7 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
#endif
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else {
- srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
+ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
@@ -2619,7 +2619,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
- struct igb_adapter *adapter = rx_ring->q_vector->adapter;
struct igb_buffer *buffer_info;
unsigned long size;
unsigned int i;
@@ -2632,7 +2631,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
if (buffer_info->dma) {
pci_unmap_single(rx_ring->pdev,
buffer_info->dma,
- adapter->rx_buffer_len,
+ rx_ring->rx_buffer_len,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
}
@@ -3746,6 +3745,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ u32 rx_buffer_len, i;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3763,9 +3763,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
- if (netif_running(netdev))
- igb_down(adapter);
-
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
@@ -3773,16 +3770,22 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
*/
if (max_frame <= IGB_RXBUFFER_1024)
- adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+ rx_buffer_len = IGB_RXBUFFER_1024;
else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
- adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
- adapter->rx_buffer_len = IGB_RXBUFFER_128;
+ rx_buffer_len = IGB_RXBUFFER_128;
+
+ if (netif_running(netdev))
+ igb_down(adapter);
dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
if (netif_running(netdev))
igb_up(adapter);
else
@@ -4828,7 +4831,7 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
}
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc)
{
/* HW will not DMA in data larger than the given buffer, even if it
@@ -4837,8 +4840,8 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
*/
u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > adapter->rx_buffer_len)
- hlen = adapter->rx_buffer_len;
+ if (hlen > rx_ring->rx_buffer_len)
+ hlen = rx_ring->rx_buffer_len;
return hlen;
}
@@ -4888,14 +4891,14 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
if (buffer_info->dma) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_buffer_len,
+ rx_ring->rx_buffer_len,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
- if (adapter->rx_buffer_len >= IGB_RXBUFFER_1024) {
+ if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
goto send_up;
}
- skb_put(skb, igb_get_hlen(adapter, rx_desc));
+ skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
}
if (length) {
@@ -5034,7 +5037,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
- bufsz = adapter->rx_buffer_len;
+ bufsz = rx_ring->rx_buffer_len;
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);