* [net-next-2.6 PATCH 1/5] ixgbe: use known user priority for FCoE when DCB is enabled
From: Jeff Kirsher @ 2009-12-03 21:32 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Yi Zou, Peter P Waskiewicz Jr, Jeff Kirsher
From: Yi Zou <yi.zou@intel.com>
Store the user priority for FCoE and use it directly for outgoing
FCoE traffic when DCB is enabled.
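For context (not part of the patch): RTTUP2TC packs a 3-bit traffic class for
each of the eight user priorities, which is what the lookup loop below decodes
to recover the UP assigned to the FCoE traffic class. On transmit, that 3-bit
UP is placed in the PCP field, bits 15:13, of the VLAN tag, hence the shifts
by 13 in the diff. A minimal sketch of the placement, using the kernel's
VLAN_PRIO_* constants (the helper itself is illustrative, not driver code):

	#include <linux/if_vlan.h>	/* VLAN_PRIO_MASK (0xe000), VLAN_PRIO_SHIFT (13) */

	/* illustrative helper, not part of the driver */
	static u16 tci_with_up(u16 tci, u8 up)
	{
		tci &= ~VLAN_PRIO_MASK;		/* clear old priority bits */
		tci |= ((u16)(up & 0x7)) << VLAN_PRIO_SHIFT;	/* UP into bits 15:13 */
		return tci;
	}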
Signed-off-by: Yi Zou <yi.zou@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe_fcoe.c | 30 +++++++++++++++++-------------
drivers/net/ixgbe/ixgbe_fcoe.h | 3 +++
drivers/net/ixgbe/ixgbe_main.c | 8 ++++++++
3 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index edecdc8..da32a10 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -499,6 +499,10 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+#ifdef CONFIG_IXGBE_DCB
+ u8 tc;
+ u32 up2tc;
+#endif
/* create the pool for ddp if not created yet */
if (!fcoe->pool) {
@@ -540,6 +544,17 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_FCRXCTRL_FCOELLI |
IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
+#ifdef CONFIG_IXGBE_DCB
+ up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < MAX_USER_PRIORITY; i++) {
+ tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
+ tc &= (MAX_TRAFFIC_CLASS - 1);
+ if (fcoe->tc == tc) {
+ fcoe->up = i;
+ break;
+ }
+ }
+#endif
}
/**
@@ -671,19 +686,7 @@ out_disable:
*/
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
{
- int i;
- u8 tc;
- u32 up2tc;
-
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
- tc &= (MAX_TRAFFIC_CLASS - 1);
- if (adapter->fcoe.tc == tc)
- return 1 << i;
- }
-
- return 0;
+ return 1 << adapter->fcoe.up;
}
/**
@@ -710,6 +713,7 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
up2tc &= (MAX_TRAFFIC_CLASS - 1);
adapter->fcoe.tc = (u8)up2tc;
+ adapter->fcoe.up = i;
return 0;
}
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index b5dee7b..de8ff53 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -62,7 +62,10 @@ struct ixgbe_fcoe_ddp {
};
struct ixgbe_fcoe {
+#ifdef CONFIG_IXGBE_DCB
u8 tc;
+ u8 up;
+#endif
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9ba506f..09990ed 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3958,8 +3958,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
#endif /* IXGBE_FCOE */
}
@@ -5332,6 +5334,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
(skb->protocol == htons(ETH_P_FCOE))) {
tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
+#ifdef CONFIG_IXGBE_DCB
+ tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= ((adapter->fcoe.up << 13)
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
r_idx = smp_processor_id();
r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
r_idx += adapter->ring_feature[RING_F_FCOE].mask;
* [net-next-2.6 PATCH 2/5] ixgbe: select FCoE Tx queue in ndo_select_queue
From: Jeff Kirsher @ 2009-12-03 21:32 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Yi Zou, Peter P Waskiewicz Jr, Jeff Kirsher
From: Yi Zou <yi.zou@intel.com>
This removes the Tx queue selection for FCoE traffic from ixgbe_xmit_frame()
and does it in the ndo_select_queue() call, moving all Tx queue selection
into a single routine.
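The FCoE queue index is computed as before: the CPU id is folded into the
FCoE queue range and offset by the start of the FCoE ring region. A worked
example with illustrative sizes: if RING_F_FCOE has indices = 8 and mask = 64,
a transmit on CPU 11 selects txq = (11 & (8 - 1)) + 64 = 67. Since
skb->queue_mapping can now name one of these high-numbered FCoE queues, the
DCB path below masks it to 3 bits (& 0x7) before shifting it into the VLAN
priority field, so only a valid priority lands in bits 15:13.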
Signed-off-by: Yi Zou <yi.zou@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe_main.c | 24 ++++++++++++++----------
1 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 09990ed..e3dc68b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5286,10 +5286,19 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int txq = smp_processor_id();
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- return smp_processor_id();
+ return txq;
+#ifdef IXGBE_FCOE
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (skb->protocol == htons(ETH_P_FCOE))) {
+ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+ txq += adapter->ring_feature[RING_F_FCOE].mask;
+ return txq;
+ }
+#endif
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
@@ -5304,7 +5313,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
int count = 0;
unsigned int f;
@@ -5312,13 +5321,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
- tx_flags |= (skb->queue_mapping << 13);
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
if (skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= (skb->queue_mapping << 13);
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else {
@@ -5327,8 +5336,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
}
- r_idx = skb->queue_mapping;
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = &adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
@@ -5340,10 +5348,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags |= ((adapter->fcoe.up << 13)
<< IXGBE_TX_FLAGS_VLAN_SHIFT);
#endif
- r_idx = smp_processor_id();
- r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- r_idx += adapter->ring_feature[RING_F_FCOE].mask;
- tx_ring = &adapter->tx_ring[r_idx];
#endif
}
/* four things can cause us to need a context descriptor */
* [net-next-2.6 PATCH 3/5] ixgbe: change default ring size
From: Jeff Kirsher @ 2009-12-03 21:33 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Jesse Brandeburg, Peter P Waskiewicz Jr,
Jeff Kirsher
From: Jesse Brandeburg <jesse.brandeburg@intel.com>
Decrease the memory utilization of the Tx/Rx queue allocation by
changing the default ring size from 1024 to 512 descriptors. At
1024 Rx entries of 2 KB each (each drawn from a 4 KB slab object)
with 16 queues, ixgbe was using 64 MB of memory per port, which is
not necessary.
Users can still change the ring sizes with ethtool -G.
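The arithmetic per port (the new footprint follows by halving the default):

	1024 descriptors x 4 KB slab object x 16 queues = 64 MB
	 512 descriptors x 4 KB slab object x 16 queues = 32 MB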
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 7e35e97..91d80b7 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -51,11 +51,11 @@
__func__ , ## args)))
/* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD 1024
+#define IXGBE_DEFAULT_TXD 512
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
-#define IXGBE_DEFAULT_RXD 1024
+#define IXGBE_DEFAULT_RXD 512
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
* [net-next-2.6 PATCH 4/5] ixgbe: performance tweaks
From: Jeff Kirsher @ 2009-12-03 21:33 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Jesse Brandeburg, Peter P Waskiewicz Jr,
Jeff Kirsher
From: Jesse Brandeburg <jesse.brandeburg@intel.com>
Drop variables that had their cache lines modified in simultaneous hot paths.
Keep some variables that are modified on hot paths, but make their storage
per queue. Cache-align DMA data buffer start addresses. Cache-align (pad)
some structures that end within a cache line.
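A minimal sketch (an illustrative struct, not the driver's own) of the idiom
applied below: hot-path counters move into the per-queue ring structure, and
the structure is padded out to a cache-line boundary so writers on different
CPUs never dirty the same line:

	struct example_ring {
		u64 restart_queue;	/* written only by this ring's Tx clean path */
		u64 non_eop_descs;	/* written only by this ring's Rx clean path */
		/* ... */
	} ____cacheline_internodealigned_in_smp;	/* pad to cache-line size */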
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe.h | 20 +++++++++-----------
drivers/net/ixgbe/ixgbe_ethtool.c | 5 -----
drivers/net/ixgbe/ixgbe_main.c | 32 ++++++++++++++++++--------------
3 files changed, 27 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 91d80b7..8da8eb5 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -161,10 +161,12 @@ struct ixgbe_ring {
unsigned long reinit_state;
u64 rsc_count; /* stat for coalesced packets */
u64 rsc_flush; /* stats for flushed packets */
+ u32 restart_queue; /* track tx queue restarts */
+ u32 non_eop_descs; /* track hardware descriptor chaining */
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
-};
+} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
@@ -189,7 +191,7 @@ enum ixgbe_ring_f_enum {
struct ixgbe_ring_feature {
int indices;
int mask;
-};
+} ____cacheline_internodealigned_in_smp;
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 128
@@ -275,29 +277,25 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring; /* One per active queue */
+ struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_tx_queues;
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
bool detect_tx_hung;
+ u64 restart_queue;
+ u64 lsc_int;
+
/* RX */
- struct ixgbe_ring *rx_ring; /* One per active queue */
+ struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_rx_queues;
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 1928d55..06a9d18 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -93,16 +93,11 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_restart_queue", IXGBE_STAT(restart_queue)},
{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
- {"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
- {"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
- {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
- {"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
- {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e3dc68b..db05030 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -413,7 +413,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->restart_queue;
}
}
@@ -624,7 +624,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
}
static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -681,14 +680,19 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(adapter->netdev,
- rx_ring->rx_buf_len);
+ /* netdev_alloc_skb reserves 32 bytes up front!! */
+ uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
+ /* advance the data pointer to the next cache line */
+ skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+ - skb->data));
+
bi->skb = skb;
bi->dma = pci_map_single(pdev, skb->data,
rx_ring->rx_buf_len,
@@ -801,8 +805,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
if (len > IXGBE_RX_HDR_SIZE)
len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -812,7 +814,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
cleaned = true;
skb = rx_buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
+ prefetch(skb->data);
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
@@ -884,7 +886,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- adapter->non_eop_descs++;
+ rx_ring->non_eop_descs++;
goto next_desc;
}
@@ -4511,6 +4513,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->rsc_total_flush = rsc_flush;
}
+ /* gather some stats to the adapter struct that are per queue */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
@@ -4893,14 +4902,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- adapter->hw_tso_ctxt++;
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
}
i = tx_ring->next_to_use;
@@ -5019,7 +5026,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
- adapter->hw_csum_tx_good++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -5256,8 +5262,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
struct ixgbe_ring *tx_ring, int size)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -5271,7 +5275,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->restart_queue;
return 0;
}
* [net-next-2.6 PATCH 5/5] ixgbe: use EIAM to automask MSI-X
From: Jeff Kirsher @ 2009-12-03 21:33 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Jesse Brandeburg, Peter P Waskiewicz Jr,
Jeff Kirsher
From: Jesse Brandeburg <jesse.brandeburg@intel.com>
When disabling interrupts, the driver was issuing an MMIO write per
interrupt; this is not necessary because on ixgbe parts the hardware
can "oneshot" disable and clear the interrupt. So on 82598/82599,
using EIAM avoids one posted write per interrupt when in MSI-X mode.
This should improve performance and, in my limited testing, seems to
reduce CPU utilization very slightly.
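In effect each MSI-X handler loses one posted MMIO write; schematically:

	/* before: an explicit mask write in every MSI-X handler */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	/* after: EIAM was programmed once in ixgbe_up_complete(), so the
	 * hardware auto-masks the vector when the interrupt asserts */
	napi_schedule(&q_vector->napi);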
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe_main.c | 25 +++++++++++++++++++------
1 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index db05030..247ed2a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1329,8 +1329,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx + 1);
}
- /* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1362,7 +1361,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
return IRQ_HANDLED;
/* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1397,8 +1396,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx + 1);
}
- /* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -2716,7 +2714,22 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ /*
+ * use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ break;
+ default:
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ break;
+ }
+ } else {
/* legacy interrupts, use EIAM to auto-mask when reading EICR,
* specifically only auto mask tx and rx interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
* Re: [net-next-2.6 PATCH 1/5] ixgbe: use known user priority for FCoE when DCB is enabled
From: David Miller @ 2009-12-03 23:43 UTC (permalink / raw)
To: jeffrey.t.kirsher; +Cc: netdev, gospo, yi.zou, peter.p.waskiewicz.jr
All 5 patches applied, thanks.