* [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599
@ 2009-04-28 8:42 Jeff Kirsher
2009-04-28 8:42 ` [net-next PATCH 2/3] ixgbe: Interrupt management update " Jeff Kirsher
` (2 more replies)
0 siblings, 3 replies; 10+ messages in thread
From: Jeff Kirsher @ 2009-04-28 8:42 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Alexander Duyck, Jeff Kirsher
From: Alexander Duyck <alexander.h.duyck@intel.com>
This patch enables hardware receive side coalescing (RSC) for 82599 hardware.
The 82599 can merge multiple frames from the same TCP/IP flow into a single
structure that can span one or more descriptors. The accumulated data is
arranged similarly to a jumbo frame, with the exception that descriptors for
other packets can be interleaved in between. To handle this, the written-back
descriptor includes a next pointer indicating the next descriptor in the
write-back sequence.
This feature sets the NETIF_F_LRO flag; clearing that flag via the ethtool
set-flags operation will also disable hardware RSC.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
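For reference, here is a minimal userspace sketch of how the two write-back
fields used in this patch are decoded. The RSCCNT mask/shift are the values
the patch adds to ixgbe_type.h; the NEXTP mask/shift are assumed from the
driver's pre-existing definitions and appear here only for illustration.

#include <stdint.h>
#include <stdio.h>

#define IXGBE_RXDADV_RSCCNT_MASK   0x001E0000  /* added by this patch */
#define IXGBE_RXDADV_RSCCNT_SHIFT  17
#define IXGBE_RXDADV_NEXTP_MASK    0x000FFFF0  /* assumed from ixgbe_type.h */
#define IXGBE_RXDADV_NEXTP_SHIFT   4

/* Number of frames the hardware coalesced into this descriptor chain. */
static uint32_t rsc_count(uint32_t lo_dword)
{
        return (lo_dword & IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
}

/* Ring index of the next descriptor in the write-back sequence. */
static uint32_t rsc_next(uint32_t staterr)
{
        return (staterr & IXGBE_RXDADV_NEXTP_MASK) >> IXGBE_RXDADV_NEXTP_SHIFT;
}

int main(void)
{
        uint32_t lo_dword = 3u << IXGBE_RXDADV_RSCCNT_SHIFT;  /* RSCCNT = 3 */
        uint32_t staterr = 42u << IXGBE_RXDADV_NEXTP_SHIFT;   /* NEXTP = 42 */

        printf("coalesced: %u frames, next descriptor: %u\n",
               (unsigned)rsc_count(lo_dword), (unsigned)rsc_next(staterr));
        return 0;
}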
drivers/net/ixgbe/ixgbe.h | 4 +
drivers/net/ixgbe/ixgbe_ethtool.c | 24 +++++++
drivers/net/ixgbe/ixgbe_main.c | 121 ++++++++++++++++++++++++++++++++++---
drivers/net/ixgbe/ixgbe_type.h | 15 +++++
4 files changed, 152 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index c26433d..4b44a8e 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -147,6 +147,7 @@ struct ixgbe_ring {
u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
+ u64 rsc_count; /* stat for coalesced packets */
};
enum ixgbe_ring_f_enum {
@@ -294,6 +295,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
+#define IXGBE_FLAG_RSC_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_RSC_ENABLED (u32)(1 << 27)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -325,6 +328,7 @@ struct ixgbe_adapter {
struct timer_list sfp_timer;
struct work_struct multispeed_fiber_task;
struct work_struct sfp_config_module_task;
+ u64 rsc_count;
u32 wol;
u16 eeprom_version;
};
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index a499b6b..d822c92 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -67,6 +67,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
+ {"hw_rsc_count", IXGBE_STAT(rsc_count)},
{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
@@ -1127,6 +1128,27 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return 0;
}
+static int ixgbe_set_flags(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_op_set_flags(netdev, data);
+
+ if (!(adapter->flags & IXGBE_FLAG_RSC_CAPABLE))
+ return 0;
+
+ /* if state changes we need to update adapter->flags and reset */
+ if ((!!(data & ETH_FLAG_LRO)) !=
+ (!!(adapter->flags & IXGBE_FLAG_RSC_ENABLED))) {
+ adapter->flags ^= IXGBE_FLAG_RSC_ENABLED;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+ return 0;
+
+}
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
@@ -1161,7 +1183,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
+ .set_flags = ixgbe_set_flags,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ad43181..599808b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -622,6 +622,40 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
+static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK) >>
+ IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet. It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+ unsigned int frag_list_size = 0;
+
+ while (skb->prev) {
+ struct sk_buff *prev = skb->prev;
+ frag_list_size += skb->len;
+ skb->prev = NULL;
+ skb = prev;
+ }
+
+ skb_shinfo(skb)->frag_list = skb->next;
+ skb->next = NULL;
+ skb->len += frag_list_size;
+ skb->data_len += frag_list_size;
+ skb->truesize += frag_list_size;
+ return skb;
+}
+
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
@@ -631,7 +665,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
- unsigned int i;
+ unsigned int i, rsc_count = 0;
u32 len, staterr;
u16 hdr_info;
bool cleaned = false;
@@ -697,20 +731,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
i++;
if (i == rx_ring->count)
i = 0;
- next_buffer = &rx_ring->rx_buffer_info[i];
next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
prefetch(next_rxd);
-
cleaned_count++;
+
+ if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE)
+ rsc_count = ixgbe_get_rsc_count(rx_desc);
+
+ if (rsc_count) {
+ u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ next_buffer = &rx_ring->rx_buffer_info[nextp];
+ rx_ring->rsc_count += (rsc_count - 1);
+ } else {
+ next_buffer = &rx_ring->rx_buffer_info[i];
+ }
+
if (staterr & IXGBE_RXD_STAT_EOP) {
+ if (skb->prev)
+ skb = ixgbe_transform_rsc_queue(skb);
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
adapter->non_eop_descs++;
goto next_desc;
}
@@ -740,7 +792,7 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
- rx_buffer_info = next_buffer;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
@@ -1736,6 +1788,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 fctrl, hlreg0;
u32 reta = 0, mrqc = 0;
u32 rdrxctl;
+ u32 rscctrl;
int rx_buf_len;
/* Decide whether to use packet split mode or not */
@@ -1753,7 +1806,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
} else {
- if (netdev->mtu <= ETH_DATA_LEN)
+ if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) &&
+ (netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
rx_buf_len = ALIGN(max_frame, 1024);
@@ -1875,8 +1929,38 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+
+ if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) {
+ /* Enable 82599 HW-RSC */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * if packet split is enabled we can only support up
+ * to max frags + 1 descriptors.
+ */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+#if (MAX_SKB_FRAGS < 3)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#elif (MAX_SKB_FRAGS < 7)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#elif (MAX_SKB_FRAGS < 15)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#endif
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+ }
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ }
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2445,8 +2529,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
- dev_kfree_skb(rx_buffer_info->skb);
+ struct sk_buff *skb = rx_buffer_info->skb;
rx_buffer_info->skb = NULL;
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
}
if (!rx_buffer_info->page)
continue;
@@ -3187,8 +3276,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
if (hw->mac.type == ixgbe_mac_82598EB)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- else if (hw->mac.type == ixgbe_mac_82599EB)
+ else if (hw->mac.type == ixgbe_mac_82599EB) {
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
+ }
#ifdef CONFIG_IXGBE_DCB
/* Configure DCB traffic classes */
@@ -3772,9 +3864,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
if (hw->mac.type == ixgbe_mac_82599EB) {
+ u64 rsc_count = 0;
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rsc_count += adapter->rx_ring[i].rsc_count;
+ adapter->rsc_count = rsc_count;
}
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4749,6 +4845,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ if (adapter->flags & IXGBE_FLAG_RSC_ENABLED)
+ netdev->features |= NETIF_F_LRO;
+
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 375f0d4..bdfdf3b 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -443,6 +443,21 @@
#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+/* HW RSC registers */
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + ((_i - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000
+
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
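A note on the MAXDESC ladder in the ixgbe_main.c hunk above: with packet
split enabled, each coalesced descriptor contributes one page fragment to
the merged skb, so the chain length must stay within MAX_SKB_FRAGS. A
hypothetical runtime equivalent of the preprocessor selection, under that
reading (not part of the patch):

/* Pick the largest RSC descriptor cap whose chain still fits within
 * max_frags page fragments when packet split is enabled. */
static u32 rscctl_maxdesc(bool ps_enabled, unsigned int max_frags)
{
        if (!ps_enabled)
                return IXGBE_RSCCTL_MAXDESC_16;
        if (max_frags < 3)
                return IXGBE_RSCCTL_MAXDESC_1;   /* 1 descriptor max */
        if (max_frags < 7)
                return IXGBE_RSCCTL_MAXDESC_4;   /* up to 4 descriptors */
        if (max_frags < 15)
                return IXGBE_RSCCTL_MAXDESC_8;   /* up to 8 descriptors */
        return IXGBE_RSCCTL_MAXDESC_16;          /* up to 16 descriptors */
}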
* [net-next PATCH 2/3] ixgbe: Interrupt management update for 82599
2009-04-28 8:42 [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599 Jeff Kirsher
@ 2009-04-28 8:42 ` Jeff Kirsher
2009-04-28 8:55 ` David Miller
2009-04-28 8:43 ` [net-next PATCH 3/3] ixgbe: Clear out stray tx work on link down Jeff Kirsher
2009-04-28 8:55 ` [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599 David Miller
2 siblings, 1 reply; 10+ messages in thread
From: Jeff Kirsher @ 2009-04-28 8:42 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Shannon Nelson, Jeff Kirsher
From: Nelson, Shannon <shannon.nelson@intel.com>
Update the interrupt management to correctly handle greater
than 16 queue vectors.
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
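The core of the change is a register-selection rule: the 82598 exposes a
single queue-vector mask through EICS/EIMS/EIMC, while the 82599 spreads a
64-bit mask across the corresponding _EX register pair. A standalone sketch
of that rule as applied to a single vector bit (userspace model, not the
driver code):

#include <stdint.h>

enum eic_bank { BANK_EICS, BANK_EICS_EX0, BANK_EICS_EX1 };

/* Decide which register bank a single-bit 64-bit vector mask belongs to,
 * and the 32-bit value to write there. */
static enum eic_bank pick_bank(int is_82598, uint64_t v_idx, uint32_t *val)
{
        if (is_82598) {
                *val = (uint32_t)v_idx;          /* one 32-bit register */
                return BANK_EICS;
        }
        if (v_idx & 0xFFFFFFFFull) {             /* bit in the low dword */
                *val = (uint32_t)v_idx;
                return BANK_EICS_EX0;
        }
        *val = (uint32_t)(v_idx >> 32);          /* bit in the high dword */
        return BANK_EICS_EX1;
}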
drivers/net/ixgbe/ixgbe_main.c | 90 ++++++++++++++++++++++------------------
1 files changed, 50 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 599808b..60c3a21 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -326,8 +326,18 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
}
/* re-arm the interrupt */
- if (count >= tx_ring->work_limit)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+ if (count >= tx_ring->work_limit) {
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ tx_ring->v_idx);
+ else if (tx_ring->v_idx & 0xFFFFFFFF)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0),
+ tx_ring->v_idx);
+ else
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1),
+ (tx_ring->v_idx >> 32));
+ }
+
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
@@ -1173,7 +1183,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
rx_ring = &(adapter->rx_ring[r_idx]);
/* disable interrupts on this vector only */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+ else if (rx_ring->v_idx & 0xFFFFFFFF)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx);
+ else
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1),
+ (rx_ring->v_idx >> 32));
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1187,6 +1203,23 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
return IRQ_HANDLED;
}
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ } else {
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+ }
+ /* skip the flush */
+}
+
/**
* ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
* @napi: napi struct with our devices info in it
@@ -1219,7 +1252,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
if (adapter->itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+ ixgbe_irq_enable_queues(adapter, rx_ring->v_idx);
}
return work_done;
@@ -1241,7 +1274,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
struct ixgbe_ring *rx_ring = NULL;
int work_done = 0, i;
long r_idx;
- u16 enable_mask = 0;
+ u64 enable_mask = 0;
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
@@ -1268,7 +1301,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
if (adapter->itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+ ixgbe_irq_enable_queues(adapter, enable_mask);
return 0;
}
@@ -1488,7 +1521,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
u32 mask;
- mask = IXGBE_EIMS_ENABLE_MASK;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1498,14 +1532,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- /* enable the rest of the queue vectors */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
- (IXGBE_EIMS_RTX_QUEUE << 16));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
- ((IXGBE_EIMS_RTX_QUEUE << 16) |
- IXGBE_EIMS_RTX_QUEUE));
- }
+ ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
}
@@ -1629,10 +1656,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ } else {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1644,18 +1673,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
}
}
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
-{
- u32 mask = IXGBE_EIMS_RTX_QUEUE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
- (mask << 16 | mask));
- }
- /* skip the flush */
-}
-
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
*
@@ -2721,7 +2738,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
if (adapter->itr_setting & 1)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter);
+ ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
}
return work_done;
}
@@ -4012,16 +4029,9 @@ static void ixgbe_watchdog(unsigned long data)
break;
case ixgbe_mac_82599EB:
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- /*
- * EICS(0..15) first 0-15 q vectors
- * EICS[1] (16..31) q vectors 16-31
- * EICS[2] (0..31) q vectors 32-63
- */
- IXGBE_WRITE_REG(hw, IXGBE_EICS,
- (u32)(eics & 0xFFFF));
+ IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
+ (u32)(eics & 0xFFFFFFFF));
IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
- (u32)(eics & 0xFFFF0000));
- IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2),
(u32)(eics >> 32));
} else {
/*
* [net-next PATCH 3/3] ixgbe: Clear out stray tx work on link down
2009-04-28 8:42 [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599 Jeff Kirsher
2009-04-28 8:42 ` [net-next PATCH 2/3] ixgbe: Interrupt management update " Jeff Kirsher
@ 2009-04-28 8:43 ` Jeff Kirsher
2009-04-28 8:55 ` David Miller
2009-04-28 8:55 ` [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599 David Miller
2 siblings, 1 reply; 10+ messages in thread
From: Jeff Kirsher @ 2009-04-28 8:43 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Shannon Nelson, Jeff Kirsher
From: Nelson, Shannon <shannon.nelson@intel.com>
Ayyappan at VMware noticed that ixgbe is missing this check, which is
present in our other drivers. This implementation differs from our other
drivers in that it checks all the Tx queues rather than just tx[0].
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
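The pending-work test itself is just a producer/consumer index comparison
per ring; a minimal sketch of the idea (hypothetical types, not the driver
structs):

struct tx_ring_idx { unsigned int next_to_use, next_to_clean; };

/* A ring still holds unfinished Tx work whenever the producer index
 * (next_to_use) differs from the consumer index (next_to_clean). */
static int any_tx_pending(const struct tx_ring_idx *rings, int nrings)
{
        int i;

        for (i = 0; i < nrings; i++)
                if (rings[i].next_to_use != rings[i].next_to_clean)
                        return 1;
        return 0;
}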
drivers/net/ixgbe/ixgbe_main.c | 22 ++++++++++++++++++++++
1 files changed, 22 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 60c3a21..35ff703 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4117,6 +4117,9 @@ static void ixgbe_watchdog_task(struct work_struct *work)
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = adapter->link_speed;
bool link_up = adapter->link_up;
+ int i;
+ struct ixgbe_ring *tx_ring;
+ int some_tx_pending = 0;
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -4174,6 +4177,25 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
}
+ if (!netif_carrier_ok(netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+ if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+ some_tx_pending = 1;
+ break;
+ }
+ }
+
+ if (some_tx_pending) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
ixgbe_update_stats(adapter);
adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
* Re: [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599
2009-04-28 8:42 [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599 Jeff Kirsher
2009-04-28 8:42 ` [net-next PATCH 2/3] ixgbe: Interrupt management update " Jeff Kirsher
2009-04-28 8:43 ` [net-next PATCH 3/3] ixgbe: Clear out stray tx work on link down Jeff Kirsher
@ 2009-04-28 8:55 ` David Miller
2009-04-29 0:12 ` Brandeburg, Jesse
2 siblings, 1 reply; 10+ messages in thread
From: David Miller @ 2009-04-28 8:55 UTC (permalink / raw)
To: jeffrey.t.kirsher; +Cc: netdev, gospo, alexander.h.duyck
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Tue, 28 Apr 2009 01:42:37 -0700
> From: Alexander Duyck <alexander.h.duyck@intel.com>
>
> This patch enables hardware receive side coalescing (RSC) for 82599 hardware.
> The 82599 can merge multiple frames from the same TCP/IP flow into a single
> structure that can span one or more descriptors. The accumulated data is
> arranged similarly to a jumbo frame, with the exception that descriptors for
> other packets can be interleaved in between. To handle this, the written-back
> descriptor includes a next pointer indicating the next descriptor in the
> write-back sequence.
>
> This feature sets the NETIF_F_LRO flag; clearing that flag via the ethtool
> set-flags operation will also disable hardware RSC.
>
> Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Applied, but.........
Since IXGBE has been converted to GRO, all of this fiddling with
LRO is a complete NOP. All of that code can be removed completely.
* Re: [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599
2009-04-28 8:55 ` [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599 David Miller
@ 2009-04-29 0:12 ` Brandeburg, Jesse
2009-04-29 0:54 ` David Miller
0 siblings, 1 reply; 10+ messages in thread
From: Brandeburg, Jesse @ 2009-04-29 0:12 UTC (permalink / raw)
To: David Miller
Cc: Kirsher, Jeffrey T, netdev@vger.kernel.org, gospo@redhat.com,
Duyck, Alexander H
On Tue, 28 Apr 2009, David Miller wrote:
> > This patch enables hardware receive side coalescing (RSC) for 82599 hardware.
> > The 82599 can merge multiple frames from the same TCP/IP flow into a single
> > structure that can span one or more descriptors. The accumulated data is
> > arranged similarly to a jumbo frame, with the exception that descriptors for
> > other packets can be interleaved in between. To handle this, the written-back
> > descriptor includes a next pointer indicating the next descriptor in the
> > write-back sequence.
> >
> > This feature sets the NETIF_F_LRO flag; clearing that flag via the ethtool
> > set-flags operation will also disable hardware RSC.
> >
> > Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
> > Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
>
> Applied, but.........
>
> Since IXGBE has been converted to GRO, all of this fiddling with
> LRO is a complete NOP. All of that code can be removed completely.
ah, you're right, but this feature enables our new hardware that can do
LRO in hardware, and then GRO can work on top of it if necessary. We
wanted some way to turn it off and overloaded the LRO flag to do so.
FYI, this is not a TOE; it just recognizes a flow and will pack data frames
together in a list of descriptors, which the driver then hands to the
stack as a large receive.
* Re: [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599
2009-04-29 0:12 ` Brandeburg, Jesse
@ 2009-04-29 0:54 ` David Miller
2009-04-29 16:16 ` Alexander Duyck
2009-04-29 16:40 ` Roland Dreier
0 siblings, 2 replies; 10+ messages in thread
From: David Miller @ 2009-04-29 0:54 UTC (permalink / raw)
To: jesse.brandeburg; +Cc: jeffrey.t.kirsher, netdev, gospo, alexander.h.duyck
From: "Brandeburg, Jesse" <jesse.brandeburg@intel.com>
Date: Tue, 28 Apr 2009 17:12:10 -0700 (Pacific Daylight Time)
> ah, you're right, but this feature enables our new hardware that can do
> LRO in hardware, and then GRO can work on top of it if necessary. We
> wanted some way to turn it off and overloaded the LRO flag to do so.
That's ugly. And the kernel is going to turn this off on you
when the user enables either forwarding or bridging.
> FYI, this is not a TOE; it just recognizes a flow and will pack data frames
> together in a list of descriptors, which the driver then hands to the
> stack as a large receive.
I know what it is.
* Re: [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599
2009-04-29 0:54 ` David Miller
@ 2009-04-29 16:16 ` Alexander Duyck
2009-04-29 16:40 ` Roland Dreier
1 sibling, 0 replies; 10+ messages in thread
From: Alexander Duyck @ 2009-04-29 16:16 UTC (permalink / raw)
To: David Miller
Cc: Brandeburg, Jesse, Kirsher, Jeffrey T, netdev@vger.kernel.org,
gospo@redhat.com
David Miller wrote:
> From: "Brandeburg, Jesse" <jesse.brandeburg@intel.com>
> Date: Tue, 28 Apr 2009 17:12:10 -0700 (Pacific Daylight Time)
>
>> ah, you're right, but this feature enables our new hardware that can do
>> LRO in hardware, and then GRO can work on top of it if necessary. We
>> wanted some way to turn it off and overloaded the LRO flag to do so.
>
> That's ugly. And the kernel is going to turn this off on you
> when the user enables either forwarding or bridging.
That would likely be a desirable consequence. If the interface is used
for forwarding or bridging, we would probably want to turn the feature
off, since it has many of the same limitations as software LRO. Then we
can let software GRO take over and handle all of the coalescing.
Thanks,
Alex
* Re: [net-next PATCH 1/3] ixgbe: enable HW RSC for 82599
2009-04-29 0:54 ` David Miller
2009-04-29 16:16 ` Alexander Duyck
@ 2009-04-29 16:40 ` Roland Dreier
1 sibling, 0 replies; 10+ messages in thread
From: Roland Dreier @ 2009-04-29 16:40 UTC (permalink / raw)
To: David Miller
Cc: jesse.brandeburg, jeffrey.t.kirsher, netdev, gospo,
alexander.h.duyck
> > ah, you're right, but this feature enables our new hardware that can do
> > LRO in hardware, and then GRO can work on top of it if necessary. We
> > wanted some way to turn it off and overloaded the LRO flag to do so.
> That's ugly. And the kernel is going to turn this off on you
> when the user enables either forwarding or bridging.
No comment on ugliness. However, it seems like a good feature if the
kernel turns off hardware LRO when forwarding/bridging is turned on --
after all, we don't want the hardware munging wire frames in that case,
for exactly the same reasons that we don't want the kernel LRO munging
frames.
- R.