* [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node
@ 2010-01-07 4:48 Jeff Kirsher
2010-01-07 4:48 ` [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware Jeff Kirsher
` (4 more replies)
0 siblings, 5 replies; 25+ messages in thread
From: Jeff Kirsher @ 2010-01-07 4:48 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Peter P Waskiewicz Jr, Jeff Kirsher
From: Jesse Brandeburg <jesse.brandeburg@intel.com>
The current driver's default policy is to do all of its memory
allocation on whichever node the processor running insmod/modprobe
happens to belong to. This is less than optimal on multi-node systems.
With this patch, the driver's default mode of operation is to allocate
the resources for each subsequent transmit/receive queue on the next
NUMA node in turn. The most efficient configuration is then to bind each
queue's interrupt to a CPU on the node where that queue's memory was
allocated.
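For illustration only (not part of this patch): the interrupt binding
described above can be done from user space by writing a CPU mask to
/proc/irq/<n>/smp_affinity. A minimal C sketch, assuming the queue's IRQ
number and a CPU on the allocation node are already known:

#include <stdio.h>

/* Pin an IRQ to a single CPU so the queue's interrupt runs on the
 * node where its ring memory was allocated. Assumes cpu < 32 so a
 * single 32-bit mask word is enough. */
static int pin_irq_to_cpu(int irq, int cpu)
{
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", irq);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%x\n", 1u << cpu);
        return fclose(f);
}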
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe.h | 2 ++
drivers/net/ixgbe/ixgbe_main.c | 30 +++++++++++++++++++++++++++---
2 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8da8eb5..998b8d9 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -379,6 +379,8 @@ struct ixgbe_adapter {
u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
+
+ int node;
};
enum ixbge_state_t {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2ad754c..6895de7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3741,7 +3741,8 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
}
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -4041,6 +4042,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4060,7 +4064,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vmalloc_node(size, adapter->node);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4100,8 +4104,15 @@ err:
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (!err)
continue;
@@ -4109,6 +4120,9 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
break;
}
+ /* reset the node back to its starting value */
+ adapter->node = orig_node;
+
return err;
}
@@ -4126,7 +4140,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
@@ -4170,8 +4184,15 @@ alloc_failed:
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (!err)
continue;
@@ -4179,6 +4200,9 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
break;
}
+ /* reset the node back to its starting value */
+ adapter->node = orig_node;
+
return err;
}
* [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware
2010-01-07 4:48 [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Jeff Kirsher
@ 2010-01-07 4:48 ` Jeff Kirsher
2010-01-08 8:21 ` David Miller
2010-01-07 4:49 ` [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support Jeff Kirsher
` (3 subsequent siblings)
4 siblings, 1 reply; 25+ messages in thread
From: Jeff Kirsher @ 2010-01-07 4:48 UTC (permalink / raw)
To: davem; +Cc: netdev, gospo, Peter P Waskiewicz Jr, Jeff Kirsher
From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
This patch allocates the ring structures themselves on the
appropriate NUMA node along with the buffer_info structures. This
way we don't allocate the entire ring memory in one big block on a
single node, thus reducing cross-node memory traffic.
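In miniature, the structural change is from one contiguous array
allocated on a single node to an array of per-queue pointers, each
allocated on its own node. A hedged sketch of the before/after shapes
(node_for_queue() is a hypothetical stand-in for the round-robin node
walk from patch 1):

/* before: all rings in one block on the insmod node */
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                           sizeof(struct ixgbe_ring), GFP_KERNEL);

/* after: one struct per queue, each placed on its own node */
for (i = 0; i < adapter->num_tx_queues; i++)
        adapter->tx_ring[i] = kzalloc_node(sizeof(struct ixgbe_ring),
                                           GFP_KERNEL,
                                           node_for_queue(adapter, i));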
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe.h | 7 +
drivers/net/ixgbe/ixgbe_ethtool.c | 71 +++++-----
drivers/net/ixgbe/ixgbe_fcoe.c | 4 -
drivers/net/ixgbe/ixgbe_main.c | 270 +++++++++++++++++++------------------
4 files changed, 185 insertions(+), 167 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 998b8d9..5e60358 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -147,7 +147,7 @@ struct ixgbe_ring {
#ifdef CONFIG_IXGBE_DCA
/* cpu for tx queue */
- int cpu;
+ u8 cpu;
#endif
u16 work_limit; /* max work per interrupt */
@@ -156,6 +156,7 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
+ u8 numa_node;
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
@@ -277,7 +278,7 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
u32 tx_timeout_count;
bool detect_tx_hung;
@@ -286,7 +287,7 @@ struct ixgbe_adapter {
u64 lsc_int;
/* RX */
- struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
int num_rx_queues;
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 0bd49d3..d468c03 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -834,8 +834,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring = adapter->tx_ring;
- struct ixgbe_ring *rx_ring = adapter->rx_ring;
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
ring->rx_max_pending = IXGBE_MAX_RXD;
ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +867,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring[0]->count) &&
+ (new_rx_count == adapter->rx_ring[0]->count)) {
/* nothing to do */
return 0;
}
@@ -878,25 +878,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
- goto err_setup;
+ goto clear_reset;
}
- temp_tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
+ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
if (!temp_tx_ring) {
err = -ENOMEM;
- goto err_setup;
+ goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
err = ixgbe_setup_tx_resources(adapter,
&temp_tx_ring[i]);
@@ -904,28 +903,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (i) {
i--;
ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ &temp_tx_ring[i]);
}
- goto err_setup;
+ goto clear_reset;
}
}
need_update = true;
}
- temp_rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if ((!temp_rx_ring) && (need_update)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
- kfree(temp_tx_ring);
+ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_rx_ring) {
err = -ENOMEM;
goto err_setup;
}
if (new_rx_count != adapter->rx_ring_count) {
- memcpy(temp_rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter,
&temp_rx_ring[i]);
@@ -947,22 +942,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
- kfree(adapter->tx_ring);
- adapter->tx_ring = temp_tx_ring;
- temp_tx_ring = NULL;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter,
+ adapter->tx_ring[i]);
+ memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
- kfree(adapter->rx_ring);
- adapter->rx_ring = temp_rx_ring;
- temp_rx_ring = NULL;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter,
+ adapter->rx_ring[i]);
+ memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
ixgbe_up(adapter);
}
+
+ vfree(temp_rx_ring);
err_setup:
+ vfree(temp_tx_ring);
+clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
@@ -1007,13 +1012,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
@@ -1627,7 +1632,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0].reg_idx;
+ int j = adapter->rx_ring[0]->reg_idx;
u32 k;
for (k = 0; k < 10; k++) {
if (IXGBE_READ_REG(&adapter->hw,
@@ -2000,7 +2005,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2053,7 +2058,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index da32a10..ff09706 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6895de7..ceeef52 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -451,7 +451,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -479,7 +479,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
{
u32 txctrl;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
@@ -513,12 +513,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].cpu = -1;
- ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+ adapter->tx_ring[i]->cpu = -1;
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].cpu = -1;
- ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+ adapter->rx_ring[i]->cpu = -1;
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
}
}
@@ -989,7 +989,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
+ j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
@@ -999,7 +999,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
+ j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
@@ -1134,7 +1134,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1149,7 +1149,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1268,7 +1268,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- &adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
@@ -1327,7 +1327,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1355,7 +1355,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1385,7 +1385,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1394,7 +1394,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1425,7 +1425,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1466,7 +1466,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, ring);
@@ -1482,7 +1482,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, ring);
@@ -1493,7 +1493,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
@@ -1526,7 +1526,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1711,8 +1711,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
- struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -1817,10 +1817,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0].total_packets = 0;
- adapter->tx_ring[0].total_bytes = 0;
- adapter->rx_ring[0].total_packets = 0;
- adapter->rx_ring[0].total_bytes = 0;
+ adapter->tx_ring[0]->total_packets = 0;
+ adapter->tx_ring[0]->total_bytes = 0;
+ adapter->rx_ring[0]->total_packets = 0;
+ adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -1950,7 +1950,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = &adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
j = ring->reg_idx;
tdba = ring->dma;
tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1960,8 +1960,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_TDH(j);
- adapter->tx_ring[i].tail = IXGBE_TDT(j);
+ adapter->tx_ring[i]->head = IXGBE_TDH(j);
+ adapter->tx_ring[i]->tail = IXGBE_TDT(j);
/*
* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
@@ -2090,7 +2090,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
u32 rscctrl;
int rx_buf_len;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
j = rx_ring->reg_idx;
rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2184,7 +2184,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2194,7 +2194,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
+ rx_ring = adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2361,7 +2361,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 ctrl;
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2522,7 +2522,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl |= 32;
@@ -2539,7 +2539,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2579,7 +2579,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].atr_sample_rate =
+ adapter->tx_ring[i]->atr_sample_rate =
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2589,8 +2589,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+ (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2673,7 +2673,7 @@ link_cfg_out:
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
int rxr)
{
- int j = adapter->rx_ring[rxr].reg_idx;
+ int j = adapter->rx_ring[rxr]->reg_idx;
int k;
for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2687,8 +2687,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
"not set within the polling period\n", rxr);
}
- ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr]->count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2770,7 +2770,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl |= (8 << 16);
@@ -2784,14 +2784,14 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -2865,7 +2865,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -3029,7 +3029,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -3041,7 +3041,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3081,7 +3081,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3121,13 +3121,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
@@ -3393,9 +3393,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
} else {
ret = false;
@@ -3422,8 +3422,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i].reg_idx = i << 3;
- adapter->tx_ring[i].reg_idx = i << 2;
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3441,18 +3441,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
- adapter->tx_ring[i].reg_idx = i << 5;
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 2) << 4);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 8) << 3);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
@@ -3465,12 +3465,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
*
* Rx TC0-TC3 are offset by 32 queues each
*/
- adapter->tx_ring[0].reg_idx = 0;
- adapter->tx_ring[1].reg_idx = 64;
- adapter->tx_ring[2].reg_idx = 96;
- adapter->tx_ring[3].reg_idx = 112;
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i].reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
} else {
@@ -3503,9 +3503,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
@@ -3533,8 +3533,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
@@ -3565,8 +3565,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
ret = true;
}
@@ -3588,8 +3588,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
- adapter->rx_ring[0].reg_idx = 0;
- adapter->tx_ring[0].reg_idx = 0;
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
@@ -3619,33 +3619,59 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
-
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ goto err_tx_ring_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ goto err_rx_ring_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
ixgbe_cache_ring_register(adapter);
return 0;
err_rx_ring_allocation:
- kfree(adapter->tx_ring);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ kfree(adapter->tx_ring[i]);
err_tx_ring_allocation:
return -ENOMEM;
}
@@ -3869,10 +3895,16 @@ err_set_interrupt:
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
@@ -4064,7 +4096,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc_node(size, adapter->node);
+ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4104,25 +4136,15 @@ err:
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
- int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
break;
}
- /* reset the node back to its starting value */
- adapter->node = orig_node;
-
return err;
}
@@ -4140,7 +4162,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
@@ -4184,25 +4206,15 @@ alloc_failed:
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
- int orig_node = adapter->node;
for (i = 0; i < adapter->num_rx_queues; i++) {
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
break;
}
- /* reset the node back to its starting value */
- adapter->node = orig_node;
-
return err;
}
@@ -4239,8 +4251,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
}
/**
@@ -4276,8 +4288,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
}
/**
@@ -4554,8 +4566,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i].rsc_count;
- rsc_flush += adapter->rx_ring[i].rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
@@ -4563,11 +4575,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i]->restart_queue;
adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4806,7 +4818,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters \n");
@@ -4903,7 +4915,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = &adapter->tx_ring[i];
+ tx_ring = adapter->tx_ring[i];
if (tx_ring->next_to_use != tx_ring->next_to_clean) {
some_tx_pending = 1;
break;
@@ -5402,7 +5414,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
}
- tx_ring = &adapter->tx_ring[skb->queue_mapping];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
* [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-07 4:48 [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Jeff Kirsher
2010-01-07 4:48 ` [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware Jeff Kirsher
@ 2010-01-07 4:49 ` Jeff Kirsher
2010-01-08 8:23 ` David Miller
2010-01-11 19:32 ` Ben Hutchings
2010-01-07 4:49 ` [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter Jeff Kirsher
` (2 subsequent siblings)
4 siblings, 2 replies; 25+ messages in thread
From: Jeff Kirsher @ 2010-01-07 4:49 UTC (permalink / raw)
To: davem
Cc: netdev, gospo, Peter P Waskiewicz Jr, Jeff Kirsher, Luca Deri,
Joseph Gasparakis
From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
This patch enables the ethtool layer to program n-tuple
filters into an underlying device. The idea is to allow capable
hardware to have static rules applied that can assist in steering
flows into appropriate queues.
Hardware known to support these types of filters today
includes ixgbe and niu.
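As a usage illustration (a hedged user-space sketch, not part of this
patch; error handling trimmed), a TCP/IPv4 filter steering destination
port 80 to Rx queue 2 could be programmed through the new command
roughly like this, given headers from a kernel with this patch applied:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int steer_port80_to_queue2(int fd, const char *ifname)
{
        struct ethtool_rx_ntuple nt;
        struct ifreq ifr;

        memset(&nt, 0, sizeof(nt));
        nt.cmd = ETHTOOL_SRXNTUPLE;
        nt.fs.flow_type = TCP_V4_FLOW;
        nt.fs.h_u.tcp_ip4_spec.pdst = htons(80);
        nt.fs.action = 2;   /* Rx queue 2; ETHTOOL_RXNTUPLE_ACTION_DROP drops */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&nt;
        return ioctl(fd, SIOCETHTOOL, &ifr);
}

Here fd is any AF_INET socket. How zero-valued fields and masks are
interpreted is left to the driver; see patch 5 for the convention
ixgbe adopts.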
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Luca Deri <deri@ntop.org>
CC: Joseph Gasparakis <joseph.gasparakis@intel.com>
---
include/linux/ethtool.h | 34 ++++++++++++++++++++++++++++++++++
net/core/ethtool.c | 16 ++++++++++++++++
2 files changed, 50 insertions(+), 0 deletions(-)
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index ef4a2d8..dd65192 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -363,6 +363,35 @@ struct ethtool_rxnfc {
__u32 rule_locs[0];
};
+struct ethtool_rx_ntuple_flow_spec {
+ __u32 flow_type;
+ union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_tcpip4_spec udp_ip4_spec;
+ struct ethtool_tcpip4_spec sctp_ip4_spec;
+ struct ethtool_ah_espip4_spec ah_ip4_spec;
+ struct ethtool_ah_espip4_spec esp_ip4_spec;
+ struct ethtool_rawip4_spec raw_ip4_spec;
+ struct ethtool_ether_spec ether_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ __u8 hdata[64];
+ } h_u, m_u; /* entry, mask */
+
+ __u16 vlan_tag;
+ __u16 vlan_tag_mask;
+ __u64 data; /* user-defined flow spec data */
+ __u64 data_mask; /* user-defined flow spec mask */
+
+ /* signed to distinguish between queue and actions (DROP) */
+ __s32 action;
+#define ETHTOOL_RXNTUPLE_ACTION_DROP -1
+};
+
+struct ethtool_rx_ntuple {
+ __u32 cmd;
+ struct ethtool_rx_ntuple_flow_spec fs;
+};
+
#define ETHTOOL_FLASH_MAX_FILENAME 128
enum ethtool_flash_op_type {
ETHTOOL_FLASH_ALL_REGIONS = 0,
@@ -500,6 +529,8 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
+ int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
+ int (*get_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *, void *);
};
#endif /* __KERNEL__ */
@@ -559,6 +590,9 @@ struct ethtool_ops {
#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
#define ETHTOOL_RESET 0x00000034 /* Reset hardware */
+#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
+#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
+
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee58..96b3144 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -266,6 +266,19 @@ err_out:
return ret;
}
+static int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rx_ntuple cmd;
+
+ if (!dev->ethtool_ops->set_rx_ntuple)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_rx_ntuple(dev, &cmd);
+}
+
static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
{
struct ethtool_regs regs;
@@ -1112,6 +1125,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_RESET:
rc = ethtool_reset(dev, useraddr);
break;
+ case ETHTOOL_SRXNTUPLE:
+ rc = ethtool_set_rx_ntuple(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
* [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter
2010-01-07 4:48 [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Jeff Kirsher
2010-01-07 4:48 ` [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware Jeff Kirsher
2010-01-07 4:49 ` [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support Jeff Kirsher
@ 2010-01-07 4:49 ` Jeff Kirsher
2010-01-08 8:24 ` David Miller
2010-01-07 4:49 ` [net-next-2.6 PATCH 5/5] ixgbe: Add support for the new ethtool n-tuple programming interface Jeff Kirsher
2010-01-14 11:02 ` [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Andi Kleen
4 siblings, 1 reply; 25+ messages in thread
From: Jeff Kirsher @ 2010-01-07 4:49 UTC (permalink / raw)
To: davem
Cc: netdev, gospo, Peter P Waskiewicz Jr, Jeff Kirsher, Luca Deri,
Joseph Gasparakis
From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
This patch adds the ability to change the Flow Director behavior in
ixgbe through a module parameter. On 82599 hardware, ixgbe can support
either hash-based or perfect filtering. It currently uses hash-based
filtering only, so it needs a knob to enable perfect filtering.
This will be used in conjunction with the ethtool n-tuple filter
programming support.
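As a usage note, loading the driver with "modprobe ixgbe
fdir_filter_mode=1" then selects perfect filtering; as the code below
is written, any non-zero value does, since only 0 keeps the hash-based
default.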
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Luca Deri <deri@ntop.org>
CC: Joseph Gasparakis <joseph.gasparakis@intel.com>
---
drivers/net/ixgbe/ixgbe_main.c | 16 ++++++++++++++--
1 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ceeef52..13d413a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -124,6 +124,11 @@ static struct notifier_block dca_notifier = {
};
#endif
+static unsigned int fdir_filter_mode;
+module_param(fdir_filter_mode, uint, 0);
+MODULE_PARM_DESC(fdir_filter_mode, "Flow Director filtering mode: 0 - hash, "
+ "1 - perfect");
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -4002,10 +4007,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ if (fdir_filter_mode == 0) {
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ } else {
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ }
+
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->atr_sample_rate = 20;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
* [net-next-2.6 PATCH 5/5] ixgbe: Add support for the new ethtool n-tuple programming interface
2010-01-07 4:48 [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Jeff Kirsher
` (2 preceding siblings ...)
2010-01-07 4:49 ` [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter Jeff Kirsher
@ 2010-01-07 4:49 ` Jeff Kirsher
2010-01-08 8:26 ` David Miller
2010-01-14 11:02 ` [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Andi Kleen
4 siblings, 1 reply; 25+ messages in thread
From: Jeff Kirsher @ 2010-01-07 4:49 UTC (permalink / raw)
To: davem
Cc: netdev, gospo, Peter P Waskiewicz Jr, Jeff Kirsher, Luca Deri,
Joseph Gasparakis
From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
This patch adds n-tuple filter programming support for the 82599,
wiring the new ethtool interface up to the Flow Director perfect
filters.
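A hedged sketch of the mask convention this implementation adopts
(struct and field names from patch 3, untested): for a rule dropping
all TCP traffic to destination port 5001, the IPs and source port are
left zero so the driver wildcards them, the non-zero port with a zero
mask is matched exactly, and the drop action routes the flow to drop
queue 127:

struct ethtool_rx_ntuple_flow_spec fs;

memset(&fs, 0, sizeof(fs));
fs.flow_type = TCP_V4_FLOW;
fs.h_u.tcp_ip4_spec.pdst = htons(5001); /* non-zero value, zero mask: exact match */
fs.action = ETHTOOL_RXNTUPLE_ACTION_DROP;       /* steered to drop queue 127 */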
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Luca Deri <deri@ntop.org>
CC: Joseph Gasparakis <joseph.gasparakis@intel.com>
---
drivers/net/ixgbe/ixgbe.h | 4 +
drivers/net/ixgbe/ixgbe_82599.c | 106 ++++++++++++++++++++++++++++++++-----
drivers/net/ixgbe/ixgbe_ethtool.c | 81 ++++++++++++++++++++++++++++
drivers/net/ixgbe/ixgbe_type.h | 9 +++
4 files changed, 187 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 5e60358..eba2880 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -429,6 +429,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue);
extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 5383405..57c92f7 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1434,6 +1434,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+ /* Initialize the drop queue to Rx queue 127 */
+ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@@ -1623,6 +1626,7 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
* ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
* @input: input stream to modify
* @vlan: the VLAN id to load
+ * @vlan_mask: bitwise mask for the VLAN
**/
s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
{
@@ -1636,6 +1640,7 @@ s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
* ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
* @input: input stream to modify
* @src_addr: the IP address to load
+ * @src_addr_mask: bitwise mask for the source IP address
**/
s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
{
@@ -1653,6 +1658,7 @@ s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
* ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
* @input: input stream to modify
* @dst_addr: the IP address to load
+ * @dst_addr_mask: bitwise mask for the destination IP address
**/
s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
{
@@ -1675,8 +1681,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
* @src_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4)
{
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1718,8 +1724,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
* @dst_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4)
{
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1756,6 +1762,7 @@ s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
* ixgbe_atr_set_src_port_82599 - Sets the source port
* @input: input stream to modify
* @src_port: the source port to load
+ * @src_port_mask: bitwise mask for the source port
**/
s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
{
@@ -1769,6 +1776,7 @@ s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
* ixgbe_atr_set_dst_port_82599 - Sets the destination port
* @input: input stream to modify
* @dst_port: the destination port to load
+ * @dst_port_mask: bitwise mask for the destination port
**/
s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
{
@@ -1797,7 +1805,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
* @vm_pool: the Virtual Machine pool to load
**/
s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 vm_pool)
+ u8 vm_pool)
{
input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
@@ -1821,8 +1829,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
* @input: input stream to search
* @vlan: the VLAN id to load
**/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 *vlan)
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
{
*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2078,23 +2085,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
+ * @input_masks: bitwise masks for relevant fields
+ * @soft_id: software index into the silicon hash tables for filter storage
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- u16 soft_id,
- u8 queue)
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4, dst_ipv4;
+ u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
+ u8 fdirm = 0;
/* Get our input values */
ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2149,7 +2159,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2158,7 +2167,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+ if (src_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+
+ if (dst_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ default:
+ /* this already would have failed above */
+ break;
+ }
+
+ /* Program the last mask register, FDIRM */
+ if (input_masks->vlan_id_mask || !vlan_id)
+ /* Mask both VLAN and VLANP - bits 0 and 1 */
+ fdirm |= 0x3;
+
+ if (input_masks->data_mask || !flex_bytes)
+ /* Flex bytes need masking, so mask the whole thing - bit 4 */
+ fdirm |= 0x10;
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ fdirm |= 0x24;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d468c03..9235699 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -2158,6 +2158,86 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
}
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+ struct ethtool_rx_ntuple *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+ struct ixgbe_atr_input input_struct;
+ struct ixgbe_atr_input_masks input_masks;
+ int target_queue;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if we're not in perfect filter mode, or
+ * if the action is a queue greater than the number of online Tx
+ * queues.
+ */
+ if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ||
+ (fs.action >= adapter->num_tx_queues))
+ return -EINVAL;
+
+ memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+ input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+ input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+ input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+ input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+ input_masks.vlan_id_mask = fs.vlan_tag_mask;
+ /* only use the lowest 2 bytes for flex bytes */
+ input_masks.data_mask = (fs.data_mask & 0xffff);
+
+ switch (fs.flow_type) {
+ case TCP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ break;
+ case UDP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Mask bits from the inputs based on user-supplied mask */
+ ixgbe_atr_set_src_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+ ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+ /* 82599 expects these to be byte-swapped for perfect filtering */
+ ixgbe_atr_set_src_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+ ixgbe_atr_set_dst_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+ /* VLAN and Flex bytes are either completely masked or not */
+ if (!fs.vlan_tag_mask)
+ ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+ if (!input_masks.data_mask)
+ /* make sure we only use the first 2 bytes of user data */
+ ixgbe_atr_set_flex_byte_82599(&input_struct,
+ (fs.data & 0xffff));
+
+ /* determine if we need to drop or route the packet */
+ if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ target_queue = MAX_RX_QUEUES - 1;
+ else
+ target_queue = fs.action;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+ &input_masks, 0, target_queue);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2193,6 +2273,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ixgbe_set_flags,
+ .set_rx_ntuple = ixgbe_set_rx_ntuple,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 84650c6..6950f62 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2109,6 +2109,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
+struct ixgbe_atr_input_masks {
+ u32 src_ip_mask;
+ u32 dst_ip_mask;
+ u16 src_port_mask;
+ u16 dst_port_mask;
+ u16 vlan_id_mask;
+ u16 data_mask;
+};
+
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
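
As a side note on the masking convention in
ixgbe_fdir_add_perfect_filter_82599() above: a field value of zero in the
input means "wildcard", so the whole field is masked out, while a non-zero
field gets the user-supplied mask programmed as-is. A minimal sketch of
that rule for the port masks (the helper is illustrative and not part of
the patch; FDIRTCPM and FDIRUDPM carry the source-port mask in the low 16
bits and the destination-port mask in the high 16 bits):

static u32 fdir_port_mask(u16 src_port, u16 src_mask,
			  u16 dst_port, u16 dst_mask)
{
	u32 m = src_port ? src_mask : 0xffff;	/* 0 == wildcard source */

	m |= (u32)(dst_port ? dst_mask : 0xffff) << 16; /* 0 == wildcard dest */
	return m;
}

Computed this way, each read-modify-write pair in the TCP and UDP cases
above would collapse into a single register write.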
* Re: [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware
2010-01-07 4:48 ` [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware Jeff Kirsher
@ 2010-01-08 8:21 ` David Miller
2010-01-08 8:25 ` Waskiewicz Jr, Peter P
0 siblings, 1 reply; 25+ messages in thread
From: David Miller @ 2010-01-08 8:21 UTC (permalink / raw)
To: jeffrey.t.kirsher; +Cc: netdev, gospo, peter.p.waskiewicz.jr
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Wed, 06 Jan 2010 20:48:46 -0800
> @@ -147,7 +147,7 @@ struct ixgbe_ring {
>
> #ifdef CONFIG_IXGBE_DCA
> /* cpu for tx queue */
> - int cpu;
> + u8 cpu;
> #endif
>
> u16 work_limit; /* max work per interrupt */
Is truncating cpu and node numbers to 8 bits ok? I really don't
see how it can be fine, even for DCA.
This is especially the case since dca3_get_tag() and the
DCA ->get_tag() callback explicitly take an 'int' argument
too.
* Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-07 4:49 ` [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support Jeff Kirsher
@ 2010-01-08 8:23 ` David Miller
2010-01-08 8:34 ` Waskiewicz Jr, Peter P
2010-01-11 19:32 ` Ben Hutchings
1 sibling, 1 reply; 25+ messages in thread
From: David Miller @ 2010-01-08 8:23 UTC (permalink / raw)
To: jeffrey.t.kirsher
Cc: netdev, gospo, peter.p.waskiewicz.jr, deri, joseph.gasparakis
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Wed, 06 Jan 2010 20:49:06 -0800
> @@ -500,6 +529,8 @@ struct ethtool_ops {
> int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
> int (*flash_device)(struct net_device *, struct ethtool_flash *);
> int (*reset)(struct net_device *, u32 *);
> + int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
> + int (*get_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *, void *);
> };
> #endif /* __KERNEL__ */
>
> @@ -559,6 +590,9 @@ struct ethtool_ops {
> #define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
> #define ETHTOOL_RESET 0x00000034 /* Reset hardware */
>
> +#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
> +#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
> +
Command value ETHTOOL_GRXNTUPLE and the ethtool_ops callback
->get_rx_ntuple() for it are declared, but I see no implementation
added to net/core/ethtool.c
* Re: [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter
2010-01-07 4:49 ` [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter Jeff Kirsher
@ 2010-01-08 8:24 ` David Miller
2010-01-08 8:28 ` Waskiewicz Jr, Peter P
0 siblings, 1 reply; 25+ messages in thread
From: David Miller @ 2010-01-08 8:24 UTC (permalink / raw)
To: jeffrey.t.kirsher
Cc: netdev, gospo, peter.p.waskiewicz.jr, deri, joseph.gasparakis
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Wed, 06 Jan 2010 20:49:29 -0800
> From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
>
> This patch adds the ability to change the Flow Director behavior in
> ixgbe through a module parameter. ixgbe, on 82599 hardware, can support
> hash-based or perfect-based filtering. It currently uses hash-based, and
> should have a knob to enable perfect-based filtering.
>
> This will be used in conjunction with the ethtool n-tuple filter
> programming support.
>
> Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Use a generic ethtool boolean flag or similar, not "yet another
different-in-every-driver" module parameter please.
* Re: [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware
2010-01-08 8:21 ` David Miller
@ 2010-01-08 8:25 ` Waskiewicz Jr, Peter P
2010-01-08 8:31 ` David Miller
0 siblings, 1 reply; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-08 8:25 UTC (permalink / raw)
To: David Miller
Cc: Kirsher, Jeffrey T, netdev@vger.kernel.org, gospo@redhat.com,
Waskiewicz Jr, Peter P
On Fri, 8 Jan 2010, David Miller wrote:
> From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
> Date: Wed, 06 Jan 2010 20:48:46 -0800
>
> > @@ -147,7 +147,7 @@ struct ixgbe_ring {
> >
> > #ifdef CONFIG_IXGBE_DCA
> > /* cpu for tx queue */
> > - int cpu;
> > + u8 cpu;
> > #endif
> >
> > u16 work_limit; /* max work per interrupt */
>
> Is truncating cpu and node numbers to 8 bits ok? I really don't
> see how it can be fine, even for DCA.
In our hardware (82598 and 82599), the CPU field in the DCA registers is 8
bits.
The reason I truncated this was to fit these values into the
first cacheline of the struct. I also didn't figure we'd see a system
with more than 255 NUMA nodes before 10 GbE was on the shelf
collecting dust. :-)
> This is especially the case since dca3_get_tag() and the
> DCA ->get_tag() callback explicitly take an 'int' argument
> too.
Cheers,
-PJ
* Re: [net-next-2.6 PATCH 5/5] ixgbe: Add support for the new ethtool n-tuple programming interface
2010-01-07 4:49 ` [net-next-2.6 PATCH 5/5] ixgbe: Add support for the new ethtool n-tuple programming interface Jeff Kirsher
@ 2010-01-08 8:26 ` David Miller
0 siblings, 0 replies; 25+ messages in thread
From: David Miller @ 2010-01-08 8:26 UTC (permalink / raw)
To: jeffrey.t.kirsher
Cc: netdev, gospo, peter.p.waskiewicz.jr, deri, joseph.gasparakis
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Wed, 06 Jan 2010 20:49:53 -0800
> + /*
> + * Don't allow programming if we're not in perfect filter mode, or
> + * if the action is a queue greater than the number of online Tx
> + * queues.
> + */
> + if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ||
> + (fs.action >= adapter->num_tx_queues))
> + return -EINVAL;
This is yet another argument for making the perfect filter mode
control an ethtool setting, why should the user have to reload the
module just to get that boolean setting fixed up when they want to
load filters?
* Re: [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter
2010-01-08 8:24 ` David Miller
@ 2010-01-08 8:28 ` Waskiewicz Jr, Peter P
2010-01-08 8:32 ` David Miller
0 siblings, 1 reply; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-08 8:28 UTC (permalink / raw)
To: David Miller
Cc: Kirsher, Jeffrey T, netdev@vger.kernel.org, gospo@redhat.com,
Waskiewicz Jr, Peter P, deri@ntop.org, Gasparakis, Joseph
On Fri, 8 Jan 2010, David Miller wrote:
> From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
> Date: Wed, 06 Jan 2010 20:49:29 -0800
>
> > From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
> >
> > This patch adds the ability to change the Flow Director behavior in
> > ixgbe through a module parameter. ixgbe, on 82599 hardware, can support
> > hash-based or perfect-based filtering. It currently uses hash-based, and
> > should have a knob to enable perfect-based filtering.
> >
> > This will be used in conjunction with the ethtool n-tuple filter
> > programming support.
> >
> > Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
> > Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
>
> Use a generic ethtool boolean flag or similar, not "yet another
> different-in-every-driver" module parameter please.
Something added to the setflags routine perhaps?
-PJ
* Re: [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware
2010-01-08 8:25 ` Waskiewicz Jr, Peter P
@ 2010-01-08 8:31 ` David Miller
2010-01-08 8:36 ` Waskiewicz Jr, Peter P
0 siblings, 1 reply; 25+ messages in thread
From: David Miller @ 2010-01-08 8:31 UTC (permalink / raw)
To: peter.p.waskiewicz.jr; +Cc: jeffrey.t.kirsher, netdev, gospo
From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
Date: Fri, 8 Jan 2010 00:25:26 -0800 (Pacific Standard Time)
> In our hardware (82598 and 82599), the CPU field in the DCA registers is 8
> bits.
>
> The reason I truncated this was to fit these values into the
> first cacheline of the struct. I also didn't figure we'd see a system
> with more than 255 NUMA nodes before 10 GbE was on the shelf
> collecting dust. :-)
The X86 port supports 9 for "NODES_SHIFT", and up to 4096 cpus.
Even when !MAXSMP, the cpu limit is 512.
Don't make the type ungeneric just because some current piece of
hardware has that limitation. This is how subtle bugs slip into the
tree, and it makes auditing for such bugs a nightmare.
Thanks.
* Re: [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter
2010-01-08 8:28 ` Waskiewicz Jr, Peter P
@ 2010-01-08 8:32 ` David Miller
2010-01-08 8:36 ` Waskiewicz Jr, Peter P
0 siblings, 1 reply; 25+ messages in thread
From: David Miller @ 2010-01-08 8:32 UTC (permalink / raw)
To: peter.p.waskiewicz.jr
Cc: jeffrey.t.kirsher, netdev, gospo, deri, joseph.gasparakis
From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
Date: Fri, 8 Jan 2010 00:28:17 -0800 (Pacific Standard Time)
> On Fri, 8 Jan 2010, David Miller wrote:
>
>> Use a generic ethtool boolean flag or similar, not "yet another
>> different-in-every-driver" module parameter please.
>
> Something added to the setflags routine perhaps?
Yeah, something via 'struct ethtool_flags' is probably appropriate.
* Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-08 8:23 ` David Miller
@ 2010-01-08 8:34 ` Waskiewicz Jr, Peter P
2010-01-08 8:38 ` David Miller
0 siblings, 1 reply; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-08 8:34 UTC (permalink / raw)
To: David Miller
Cc: Kirsher, Jeffrey T, netdev@vger.kernel.org, gospo@redhat.com,
Waskiewicz Jr, Peter P, deri@ntop.org, Gasparakis, Joseph
On Fri, 8 Jan 2010, David Miller wrote:
> From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
> Date: Wed, 06 Jan 2010 20:49:06 -0800
>
> > @@ -500,6 +529,8 @@ struct ethtool_ops {
> > int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
> > int (*flash_device)(struct net_device *, struct ethtool_flash *);
> > int (*reset)(struct net_device *, u32 *);
> > + int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
> > + int (*get_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *, void *);
> > };
> > #endif /* __KERNEL__ */
> >
> > @@ -559,6 +590,9 @@ struct ethtool_ops {
> > #define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
> > #define ETHTOOL_RESET 0x00000034 /* Reset hardware */
> >
> > +#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
> > +#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
> > +
>
> Command value ETHTOOL_GRXNTUPLE and the ethtool_ops callback
> ->get_rx_ntuple() for it are declared, but I see no implementation
> added to net/core/ethtool.c
Yes, I left that in there somewhat accidentally. I had every intention
of putting a get routine in, but I couldn't come up with a generic way to
work with most hardware (how to get all filters back and dump them). The
82599 needs to query the hardware for each filter currently programmed,
and I'm not sure what the niu hardware layout is for the filters.
I can remove the get stubs for now, since I have no generic way to
dump the filters.
Thanks,
-PJ
* Re: [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware
2010-01-08 8:31 ` David Miller
@ 2010-01-08 8:36 ` Waskiewicz Jr, Peter P
0 siblings, 0 replies; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-08 8:36 UTC (permalink / raw)
To: David Miller
Cc: Waskiewicz Jr, Peter P, Kirsher, Jeffrey T,
netdev@vger.kernel.org, gospo@redhat.com
On Fri, 8 Jan 2010, David Miller wrote:
> From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
> Date: Fri, 8 Jan 2010 00:25:26 -0800 (Pacific Standard Time)
>
> > In our hardware (82598 and 82599), the CPU field in the DCA registers is 8
> > bits.
> >
> > The reason I truncated this was to fit these values into the
> > first cacheline of the struct. I also didn't figure we'd see a system
> > with more than 255 NUMA nodes before 10 GbE was on the shelf
> > collecting dust. :-)
>
> The X86 port supports 9 for "NODES_SHIFT", and up to 4096 cpus.
>
> Even when !MAXSMP, the cpu limit is 512.
>
> Don't make the type ungeneric just because some current piece of
> hardware has that limitation. This is how subtle bugs slip into the
> tree, and it makes auditing for such bugs a nightmare.
>
> Thanks.
Understood. Let me see if I can shuffle things around in the struct to
keep my cacheline packed, and not screw with the data type sizes. I'll
respin and have Jeff push a new patch once I get it sorted out.
Thanks for the review Dave,
-PJ
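
For illustration, the kind of shuffle being promised here might look like
the sketch below. The field set is abridged and, apart from cpu and
work_limit, made up; this is not the real ixgbe_ring layout:

struct example_ring {
	void *desc;		/* hot: descriptor ring memory */
	u16 work_limit;		/* hot: max work per interrupt */
	u16 count;		/* hot: number of descriptors */
#ifdef CONFIG_IXGBE_DCA
	int cpu;		/* cpu for tx queue, kept full width */
#endif
	/* colder, configuration-time fields land on later cache lines */
} ____cacheline_aligned_in_smp;

The point is that member ordering and alignment, not a narrower type,
keep the hot data on the first cache line.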
* Re: [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter
2010-01-08 8:32 ` David Miller
@ 2010-01-08 8:36 ` Waskiewicz Jr, Peter P
0 siblings, 0 replies; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-08 8:36 UTC (permalink / raw)
To: David Miller
Cc: Waskiewicz Jr, Peter P, Kirsher, Jeffrey T,
netdev@vger.kernel.org, gospo@redhat.com, deri@ntop.org,
Gasparakis, Joseph
On Fri, 8 Jan 2010, David Miller wrote:
> From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
> Date: Fri, 8 Jan 2010 00:28:17 -0800 (Pacific Standard Time)
>
> > On Fri, 8 Jan 2010, David Miller wrote:
> >
> >> Use a generic ethtool boolean flag or similar, not "yet another
> >> different-in-every-driver" module parameter please.
> >
> > Something added to the setflags routine perhaps?
>
> Yeah, something via 'struct ethtool_flags' is probably appropriate.
Cool. I'll respin and get rid of the module parameter.
Thanks for the review,
-PJ
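
For reference, a rough sketch of what the ethtool-flag version of this
knob could look like in the driver's set_flags path. The flag name and
bit value here are assumptions for illustration, not taken from these
patches:

#define ETH_FLAG_NTUPLE	(1 << 27)	/* assumed: n-tuple filters enabled */

static int example_set_flags(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (data & ETH_FLAG_NTUPLE)
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	else
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

	/* a real implementation would also reinitialize the Flow
	 * Director tables when the filtering mode changes */
	return 0;
}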
* Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-08 8:34 ` Waskiewicz Jr, Peter P
@ 2010-01-08 8:38 ` David Miller
2010-01-08 8:42 ` Waskiewicz Jr, Peter P
0 siblings, 1 reply; 25+ messages in thread
From: David Miller @ 2010-01-08 8:38 UTC (permalink / raw)
To: peter.p.waskiewicz.jr
Cc: jeffrey.t.kirsher, netdev, gospo, deri, joseph.gasparakis
From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
Date: Fri, 8 Jan 2010 00:34:23 -0800 (Pacific Standard Time)
> Yes, I left that in there somewhat accidentally. I had every intention
> of putting a get routine in, but I couldn't come up with a generic way to
> work with most hardware (how to get all filters back and dump them). The
> 82599 needs to query the hardware for each filter currently programmed,
> and I'm not sure what the niu hardware layout is for the filters.
NIU simply has a TCAM array that you can read the values back from.
> I can remove the get stubs for now, since I have no generic way to
> dump the filters.
For hardware where it's difficult to read the values back, you
can maintain a copy of the current filters in software.
Just an idea. If you'll need something like this for multiple
drivers anyway, we can probably put the helper code into ethtool.c
and just maintain a software copy in the kernel for all devices,
even ones for which fetching is straightforward.
And anyway, I can't see this being a usable interface if the current
set of filters can't be queried by the user.
* Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-08 8:38 ` David Miller
@ 2010-01-08 8:42 ` Waskiewicz Jr, Peter P
0 siblings, 0 replies; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-08 8:42 UTC (permalink / raw)
To: David Miller
Cc: Waskiewicz Jr, Peter P, Kirsher, Jeffrey T,
netdev@vger.kernel.org, gospo@redhat.com, deri@ntop.org,
Gasparakis, Joseph
On Fri, 8 Jan 2010, David Miller wrote:
> From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
> Date: Fri, 8 Jan 2010 00:34:23 -0800 (Pacific Standard Time)
>
> > Yes, I left that in there somewhat accidentally. I had every intention
> > of putting a get routine in, but I couldn't come up with a generic way to
> > work with most hardware (how to get all filters back and dump them). The
> > 82599 needs to query the hardware for each filter currently programmed,
> > and I'm not sure what the niu hardware layout is for the filters.
>
> NIU simply has a TCAM array that you can read the values back from.
I wish we had that in our hardware. :-)
>
> > I can remove the get stubs for now, since I have no generic way to
> > dump the filters.
>
> For hardware where it's difficult to read the values back, you
> can maintain a copy of the current filters in software.
>
> Just an idea. If you'll need something like this for multiple
> drivers anyway, we can probably put the helper code into ethtool.c
> and just maintain a software copy in the kernel for all devices,
> even ones for which fetching is straightforward.
It's a good idea. I'll go with that.
> And anyway, I can't see this being a usable interface if the current
> set of filters can't be queried by the user.
I can agree with that. Thinking of user applications written to maintain
these filters, it'd be nice to have a way to populate a View tab or
something.
I'll implement the get portion and resend.
Thanks Dave for the inputs,
-PJ
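
A minimal sketch of the software-copy idea being agreed on here (all
names are illustrative): the core would keep each successfully
programmed filter on a per-device list, and the get path would walk
that list instead of reading the hardware back.

struct ntuple_cache_node {
	struct list_head list;
	struct ethtool_rx_ntuple_flow_spec fs;
};

static int ntuple_cache_add(struct list_head *cache,
			    const struct ethtool_rx_ntuple_flow_spec *fs)
{
	struct ntuple_cache_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->fs = *fs;		/* remember exactly what was programmed */
	list_add_tail(&node->list, cache);
	return 0;
}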
* Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-07 4:49 ` [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support Jeff Kirsher
2010-01-08 8:23 ` David Miller
@ 2010-01-11 19:32 ` Ben Hutchings
2010-01-12 19:13 ` Waskiewicz Jr, Peter P
1 sibling, 1 reply; 25+ messages in thread
From: Ben Hutchings @ 2010-01-11 19:32 UTC (permalink / raw)
To: Jeff Kirsher
Cc: davem, netdev, gospo, Peter P Waskiewicz Jr, Luca Deri,
Joseph Gasparakis
On Wed, 2010-01-06 at 20:49 -0800, Jeff Kirsher wrote:
> From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
>
> This patchset enables the ethtool layer to program n-tuple
> filters to an underlying device. The idea is to allow capable
> hardware to have static rules applied that can assist steering
> flows into appropriate queues.
[...]
> @@ -500,6 +529,8 @@ struct ethtool_ops {
> int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
> int (*flash_device)(struct net_device *, struct ethtool_flash *);
> int (*reset)(struct net_device *, u32 *);
> + int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
> + int (*get_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *, void *);
> };
> #endif /* __KERNEL__ */
>
[...]
Is it really necessary to add new driver operations? It seems to me
that it would be preferable to extend {get,set}_rx_nfc() and have the
ethtool common code convert between the ethtool_rxnfc and
ethtool_rx_ntuple structures. Does that seem possible?
Ben.
--
Ben Hutchings, Senior Software Engineer, Solarflare Communications
Not speaking for my employer; that's the marketing department's job.
They asked us to note that Solarflare product names are trademarked.
* RE: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-11 19:32 ` Ben Hutchings
@ 2010-01-12 19:13 ` Waskiewicz Jr, Peter P
2010-01-21 2:54 ` David Miller
0 siblings, 1 reply; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-12 19:13 UTC (permalink / raw)
To: Ben Hutchings, Kirsher, Jeffrey T
Cc: davem@davemloft.net, netdev@vger.kernel.org, gospo@redhat.com,
Luca Deri, Gasparakis, Joseph
>-----Original Message-----
>From: Ben Hutchings [mailto:bhutchings@solarflare.com]
>Sent: Monday, January 11, 2010 11:33 AM
>To: Kirsher, Jeffrey T
>Cc: davem@davemloft.net; netdev@vger.kernel.org; gospo@redhat.com;
>Waskiewicz Jr, Peter P; Luca Deri; Gasparakis, Joseph
>Subject: Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter
>programming support
>
>On Wed, 2010-01-06 at 20:49 -0800, Jeff Kirsher wrote:
>> From: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
>>
>> This patchset enables the ethtool layer to program n-tuple
>> filters to an underlying device. The idea is to allow capable
>> hardware to have static rules applied that can assist steering
>> flows into appropriate queues.
>[...]
>> @@ -500,6 +529,8 @@ struct ethtool_ops {
>> int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
>> int (*flash_device)(struct net_device *, struct ethtool_flash *);
>> int (*reset)(struct net_device *, u32 *);
>> + int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
>> + int (*get_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *, void *);
>> };
>> #endif /* __KERNEL__ */
>>
>[...]
>
>Is it really necessary to add new driver operations? It seems to me
>that it would be preferable to extend {get,set}_rx_nfc() and have the
>ethtool common code convert between the ethtool_rxnfc and
>ethtool_rx_ntuple structures. Does that seem possible?
>
It is possible, but I'm not sure it's the right way to go. The nfc routines are just flipping the various engines on in niu, whereas the ntuple routines pass full filter data through for the different filters. Also, I think keeping them separate makes the niu programming cleaner, since niu can support both the nfc and ntuple modes.
Cheers,
-PJ
* Re: [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node
2010-01-07 4:48 [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Jeff Kirsher
` (3 preceding siblings ...)
2010-01-07 4:49 ` [net-next-2.6 PATCH 5/5] ixgbe: Add support for the new ethtool n-tuple programming interface Jeff Kirsher
@ 2010-01-14 11:02 ` Andi Kleen
2010-01-19 16:10 ` Waskiewicz Jr, Peter P
4 siblings, 1 reply; 25+ messages in thread
From: Andi Kleen @ 2010-01-14 11:02 UTC (permalink / raw)
To: Jeff Kirsher; +Cc: davem, netdev, gospo, Peter P Waskiewicz Jr
Jeff Kirsher <jeffrey.t.kirsher@intel.com> writes:
> enum ixbge_state_t {
> diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
> index 2ad754c..6895de7 100644
> --- a/drivers/net/ixgbe/ixgbe_main.c
> +++ b/drivers/net/ixgbe/ixgbe_main.c
> @@ -3741,7 +3741,8 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
> }
>
> for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
> - q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
> + q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
> + GFP_KERNEL, adapter->node);
The problem with doing this is that the node might be full or out of
memory, and k*alloc_node will then fail.
So you would need a fallback to be reliable. (We probably should have a
generic utility function for this somewhere, but we don't currently.)
-Andi
--
ak@linux.intel.com -- Speaking for myself only.
* RE: [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node
2010-01-14 11:02 ` [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Andi Kleen
@ 2010-01-19 16:10 ` Waskiewicz Jr, Peter P
2010-01-19 19:34 ` Andi Kleen
0 siblings, 1 reply; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-19 16:10 UTC (permalink / raw)
To: Andi Kleen, Kirsher, Jeffrey T
Cc: davem@davemloft.net, netdev@vger.kernel.org, gospo@redhat.com
>-----Original Message-----
>From: Andi Kleen [mailto:andi@firstfloor.org]
>Sent: Thursday, January 14, 2010 3:02 AM
>To: Kirsher, Jeffrey T
>Cc: davem@davemloft.net; netdev@vger.kernel.org; gospo@redhat.com;
>Waskiewicz Jr, Peter P
>Subject: Re: [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources
>per NUMA node
>
>Jeff Kirsher <jeffrey.t.kirsher@intel.com> writes:
>> enum ixbge_state_t {
>> diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
>> index 2ad754c..6895de7 100644
>> --- a/drivers/net/ixgbe/ixgbe_main.c
>> +++ b/drivers/net/ixgbe/ixgbe_main.c
>> @@ -3741,7 +3741,8 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
>> }
>>
>> for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
>> - q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
>> + q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
>> + GFP_KERNEL, adapter->node);
>
>
>The problem with doing this is that the node might be full or out of
>memory, and k*alloc_node will then fail.
>
>So you would need a fallback to be reliable. (We probably should have a
>generic utility function for this somewhere, but we don't currently.)
So you'd rather see us call kzalloc() if kzalloc_node() fails, instead of immediately failing out to err_out?
Thanks,
-PJ
* Re: [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node
2010-01-19 16:10 ` Waskiewicz Jr, Peter P
@ 2010-01-19 19:34 ` Andi Kleen
2010-01-19 19:34 ` Waskiewicz Jr, Peter P
0 siblings, 1 reply; 25+ messages in thread
From: Andi Kleen @ 2010-01-19 19:34 UTC (permalink / raw)
To: Waskiewicz Jr, Peter P
Cc: Andi Kleen, Kirsher, Jeffrey T, davem@davemloft.net,
netdev@vger.kernel.org, gospo@redhat.com
> >The problem with doing this is that the node might be full or out of
> >memory, and k*alloc_node will then fail.
> >
> >So you would need a fallback to be reliable. (We probably should have a
> >generic utility function for this somewhere, but we don't currently.)
>
> So you'd rather see us call kzalloc() if kzalloc_node() fails, instead of immediately failing out to err_out?
Yes.
After all it's only an optimization to place memory, it's not
a functional requirement.
-Andi
--
ak@linux.intel.com -- Speaking for myself only.
* RE: [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node
2010-01-19 19:34 ` Andi Kleen
@ 2010-01-19 19:34 ` Waskiewicz Jr, Peter P
0 siblings, 0 replies; 25+ messages in thread
From: Waskiewicz Jr, Peter P @ 2010-01-19 19:34 UTC (permalink / raw)
To: Andi Kleen
Cc: Kirsher, Jeffrey T, davem@davemloft.net, netdev@vger.kernel.org,
gospo@redhat.com
>-----Original Message-----
>From: Andi Kleen [mailto:andi@firstfloor.org]
>Sent: Tuesday, January 19, 2010 11:34 AM
>To: Waskiewicz Jr, Peter P
>Cc: Andi Kleen; Kirsher, Jeffrey T; davem@davemloft.net;
>netdev@vger.kernel.org; gospo@redhat.com
>Subject: Re: [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources
>per NUMA node
>
>> >The problem with doing this is that the node might be full or out of
>> >memory, and k*alloc_node will then fail.
>> >
>> >So you would need a fallback to be reliable. (We probably should have
>> >a generic utility function for this somewhere, but we don't currently.)
>>
>> So you'd rather see us call kzalloc() if kzalloc_node() fails, instead
>of immediately failing out to err_out?
>
>Yes.
>
>After all it's only an optimization to place memory, it's not
>a functional requirement.
>
>-Andi
>
Makes sense. I have the change in my local tree and will push new patches through Jeff shortly.
Thanks Andi,
-PJ
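
Concretely, the agreed fallback is a two-step allocation: try the
preferred node first, then any node, and only then fail. A sketch
against the hunk quoted earlier in this thread:

	q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
				GFP_KERNEL, adapter->node);
	if (!q_vector) {
		/* node full or memoryless: placement is only an
		 * optimization, so fall back to any node */
		q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
				   GFP_KERNEL);
	}
	if (!q_vector)
		goto err_out;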
* Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support
2010-01-12 19:13 ` Waskiewicz Jr, Peter P
@ 2010-01-21 2:54 ` David Miller
0 siblings, 0 replies; 25+ messages in thread
From: David Miller @ 2010-01-21 2:54 UTC (permalink / raw)
To: peter.p.waskiewicz.jr
Cc: bhutchings, jeffrey.t.kirsher, netdev, gospo, deri,
joseph.gasparakis
From: "Waskiewicz Jr, Peter P" <peter.p.waskiewicz.jr@intel.com>
Date: Tue, 12 Jan 2010 11:13:54 -0800
>>-----Original Message-----
>>From: Ben Hutchings [mailto:bhutchings@solarflare.com]
>>Sent: Monday, January 11, 2010 11:33 AM
>>To: Kirsher, Jeffrey T
>>Cc: davem@davemloft.net; netdev@vger.kernel.org; gospo@redhat.com;
>>Waskiewicz Jr, Peter P; Luca Deri; Gasparakis, Joseph
>>Subject: Re: [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter
>>programming support
>>
>>It it really necessary to add new driver operations? It seems to me
>>that it would be preferable to extend {get,set}_rx_nfc() and have the
>>ethtool common code convert between the ethtool_rxnfc and
>>ethtool_rx_ntuple structures. Does that seem possible?
>>
>
> It is possible, but I'm not sure it's the right way to go. The
> nfc routines are just flipping the various engines on in niu,
> whereas the ntuple routines pass full filter data through for the
> different filters. Also, I think keeping them separate makes the
> niu programming cleaner, since niu can support both the nfc and
> ntuple modes.
Agreed.
Thread overview: 25+ messages
2010-01-07 4:48 [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Jeff Kirsher
2010-01-07 4:48 ` [net-next-2.6 PATCH 2/5] ixgbe: Make descriptor ring allocations NUMA-aware Jeff Kirsher
2010-01-08 8:21 ` David Miller
2010-01-08 8:25 ` Waskiewicz Jr, Peter P
2010-01-08 8:31 ` David Miller
2010-01-08 8:36 ` Waskiewicz Jr, Peter P
2010-01-07 4:49 ` [net-next-2.6 PATCH 3/5] ethtool: Introduce n-tuple filter programming support Jeff Kirsher
2010-01-08 8:23 ` David Miller
2010-01-08 8:34 ` Waskiewicz Jr, Peter P
2010-01-08 8:38 ` David Miller
2010-01-08 8:42 ` Waskiewicz Jr, Peter P
2010-01-11 19:32 ` Ben Hutchings
2010-01-12 19:13 ` Waskiewicz Jr, Peter P
2010-01-21 2:54 ` David Miller
2010-01-07 4:49 ` [net-next-2.6 PATCH 4/5] ixgbe: Add Flow Director configuration support as a mod parameter Jeff Kirsher
2010-01-08 8:24 ` David Miller
2010-01-08 8:28 ` Waskiewicz Jr, Peter P
2010-01-08 8:32 ` David Miller
2010-01-08 8:36 ` Waskiewicz Jr, Peter P
2010-01-07 4:49 ` [net-next-2.6 PATCH 5/5] ixgbe: Add support for the new ethtool n-tuple programming interface Jeff Kirsher
2010-01-08 8:26 ` David Miller
2010-01-14 11:02 ` [net-next-2.6 PATCH 1/5] ixgbe: Allocate driver resources per NUMA node Andi Kleen
2010-01-19 16:10 ` Waskiewicz Jr, Peter P
2010-01-19 19:34 ` Andi Kleen
2010-01-19 19:34 ` Waskiewicz Jr, Peter P