From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, gospo@redhat.com,
	Alexander Duyck <alexander.h.duyck@intel.com>,
	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Subject: [net-next-2.6 PATCH 3/7] igb: when number of CPUs > 4 combine tx/rx queues to allow more queues
Date: Thu, 12 Nov 2009 20:37:19 -0800
Message-ID: <20091113043718.1240.62193.stgit@localhost.localdomain>
In-Reply-To: <20091113043604.1240.80142.stgit@localhost.localdomain>

From: Alexander Duyck <alexander.h.duyck@intel.com>

This patch allows NICs such as the 82576 and newer to support more
hardware queues on systems with more than 4 CPUs.  By combining each
tx/rx queue pair onto a single interrupt vector, up to 8 queue pairs
can be supported within the same vector budget, instead of the 4
separate tx and rx queues supported previously.
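
In terms of MSI-X vector accounting, the change can be sketched as
follows (a minimal standalone sketch, not the driver code itself;
igb_queue_vectors() is a hypothetical helper named only for
illustration):

	/*
	 * With queue pairing, a tx/rx pair shares one vector, so the
	 * 8 vectors reserved for queues (MAX_Q_VECTORS) can serve 8
	 * queue pairs; without pairing the same budget only covers
	 * 4 rx queues plus 4 tx queues.
	 */
	static unsigned int igb_queue_vectors(unsigned int rss_queues,
					      int queue_pairs)
	{
		unsigned int numvecs = rss_queues;	/* one per rx queue */

		if (!queue_pairs)
			numvecs += rss_queues;		/* one per tx queue */

		return numvecs;
	}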

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---

 drivers/net/igb/igb.h      |    9 +++++----
 drivers/net/igb/igb_main.c |   31 ++++++++++++++++++++++---------
 2 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3298f5a..2bb9549 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -59,10 +59,10 @@ struct igb_adapter;
 #define MAX_Q_VECTORS                      8
 
 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES     (adapter->vfs_allocated_count ? \
-                               (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
-#define IGB_MAX_TX_QUEUES     IGB_MAX_RX_QUEUES
-#define IGB_ABS_MAX_TX_QUEUES     4
+#define IGB_MAX_RX_QUEUES                  (adapter->vfs_allocated_count ? 2 : \
+                                           (hw->mac.type > e1000_82575 ? 8 : 4))
+#define IGB_ABS_MAX_TX_QUEUES              8
+#define IGB_MAX_TX_QUEUES                  IGB_MAX_RX_QUEUES
 
 #define IGB_MAX_VF_MC_ENTRIES              30
 #define IGB_MAX_VF_FUNCTIONS               8
@@ -315,6 +315,7 @@ struct igb_adapter {
 	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
+	u32 rss_queues;
 };
 
 #define IGB_FLAG_HAS_MSI           (1 << 0)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index d72d484..0235220 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -296,10 +296,10 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 		 * and continue consuming queues in the same sequence
 		 */
 		if (adapter->vfs_allocated_count) {
-			for (; i < adapter->num_rx_queues; i++)
+			for (; i < adapter->rss_queues; i++)
 				adapter->rx_ring[i].reg_idx = rbase_offset +
 				                              Q_IDX_82576(i);
-			for (; j < adapter->num_tx_queues; j++)
+			for (; j < adapter->rss_queues; j++)
 				adapter->tx_ring[j].reg_idx = rbase_offset +
 				                              Q_IDX_82576(j);
 		}
@@ -618,14 +618,15 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int numvecs, i;
 
 	/* Number of supported queues. */
-	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
-	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
+	adapter->num_rx_queues = adapter->rss_queues;
+	adapter->num_tx_queues = adapter->rss_queues;
 
 	/* start with one vector for every rx queue */
 	numvecs = adapter->num_rx_queues;
 
 	/* if tx handler is seperate add 1 for every tx queue */
-	numvecs += adapter->num_tx_queues;
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+		numvecs += adapter->num_tx_queues;
 
 	/* store the number of vectors reserved for queues */
 	adapter->num_q_vectors = numvecs;
@@ -666,6 +667,7 @@ msi_only:
 	}
 #endif
 	adapter->vfs_allocated_count = 0;
+	adapter->rss_queues = 1;
 	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
@@ -1824,6 +1826,17 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 		adapter->vfs_allocated_count = max_vfs;
 
 #endif /* CONFIG_PCI_IOV */
+	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+
+	/*
+	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
+	 * then we should combine the queues into a queue pair in order to
+	 * conserve interrupts due to limited supply
+	 */
+	if ((adapter->rss_queues > 4) ||
+	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
+		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+
 	/* This call may decrease the number of queues */
 	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -2015,7 +2028,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 		}
 	}
 
-	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
+	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
 		int r_idx = i % adapter->num_tx_queues;
 		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
 	}
@@ -2199,7 +2212,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 		array_wr32(E1000_RSSRK(0), j, rsskey);
 	}
 
-	num_rx_queues = adapter->num_rx_queues;
+	num_rx_queues = adapter->rss_queues;
 
 	if (adapter->vfs_allocated_count) {
 		/* 82575 and 82576 supports 2 RSS queues for VMDq */
@@ -2255,7 +2268,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
 			wr32(E1000_VT_CTL, vtctl);
 		}
-		if (adapter->num_rx_queues > 1)
+		if (adapter->rss_queues > 1)
 			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
 		else
 			mrqc = E1000_MRQC_ENABLE_VMDQ;
@@ -2385,7 +2398,7 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
 	/* clear all bits that might not be set */
 	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
 
-	if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
+	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
 		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
 	/*
 	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
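
For a concrete feel for the igb_sw_init() logic above, the following
standalone sketch restates the queue-pair decision with assumed example
values (16 online CPUs, no VFs, an 82576-class part); it is
illustrative only and not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int online_cpus = 16;	/* assumed for illustration */
		unsigned int vfs = 0;		/* no SR-IOV VFs allocated */
		/* IGB_MAX_RX_QUEUES: 2 with VFs, else 8 on 82576 and newer */
		unsigned int max_rx = vfs ? 2 : 8;
		unsigned int rss_queues =
			online_cpus < max_rx ? online_cpus : max_rx;
		/* pairing kicks in above 4 queues, or when more than 6
		 * VFs leave only a couple of RSS queues to share */
		int queue_pairs = rss_queues > 4 ||
				  (rss_queues > 1 && vfs > 6);

		/* prints: rss_queues=8 queue_pairs=1 vectors=8 */
		printf("rss_queues=%u queue_pairs=%d vectors=%u\n",
		       rss_queues, queue_pairs,
		       queue_pairs ? rss_queues : 2 * rss_queues);
		return 0;
	}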

