From: Thomas Falcon <tlfalcon@linux.ibm.com>
To: netdev@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org, dnbanerg@us.ibm.com,
brking@linux.vnet.ibm.com, pradeep@us.ibm.com,
drt@linux.vnet.ibm.com, sukadev@linux.vnet.ibm.com,
ljp@linux.vnet.ibm.com, cforno12@linux.ibm.com,
tlfalcon@linux.ibm.com, ricklind@linux.ibm.com
Subject: [PATCH net-next 03/12] ibmvnic: Introduce batched RX buffer descriptor transmission
Date: Thu, 12 Nov 2020 13:09:58 -0600 [thread overview]
Message-ID: <1605208207-1896-4-git-send-email-tlfalcon@linux.ibm.com> (raw)
In-Reply-To: <1605208207-1896-1-git-send-email-tlfalcon@linux.ibm.com>
Utilize the H_SEND_SUB_CRQ_INDIRECT hypervisor call to send
multiple RX buffer descriptors to the device in one hypervisor
call operation. This change reduces the number of hypervisor
calls, and thus the hypervisor call overhead, needed to transmit
RX buffer descriptors to the device.
Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
---
drivers/net/ethernet/ibm/ibmvnic.c | 57 +++++++++++++++++++-----------
1 file changed, 37 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index dd9ca06f355b..524020691ef8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -306,9 +306,11 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
int count = pool->size - atomic_read(&pool->available);
u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ struct ibmvnic_sub_crq_queue *rx_scrq;
+ union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
- union sub_crq sub_crq;
struct sk_buff *skb;
unsigned int offset;
dma_addr_t dma_addr;
@@ -320,6 +322,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
if (!pool->active)
return;
+ rx_scrq = adapter->rx_scrq[pool->index];
+ ind_bufp = &rx_scrq->ind_buf;
for (i = 0; i < count; ++i) {
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
if (!skb) {
@@ -346,12 +350,13 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->rx_buff[index].pool_index = pool->index;
pool->rx_buff[index].size = pool->buff_size;
- memset(&sub_crq, 0, sizeof(sub_crq));
- sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
- sub_crq.rx_add.correlator =
+ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
+ memset(sub_crq, 0, sizeof(*sub_crq));
+ sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
+ sub_crq->rx_add.correlator =
cpu_to_be64((u64)&pool->rx_buff[index]);
- sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
+ sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -361,15 +366,20 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
#ifdef __LITTLE_ENDIAN__
shift = 8;
#endif
- sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
-
- lpar_rc = send_subcrq(adapter, handle, &sub_crq);
- if (lpar_rc != H_SUCCESS)
- goto failure;
-
- buffers_added++;
- adapter->replenish_add_buff_success++;
+ sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
pool->next_free = (pool->next_free + 1) % pool->size;
+ if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ i == count - 1) {
+ lpar_rc =
+ send_subcrq_indirect(adapter, handle,
+ (u64)ind_bufp->indir_dma,
+ (u64)ind_bufp->index);
+ if (lpar_rc != H_SUCCESS)
+ goto failure;
+ buffers_added += ind_bufp->index;
+ adapter->replenish_add_buff_success += ind_bufp->index;
+ ind_bufp->index = 0;
+ }
}
atomic_add(buffers_added, &pool->available);
return;
@@ -377,13 +387,20 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
- pool->free_map[pool->next_free] = index;
- pool->rx_buff[index].skb = NULL;
-
- dev_kfree_skb_any(skb);
- adapter->replenish_add_buff_failure++;
- atomic_add(buffers_added, &pool->available);
+ for (i = ind_bufp->index - 1; i >= 0; --i) {
+ struct ibmvnic_rx_buff *rx_buff;
+ pool->next_free = pool->next_free == 0 ?
+ pool->size - 1 : pool->next_free - 1;
+ sub_crq = &ind_bufp->indir_arr[i];
+ rx_buff = (struct ibmvnic_rx_buff *)
+ be64_to_cpu(sub_crq->rx_add.correlator);
+ index = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = index;
+ dev_kfree_skb_any(pool->rx_buff[index].skb);
+ pool->rx_buff[index].skb = NULL;
+ }
+ ind_bufp->index = 0;
if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
* queue is closed or pending failover.
--
2.26.2
next prev parent reply other threads:[~2020-11-12 19:11 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-11-12 19:09 [PATCH net-next 00/12] ibmvnic: Performance improvements and other updates Thomas Falcon
2020-11-12 19:09 ` [PATCH net-next 01/12] ibmvnic: Ensure that subCRQ entry reads are ordered Thomas Falcon
2020-11-13 5:45 ` drt
2020-11-13 16:14 ` Brian King
2020-11-14 23:35 ` Jakub Kicinski
2020-11-16 18:28 ` Thomas Falcon
2020-11-16 18:30 ` Jakub Kicinski
2020-11-12 19:09 ` [PATCH net-next 02/12] ibmvnic: Introduce indirect subordinate Command Response Queue buffer Thomas Falcon
2020-11-13 16:17 ` Brian King
2020-11-14 23:35 ` Jakub Kicinski
2020-11-16 18:18 ` Thomas Falcon
2020-11-12 19:09 ` Thomas Falcon [this message]
2020-11-12 19:09 ` [PATCH net-next 04/12] ibmvnic: Introduce xmit_more support using batched subCRQ hcalls Thomas Falcon
2020-11-14 23:46 ` Jakub Kicinski
2020-11-16 18:40 ` Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 05/12] ibmvnic: Fix TX completion error handling Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 06/12] ibmvnic: Clean up TX code and TX buffer data structure Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 07/12] ibmvnic: Clean up TX error handling and statistics tracking Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 08/12] ibmvnic: Remove send_subcrq function Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 09/12] ibmvnic: Ensure that device queue memory is cache-line aligned Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 10/12] ibmvnic: Correctly re-enable interrupts in NAPI polling routine Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 11/12] ibmvnic: Use netdev_alloc_skb instead of alloc_skb to replenish RX buffers Thomas Falcon
2020-11-12 19:10 ` [PATCH net-next 12/12] ibmvnic: Do not replenish RX buffers after every polling loop Thomas Falcon
2020-11-13 5:52 ` drt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1605208207-1896-4-git-send-email-tlfalcon@linux.ibm.com \
--to=tlfalcon@linux.ibm.com \
--cc=brking@linux.vnet.ibm.com \
--cc=cforno12@linux.ibm.com \
--cc=dnbanerg@us.ibm.com \
--cc=drt@linux.vnet.ibm.com \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=ljp@linux.vnet.ibm.com \
--cc=netdev@vger.kernel.org \
--cc=pradeep@us.ibm.com \
--cc=ricklind@linux.ibm.com \
--cc=sukadev@linux.vnet.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).