From: Nick Child <nnac123@linux.ibm.com>
To: netdev@vger.kernel.org
Cc: Nick Child <nnac123@linux.ibm.com>
Subject: [PATCH v2 net-next 2/3] ibmveth: Implement multi queue on xmit
Date: Wed, 28 Sep 2022 16:43:49 -0500 [thread overview]
Message-ID: <20220928214350.29795-2-nnac123@linux.ibm.com> (raw)
In-Reply-To: <20220928214350.29795-1-nnac123@linux.ibm.com>
The `ndo_start_xmit` function is protected by a spinlock on the tx queue
being used to transmit the skb. Allow concurrent calls to
`ndo_start_xmit` by using more than one tx queue. This allows for
greater throughput when several jobs are trying to transmit data.
Introduce 16 tx queues (leaving the single rx queue as is), each of
which corresponds to one DMA-mapped long-term buffer.
Signed-off-by: Nick Child <nnac123@linux.ibm.com>
---
drivers/net/ethernet/ibm/ibmveth.c | 69 +++++++++++++++++-------------
drivers/net/ethernet/ibm/ibmveth.h | 5 ++-
2 files changed, 43 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 675eaeed7a7b..7abd67c2336e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -538,20 +538,22 @@ static int ibmveth_open(struct net_device *netdev)
goto out_unmap_buffer_list;
}
- adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
- adapter->tx_ltb_ptr = kzalloc(adapter->tx_ltb_size, GFP_KERNEL);
- if (!adapter->tx_ltb_ptr) {
- netdev_err(netdev,
- "unable to allocate transmit long term buffer\n");
- goto out_unmap_buffer_list;
- }
- adapter->tx_ltb_dma = dma_map_single(dev, adapter->tx_ltb_ptr,
- adapter->tx_ltb_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, adapter->tx_ltb_dma)) {
- netdev_err(netdev,
- "unable to DMA map transmit long term buffer\n");
- goto out_unmap_tx_dma;
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++) {
+ adapter->tx_ltb_ptr[i] = kzalloc(adapter->tx_ltb_size,
+ GFP_KERNEL);
+ if (!adapter->tx_ltb_ptr[i]) {
+ netdev_err(netdev,
+ "unable to allocate transmit long term buffer\n");
+ goto out_free_tx_ltb_ptrs;
+ }
+ adapter->tx_ltb_dma[i] = dma_map_single(dev,
+ adapter->tx_ltb_ptr[i],
+ adapter->tx_ltb_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, adapter->tx_ltb_dma[i])) {
+ netdev_err(netdev, "unable to DMA map transmit long term buffer\n");
+ goto out_unmap_tx_dma;
+ }
}
adapter->rx_queue.index = 0;
@@ -614,7 +616,7 @@ static int ibmveth_open(struct net_device *netdev)
netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
netdev_dbg(netdev, "open complete\n");
@@ -631,7 +633,14 @@ static int ibmveth_open(struct net_device *netdev)
DMA_BIDIRECTIONAL);
out_unmap_tx_dma:
- kfree(adapter->tx_ltb_ptr);
+ kfree(adapter->tx_ltb_ptr[i]);
+
+out_free_tx_ltb_ptrs:
+ while (--i >= 0) {
+ dma_unmap_single(dev, adapter->tx_ltb_dma[i],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[i]);
+ }
out_unmap_buffer_list:
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
@@ -661,7 +670,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
if (!adapter->pool_config)
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -695,9 +704,11 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_unmap_single(dev, adapter->tx_ltb_dma, adapter->tx_ltb_size,
- DMA_TO_DEVICE);
- kfree(adapter->tx_ltb_ptr);
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++) {
+ dma_unmap_single(dev, adapter->tx_ltb_dma[i],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[i]);
+ }
netdev_dbg(netdev, "close complete\n");
@@ -1027,15 +1038,13 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- unsigned int desc_flags;
+ unsigned int desc_flags, total_bytes;
union ibmveth_buf_desc desc;
- int i;
+ int i, queue_num = skb_get_queue_mapping(skb);
unsigned long mss = 0;
- size_t total_bytes;
if (ibmveth_is_packet_unsupported(skb, netdev))
goto out;
-
/* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
((skb->protocol == htons(ETH_P_IP) &&
@@ -1088,14 +1097,14 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
netdev->stats.tx_dropped++;
goto out;
}
- memcpy(adapter->tx_ltb_ptr, skb->data, skb_headlen(skb));
+ memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
total_bytes = skb_headlen(skb);
/* Copy frags into mapped buffers */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- memcpy(adapter->tx_ltb_ptr + total_bytes, skb_frag_address_safe(frag),
- skb_frag_size(frag));
+ memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
+ skb_frag_address_safe(frag), skb_frag_size(frag));
total_bytes += skb_frag_size(frag);
}
@@ -1106,7 +1115,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
goto out;
}
desc.fields.flags_len = desc_flags | skb->len;
- desc.fields.address = adapter->tx_ltb_dma;
+ desc.fields.address = adapter->tx_ltb_dma[queue_num];
/* finish writing to long_term_buff before VIOS accessing it */
dma_wmb();
@@ -1599,7 +1608,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
+ netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
if (!netdev)
return -ENOMEM;
@@ -1666,6 +1675,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
+ adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
+
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
netdev_dbg(netdev, "registering netdev...\n");
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index a46ead9b31de..daf6f615c03f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -99,6 +99,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
+#define IBMVETH_MAX_QUEUES 16U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -138,9 +139,9 @@ struct ibmveth_adapter {
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
- void *tx_ltb_ptr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
unsigned int tx_ltb_size;
- dma_addr_t tx_ltb_dma;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
--
2.31.1
next prev parent reply other threads:[~2022-09-28 21:44 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-09-28 21:43 [PATCH v2 net-next 1/3] ibmveth: Copy tx skbs into a premapped buffer Nick Child
2022-09-28 21:43 ` Nick Child [this message]
2022-09-28 21:43 ` [PATCH v2 net-next 3/3] ibmveth: Ethtool set queue support Nick Child
2022-09-30 11:50 ` [PATCH v2 net-next 1/3] ibmveth: Copy tx skbs into a premapped buffer patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220928214350.29795-2-nnac123@linux.ibm.com \
--to=nnac123@linux.ibm.com \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).