From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: Lorenzo Bianconi <lorenzo@kernel.org>
Cc: netdev@vger.kernel.org, lorenzo.bianconi@redhat.com,
davem@davemloft.net, thomas.petazzoni@bootlin.com,
brouer@redhat.com, ilias.apalodimas@linaro.org,
matteo.croce@redhat.com, mw@semihalf.com
Subject: Re: [PATCH v4 net-next 2/7] net: mvneta: introduce page pool API for sw buffer manager
Date: Wed, 16 Oct 2019 18:14:50 -0700 [thread overview]
Message-ID: <20191016181450.1729a2da@cakuba.netronome.com> (raw)
In-Reply-To: <c63e3b458e6b047f167322b31553d2ba384fd3d0.1571258792.git.lorenzo@kernel.org>
On Wed, 16 Oct 2019 23:03:07 +0200, Lorenzo Bianconi wrote:
> Use the page_pool API for allocations and DMA handling instead of
> __dev_alloc_page()/dma_map_page() and free_page()/dma_unmap_page().
> Pages are unmapped using page_pool_release_page before packets
> go into the network stack.
>
> The page_pool API offers buffer recycling capabilities for XDP but
> allocates one page per packet, unless the driver splits and manages
> the allocated page.
> This is a preliminary patch to add XDP support to the mvneta driver.
>
> Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
> Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> drivers/net/ethernet/marvell/Kconfig | 1 +
> drivers/net/ethernet/marvell/mvneta.c | 77 ++++++++++++++++++++-------
> 2 files changed, 59 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
> index fb942167ee54..3d5caea096fb 100644
> --- a/drivers/net/ethernet/marvell/Kconfig
> +++ b/drivers/net/ethernet/marvell/Kconfig
> @@ -61,6 +61,7 @@ config MVNETA
> depends on ARCH_MVEBU || COMPILE_TEST
> select MVMDIO
> select PHYLINK
> + select PAGE_POOL
> ---help---
> This driver supports the network interface units in the
> Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index 128b9fded959..9e4d828787fd 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -37,6 +37,7 @@
> #include <net/ip.h>
> #include <net/ipv6.h>
> #include <net/tso.h>
> +#include <net/page_pool.h>
>
> /* Registers */
> #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
> @@ -603,6 +604,10 @@ struct mvneta_rx_queue {
> u32 pkts_coal;
> u32 time_coal;
>
> + /* page_pool */
> + struct page_pool *page_pool;
> + struct xdp_rxq_info xdp_rxq;
> +
> /* Virtual address of the RX buffer */
> void **buf_virt_addr;
>
> @@ -1815,20 +1820,14 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
> dma_addr_t phys_addr;
> struct page *page;
>
> - page = __dev_alloc_page(gfp_mask);
> + page = page_pool_alloc_pages(rxq->page_pool,
> + gfp_mask | __GFP_NOWARN);
> if (!page)
> return -ENOMEM;
>
> - /* map page for use */
> - phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
> - DMA_FROM_DEVICE);
> - if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
> - __free_page(page);
> - return -ENOMEM;
> - }
> -
> - phys_addr += pp->rx_offset_correction;
> + phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
> mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
> +
> return 0;
> }
>
> @@ -1894,10 +1893,11 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
> if (!data || !(rx_desc->buf_phys_addr))
> continue;
>
> - dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
> - PAGE_SIZE, DMA_FROM_DEVICE);
> - __free_page(data);
> + page_pool_put_page(rxq->page_pool, data, false);
> }
> + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
> + xdp_rxq_info_unreg(&rxq->xdp_rxq);
> + page_pool_destroy(rxq->page_pool);
I think you may need to set the page_pool pointer to NULL here, no?
AFAICT the ndo_stop in this driver has to be idempotent due to the
open/close-to-reconfigure dances.
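Something along these lines is roughly what I have in mind (untested
sketch, reusing the names from the hunk above):

	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	if (rxq->page_pool) {
		page_pool_destroy(rxq->page_pool);
		/* make a second pass through the teardown a no-op */
		rxq->page_pool = NULL;
	}

Otherwise a repeated stop could hand an already-destroyed pool to
page_pool_destroy().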
> }
>
> static void
> @@ -2029,8 +2029,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
> skb_add_rx_frag(rxq->skb, frag_num, page,
> frag_offset, frag_size,
> PAGE_SIZE);
> - dma_unmap_page(dev->dev.parent, phys_addr,
> - PAGE_SIZE, DMA_FROM_DEVICE);
> + page_pool_release_page(rxq->page_pool, page);
> rxq->left_size -= frag_size;
> }
> } else {
> @@ -2060,9 +2059,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
> frag_offset, frag_size,
> PAGE_SIZE);
>
> - dma_unmap_page(dev->dev.parent, phys_addr,
> - PAGE_SIZE, DMA_FROM_DEVICE);
> -
> + page_pool_release_page(rxq->page_pool, page);
> rxq->left_size -= frag_size;
> }
> } /* Middle or Last descriptor */
> @@ -2829,11 +2826,53 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
> return rx_done;
> }
>
> +static int mvneta_create_page_pool(struct mvneta_port *pp,
> + struct mvneta_rx_queue *rxq, int size)
> +{
> + struct page_pool_params pp_params = {
> + .order = 0,
> + .flags = PP_FLAG_DMA_MAP,
> + .pool_size = size,
> + .nid = cpu_to_node(0),
> + .dev = pp->dev->dev.parent,
> + .dma_dir = DMA_FROM_DEVICE,
> + };
> + int err;
> +
> + rxq->page_pool = page_pool_create(&pp_params);
> + if (IS_ERR(rxq->page_pool)) {
> + err = PTR_ERR(rxq->page_pool);
> + rxq->page_pool = NULL;
> + return err;
> + }
> +
> + err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
> + if (err < 0)
> + goto err_free_pp;
> +
> + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
> + rxq->page_pool);
> + if (err)
> + goto err_unregister_rxq;
> +
> + return 0;
> +
> +err_unregister_rxq:
> + xdp_rxq_info_unreg(&rxq->xdp_rxq);
> +err_free_pp:
> + page_pool_destroy(rxq->page_pool);
ditto
> + return err;
> +}
> +
> /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
> static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> int num)
> {
> - int i;
> + int i, err;
> +
> + err = mvneta_create_page_pool(pp, rxq, num);
> + if (err < 0)
> + return err;
Note that callers of mvneta_rxq_fill() never check the return code.
Some form of error print or such could be justified here, although
actually propagating the error code all the way back to user space
would be even better.
IMHO on device start the fill ring should be required to be filled up
completely, but that's just an opinion.
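To illustrate what I mean (purely hypothetical, in whichever caller
kicks off the fill on the open path; untested):

	ret = mvneta_rxq_fill(pp, rxq, rxq->size);
	if (ret < 0)
		return ret;	/* page pool / xdp_rxq setup failed */
	if (ret < rxq->size) {
		netdev_err(pp->dev,
			   "rxq %d: only %d of %d buffers filled\n",
			   rxq->id, ret, rxq->size);
		return -ENOMEM;	/* require a fully filled ring */
	}

But again, how strict to be about a partially filled ring is up to you.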
> for (i = 0; i < num; i++) {
> memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));