From: Ivan Khoronzhuk
To: davem@davemloft.net, grygorii.strashko@ti.com
Cc: corbet@lwn.net, akpm@linux-foundation.org, netdev@vger.kernel.org,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-omap@vger.kernel.org, vinicius.gomes@intel.com, henrik@austad.us,
	jesus.sanchez-palencia@intel.com, ilias.apalodimas@linaro.org,
	p-varis@ti.com, spatton@ti.com, francois.ozog@linaro.org,
	yogeshs@ti.com, nsekhar@ti.com, andrew@lunn.ch, Ivan Khoronzhuk
Subject: [PATCH v4 net-next 1/6] net: ethernet: ti: cpsw: use cpdma channels in backward order for txq
Date: Sat, 21 Jul 2018 14:59:18 +0300
Message-ID: <20180721115923.1389-2-ivan.khoronzhuk@linaro.org>
In-Reply-To: <20180721115923.1389-1-ivan.khoronzhuk@linaro.org>
References: <20180721115923.1389-1-ivan.khoronzhuk@linaro.org>

The cpdma channel priority is ordered from the highest channel number
down to the lowest. The driver has a limited number of descriptors that
are shared between the cpdma channels. The number of queues can be tuned
with ethtool, which allows descriptors not to be spent on unneeded cpdma
channels. For AVB, usually 2 rate-limited tx queues are enough, and rate
limitation can be used only for the high priority queues. Thus, to use
only 2 such queues, all 8 would have to be created, which is wasteful.
So, in order to use only the needed number of rate-limited tx queues,
save resources, and still be able to set rate limitation for them,
assign tx cpdma channels to queues in backward order.

Reviewed-by: Grygorii Strashko
Signed-off-by: Ivan Khoronzhuk
---
 drivers/net/ethernet/ti/cpsw.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 00761fe59848..4425b537b9dd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -968,8 +968,8 @@ static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
 
 	/* process every unprocessed channel */
 	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
-	for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) {
-		if (!(ch_map & 0x01))
+	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+		if (!(ch_map & 0x80))
 			continue;
 
 		txv = &cpsw->txv[ch];
@@ -2432,7 +2432,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
 	void (*handler)(void *, int, int);
 	struct netdev_queue *queue;
 	struct cpsw_vector *vec;
-	int ret, *ch;
+	int ret, *ch, vch;
 
 	if (rx) {
 		ch = &cpsw->rx_ch_num;
@@ -2445,7 +2445,8 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
 	}
 
 	while (*ch < ch_num) {
-		vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+		vch = rx ? *ch : 7 - *ch;
+		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
 		queue = netdev_get_tx_queue(priv->ndev, *ch);
 		queue->tx_maxrate = 0;
 
@@ -2982,7 +2983,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	u32 slave_offset, sliver_offset, slave_size;
 	const struct soc_device_attribute *soc;
 	struct cpsw_common *cpsw;
-	int ret = 0, i;
+	int ret = 0, i, ch;
 	int irq;
 
 	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
@@ -3157,7 +3158,8 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (soc)
 		cpsw->quirk_irq = 1;
 
-	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+	ch = cpsw->quirk_irq ? 0 : 7;
+	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
 	if (IS_ERR(cpsw->txv[0].ch)) {
 		dev_err(priv->dev, "error initializing tx dma channel\n");
 		ret = PTR_ERR(cpsw->txv[0].ch);
-- 
2.17.1
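
For illustration, a minimal stand-alone C sketch of the backward
queue-to-channel mapping introduced by this patch; the helper name
txq_to_cpdma_chan() and the fixed count of 8 tx cpdma channels are
assumptions of the sketch, not part of the driver:

#include <stdio.h>

/* Assumption for the sketch: 8 tx cpdma channels, numbered 0..7. */
#define CPDMA_TX_CHANNELS	8

/* Hypothetical helper: mirrors the "7 - *ch" mapping from the patch, so
 * tx queue 0 lands on cpdma channel 7, queue 1 on channel 6, and so on.
 */
static int txq_to_cpdma_chan(int txq)
{
	return (CPDMA_TX_CHANNELS - 1) - txq;
}

int main(void)
{
	int txq;

	for (txq = 0; txq < CPDMA_TX_CHANNELS; txq++)
		printf("txq %d -> cpdma tx channel %d\n",
		       txq, txq_to_cpdma_chan(txq));

	return 0;
}

With only 2 tx queues enabled via ethtool, queues 0 and 1 then map to
channels 7 and 6, the highest priority channels, so rate limitation can
be applied to them without creating all 8 channels.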