From: Frank Li <Frank.Li@freescale.com>
To: b38611@freescale.com, davem@davemloft.net,
netdev@vger.kernel.org, lznuaa@gmail.com
Cc: shawn.guo@linaro.org, linux-arm-kernel@lists.infradead.org,
devicetree@vger.kernel.org, Frank Li <Frank.Li@freescale.com>,
Duan Fugang <B38611@freescale.com>
Subject: [Patch v2 net-next 05/12] net: fec: init multi queue data structure
Date: Wed, 10 Sep 2014 04:15:36 +0800
Message-ID: <1410293743-27874-6-git-send-email-Frank.Li@freescale.com>
In-Reply-To: <1410293743-27874-1-git-send-email-Frank.Li@freescale.com>

Initialize all queues according to the queue number obtained from the
device tree. Factor the per-queue setup out of fec_restart() and
fec_enet_init() into new helpers: fec_enet_enable_ring(),
fec_enet_reset_skb(), fec_enet_alloc_queue() and fec_enet_free_queue().
Signed-off-by: Frank Li <Frank.Li@freescale.com>
Signed-off-by: Duan Fugang <B38611@freescale.com>
---
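Notes: the descriptor memory is still a single dma_alloc_coherent()
region; fec_enet_init() now lays it out as all RX rings first, then all
TX rings, advancing bd_dma and cbd_base per queue. A minimal user-space
sketch of that layout arithmetic (the queue counts and ring sizes below
are example values only, not necessarily what the driver uses):

	#include <stdio.h>

	#define NUM_QUEUES   3
	#define RX_RING_SIZE 512
	#define TX_RING_SIZE 512
	#define BD_SIZE      8	/* sizeof(struct bufdesc); bufdesc_ex is larger */

	int main(void)
	{
		unsigned long off = 0;	/* offset into the coherent region */
		int i;

		/* all RX descriptor rings come first, one per queue */
		for (i = 0; i < NUM_QUEUES; i++) {
			printf("rxq[%d] ring at offset %#lx\n", i, off);
			off += (unsigned long)BD_SIZE * RX_RING_SIZE;
		}

		/* TX rings follow immediately after the last RX ring */
		for (i = 0; i < NUM_QUEUES; i++) {
			printf("txq[%d] ring at offset %#lx\n", i, off);
			off += (unsigned long)BD_SIZE * TX_RING_SIZE;
		}
		return 0;
	}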
drivers/net/ethernet/freescale/fec_main.c | 365 +++++++++++++++++++++---------
1 file changed, 252 insertions(+), 113 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2240df0..03972f7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -834,47 +834,98 @@ static void fec_enet_bd_init(struct net_device *dev)
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned int i;
+ unsigned int q;
- /* Initialize the receive buffer descriptors. */
- rxq = fep->rx_queue[0];
- bdp = rxq->rx_bd_base;
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ /* Initialize the receive buffer descriptors. */
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
- for (i = 0; i < rxq->rx_ring_size; i++) {
+ for (i = 0; i < rxq->rx_ring_size; i++) {
- /* Initialize the BD for every fragment in the page. */
- if (bdp->cbd_bufaddr)
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- else
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ else
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ rxq->cur_rx = rxq->rx_bd_base;
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ /* ...and the same for transmit */
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ txq->cur_tx = bdp;
+
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+ if (txq->tx_skbuff[i]) {
+ dev_kfree_skb_any(txq->tx_skbuff[i]);
+ txq->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ txq->dirty_tx = bdp;
}
+}
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep, 0);
- bdp->cbd_sc |= BD_SC_WRAP;
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
- rxq->cur_rx = rxq->rx_bd_base;
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
- /* ...and the same for transmit */
- txq = fep->tx_queue[0];
- bdp = txq->tx_bd_base;
- txq->cur_tx = bdp;
+ /* enable DMA1/2 */
+ if (i)
+ writel(RCMR_MATCHEN | RCMR_CMP(i),
+ fep->hwp + FEC_RCMR(i));
+ }
- for (i = 0; i < txq->tx_ring_size; i++) {
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- if (txq->tx_skbuff[i]) {
- dev_kfree_skb_any(txq->tx_skbuff[i]);
- txq->tx_skbuff[i] = NULL;
- }
- bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+ fep->hwp + FEC_DMA_CFG(i));
}
+}
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep, 0);
- bdp->cbd_sc |= BD_SC_WRAP;
- txq->dirty_tx = bdp;
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ int i, j;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+
+ for (j = 0; j < txq->tx_ring_size; j++) {
+ if (txq->tx_skbuff[j]) {
+ dev_kfree_skb_any(txq->tx_skbuff[j]);
+ txq->tx_skbuff[j] = NULL;
+ }
+ }
+ }
}
/*
@@ -893,8 +944,6 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
- struct fec_enet_priv_tx_q *txq;
- struct fec_enet_priv_rx_q *rxq;
/* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL);
@@ -918,24 +967,10 @@ fec_restart(struct net_device *ndev)
fec_enet_bd_init(ndev);
- /* Set receive and transmit descriptor base. */
- rxq = fep->rx_queue[0];
- writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(0));
- if (fep->bufdesc_ex)
- writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc_ex)
- * rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0));
- else
- writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc)
- * rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0));
+ fec_enet_enable_ring(ndev);
-
- txq = fep->tx_queue[0];
- for (i = 0; i <= TX_RING_MOD_MASK; i++) {
- if (txq->tx_skbuff[i]) {
- dev_kfree_skb_any(txq->tx_skbuff[i]);
- txq->tx_skbuff[i] = NULL;
- }
- }
+ /* Reset tx SKB buffers. */
+ fec_enet_reset_skb(ndev);
/* Enable MII mode */
if (fep->full_duplex == DUPLEX_FULL) {
@@ -1057,7 +1092,8 @@ fec_restart(struct net_device *ndev)
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
- writel(0, fep->hwp + FEC_R_DES_ACTIVE(0));
+ for (i = 0; i < fep->num_rx_queues; i++)
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
@@ -2233,41 +2269,122 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ skb = rxq->rx_skbuff[i];
+ rxq->rx_skbuff[i] = NULL;
+ if (skb) {
+ dma_unmap_single(&fep->pdev->dev,
+ bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+ }
- rxq = fep->rx_queue[0];
- bdp = rxq->rx_bd_base;
- for (i = 0; i < rxq->rx_ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ kfree(txq->tx_bounce[i]);
+ txq->tx_bounce[i] = NULL;
+ skb = txq->tx_skbuff[i];
+ txq->tx_skbuff[i] = NULL;
dev_kfree_skb(skb);
}
- bdp = fec_enet_get_nextdesc(bdp, fep, 0);
}
+}
- txq = fep->tx_queue[0];
- bdp = txq->tx_bd_base;
- for (i = 0; i < txq->tx_ring_size; i++) {
- kfree(txq->tx_bounce[i]);
- txq->tx_bounce[i] = NULL;
- skb = txq->tx_skbuff[i];
- txq->tx_skbuff[i] = NULL;
- dev_kfree_skb(skb);
+static void fec_enet_free_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+ txq = fep->tx_queue[i];
+ dma_free_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs,
+ txq->tso_hdrs_dma);
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fep->rx_queue[i])
+ kfree(fep->rx_queue[i]);
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i])
+ kfree(fep->tx_queue[i]);
+}
+
+static int fec_enet_alloc_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ int ret = 0;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->tx_queue[i] = txq;
+ txq->tx_ring_size = TX_RING_SIZE;
+ fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+
+ txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ txq->tx_wake_threshold =
+ (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+
+ txq->tso_hdrs = dma_alloc_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma,
+ GFP_KERNEL);
+ if (!txq->tso_hdrs) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
}
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
+ GFP_KERNEL);
+ if (!fep->rx_queue[i]) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
+ fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
+ }
+ return ret;
+
+alloc_failed:
+ fec_enet_free_queue(ndev);
+ return ret;
}
-static int fec_enet_alloc_buffers(struct net_device *ndev)
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
struct bufdesc *bdp;
- struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
- rxq = fep->rx_queue[0];
+ rxq = fep->rx_queue[queue];
bdp = rxq->rx_bd_base;
for (i = 0; i < rxq->rx_ring_size; i++) {
dma_addr_t addr;
@@ -2294,14 +2411,28 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep, 0);
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP;
+ return 0;
- txq = fep->tx_queue[0];
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int
+fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+
+ txq = fep->tx_queue[queue];
bdp = txq->tx_bd_base;
for (i = 0; i < txq->tx_ring_size; i++) {
txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
@@ -2316,11 +2447,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep, 0);
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -2330,6 +2461,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
return -ENOMEM;
}
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fec_enet_alloc_rxq_buffers(ndev, i))
+ return -ENOMEM;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fec_enet_alloc_txq_buffers(ndev, i))
+ return -ENOMEM;
+ return 0;
+}
+
static int
fec_enet_open(struct net_device *ndev)
{
@@ -2579,28 +2725,9 @@ static int fec_enet_init(struct net_device *ndev)
struct bufdesc *cbd_base;
dma_addr_t bd_dma;
int bd_size;
+ unsigned int i;
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return -ENOMEM;
- fep->tx_queue[0] = txq;
-
- rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
- if (!rxq) {
- kfree(txq);
- return -ENOMEM;
- }
-
- fep->rx_queue[0] = rxq;
-
-
- txq->tx_ring_size = TX_RING_SIZE;
- rxq->rx_ring_size = RX_RING_SIZE;
- fep->total_tx_ring_size = txq->tx_ring_size;
- fep->total_rx_ring_size = rxq->rx_ring_size;
-
- txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- txq->tx_wake_threshold = (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+ fec_enet_alloc_queue(ndev);
if (fep->bufdesc_ex)
fep->bufdesc_size = sizeof(struct bufdesc_ex);
@@ -2613,17 +2740,6 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- kfree(rxq);
- kfree(txq);
- return -ENOMEM;
- }
-
- txq->tso_hdrs = dma_alloc_coherent(NULL, txq->tx_ring_size * TSO_HEADER_SIZE,
- &txq->tso_hdrs_dma, GFP_KERNEL);
- if (!txq->tso_hdrs) {
- kfree(rxq);
- kfree(txq);
- dma_free_coherent(NULL, bd_size, cbd_base, bd_dma);
return -ENOMEM;
}
@@ -2635,12 +2751,35 @@ static int fec_enet_init(struct net_device *ndev)
fec_set_mac_address(ndev, NULL);
/* Set receive and transmit descriptor base. */
- rxq->rx_bd_base = cbd_base;
- if (fep->bufdesc_ex)
- txq->tx_bd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
- else
- txq->tx_bd_base = cbd_base + rxq->rx_ring_size;
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ rxq->index = i;
+ rxq->rx_bd_base = (struct bufdesc *)cbd_base;
+ rxq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
+ cbd_base += rxq->rx_ring_size;
+ }
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ txq->index = i;
+ txq->tx_bd_base = (struct bufdesc *)cbd_base;
+ txq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
+ cbd_base += txq->tx_ring_size;
+ }
+ }
/* The FEC Ethernet specific entries in the device structure */
--
1.9.1