From: Christian Marangi <ansuelsmth@gmail.com>
To: Mattias Forsblad <mattias.forsblad@gmail.com>
Cc: netdev@vger.kernel.org, Andrew Lunn <andrew@lunn.ch>,
	Vivien Didelot <vivien.didelot@gmail.com>,
	Florian Fainelli <f.fainelli@gmail.com>,
	Vladimir Oltean <olteanv@gmail.com>,
	"David S . Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	linux@armlinux.org.uk
Subject: Re: [PATCH net-next v14 7/7] net: dsa: qca8k: Use new convenience functions
Date: Mon, 19 Sep 2022 13:23:50 +0200
Message-ID: <6328514a.170a0220.dd15f.2706@mx.google.com>
In-Reply-To: <20220919110847.744712-8-mattias.forsblad@gmail.com>

On Mon, Sep 19, 2022 at 01:08:47PM +0200, Mattias Forsblad wrote:
> Use the new common convenience functions for sending and
> waiting for frames.
> 
> Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
> Signed-off-by: Mattias Forsblad <mattias.forsblad@gmail.com>

Tested-by: Christian Marangi <ansuelsmth@gmail.com>

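For readers following the series: the helpers used in the hunks below are added earlier in the series and are not shown here. As a rough orientation only, a minimal sketch of what they might look like, with signatures and the millisecond timeout unit inferred purely from the call sites in this patch (the actual implementation in the earlier patches may differ), is:

static int dsa_switch_inband_tx(struct dsa_switch *ds, struct sk_buff *skb,
				struct completion *completion,
				unsigned long timeout_ms)
{
	/* Arm the completion before the frame can possibly be answered. */
	reinit_completion(completion);

	/* A NULL skb means the caller has already queued its own frames
	 * (see qca8k_get_ethtool_stats_eth() below) and only wants to wait.
	 */
	if (skb)
		dev_queue_xmit(skb);

	/* wait_for_completion_timeout() returns 0 on timeout, otherwise
	 * the remaining jiffies.
	 */
	return wait_for_completion_timeout(completion,
					   msecs_to_jiffies(timeout_ms));
}

static void dsa_switch_inband_complete(struct dsa_switch *ds,
				       struct completion *completion)
{
	complete(completion);
}

The point of routing this through the DSA core is that the reinit/xmit/wait pattern, previously open-coded in each driver, lives in one place, which is exactly what the diff below removes from qca8k.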
> ---
>  drivers/net/dsa/qca/qca8k-8xxx.c | 68 +++++++++++---------------------
>  1 file changed, 24 insertions(+), 44 deletions(-)
> 
> diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
> index c181346388a4..a4ec0d0e608d 100644
> --- a/drivers/net/dsa/qca/qca8k-8xxx.c
> +++ b/drivers/net/dsa/qca/qca8k-8xxx.c
> @@ -160,7 +160,7 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
>  			       QCA_HDR_MGMT_DATA2_LEN);
>  	}
>  
> -	complete(&mgmt_eth_data->rw_done);
> +	dsa_switch_inband_complete(ds, &mgmt_eth_data->rw_done);
>  }
>  
>  static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
> @@ -228,6 +228,7 @@ static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
>  static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  {
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
> +	struct dsa_switch *ds = priv->ds;
>  	struct sk_buff *skb;
>  	bool ack;
>  	int ret;
> @@ -248,17 +249,13 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  
>  	skb->dev = priv->mgmt_master;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the mdio pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
> +	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done,
> +				   QCA8K_ETHERNET_TIMEOUT);
>  
>  	*val = mgmt_eth_data->data[0];
>  	if (len > QCA_HDR_MGMT_DATA1_LEN)
> @@ -280,6 +277,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  {
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
> +	struct dsa_switch *ds = priv->ds;
>  	struct sk_buff *skb;
>  	bool ack;
>  	int ret;
> @@ -300,17 +298,13 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  
>  	skb->dev = priv->mgmt_master;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the mdio pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
> +	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done,
> +				   QCA8K_ETHERNET_TIMEOUT);
>  
>  	ack = mgmt_eth_data->ack;
>  
> @@ -441,24 +435,22 @@ static struct regmap_config qca8k_regmap_config = {
>  };
>  
>  static int
> -qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
> +qca8k_phy_eth_busy_wait(struct qca8k_priv *priv,
>  			struct sk_buff *read_skb, u32 *val)
>  {
> +	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
>  	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
> +	struct dsa_switch *ds = priv->ds;
>  	bool ack;
>  	int ret;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the copy pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  QCA8K_ETHERNET_TIMEOUT);
> +	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done,
> +				   QCA8K_ETHERNET_TIMEOUT);
>  
>  	ack = mgmt_eth_data->ack;
>  
> @@ -480,6 +472,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	struct sk_buff *write_skb, *clear_skb, *read_skb;
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data;
>  	u32 write_val, clear_val = 0, val;
> +	struct dsa_switch *ds = priv->ds;
>  	struct net_device *mgmt_master;
>  	int ret, ret1;
>  	bool ack;
> @@ -540,17 +533,13 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	clear_skb->dev = mgmt_master;
>  	write_skb->dev = mgmt_master;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the write pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(write_skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  QCA8K_ETHERNET_TIMEOUT);
> +	ret = dsa_switch_inband_tx(ds, write_skb, &mgmt_eth_data->rw_done,
> +				   QCA8K_ETHERNET_TIMEOUT);
>  
>  	ack = mgmt_eth_data->ack;
>  
> @@ -569,7 +558,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
>  				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
>  				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
> -				mgmt_eth_data, read_skb, &val);
> +				priv, read_skb, &val);
>  
>  	if (ret < 0 && ret1 < 0) {
>  		ret = ret1;
> @@ -577,17 +566,14 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	}
>  
>  	if (read) {
> -		reinit_completion(&mgmt_eth_data->rw_done);
> -
>  		/* Increment seq_num and set it in the read pkt */
>  		mgmt_eth_data->seq++;
>  		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
>  		mgmt_eth_data->ack = false;
>  
> -		dev_queue_xmit(read_skb);
> -
> -		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -						  QCA8K_ETHERNET_TIMEOUT);
> +		ret = dsa_switch_inband_tx(ds, read_skb,
> +					   &mgmt_eth_data->rw_done,
> +					   QCA8K_ETHERNET_TIMEOUT);
>  
>  		ack = mgmt_eth_data->ack;
>  
> @@ -606,17 +592,13 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  		kfree_skb(read_skb);
>  	}
>  exit:
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the clear pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(clear_skb);
> -
> -	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -				    QCA8K_ETHERNET_TIMEOUT);
> +	dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done,
> +			     QCA8K_ETHERNET_TIMEOUT);
>  
>  	mutex_unlock(&mgmt_eth_data->mutex);
>  
> @@ -1528,7 +1510,7 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
>  exit:
>  	/* Complete on receiving all the mib packet */
>  	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
> -		complete(&mib_eth_data->rw_done);
> +		dsa_switch_inband_complete(ds, &mib_eth_data->rw_done);
>  }
>  
>  static int
> @@ -1543,8 +1525,6 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
>  
>  	mutex_lock(&mib_eth_data->mutex);
>  
> -	reinit_completion(&mib_eth_data->rw_done);
> -
>  	mib_eth_data->req_port = dp->index;
>  	mib_eth_data->data = data;
>  	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
> @@ -1562,8 +1542,8 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
>  	if (ret)
>  		goto exit;
>  
> -	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
> -
> +	ret = dsa_switch_inband_tx(ds, NULL, &mib_eth_data->rw_done,
> +				   QCA8K_ETHERNET_TIMEOUT);
>  exit:
>  	mutex_unlock(&mib_eth_data->mutex);
>  
> -- 
> 2.25.1
> 

-- 
	Ansuel
