From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
To: Andrew Lunn <andrew@lunn.ch>, Heiner Kallweit <hkallweit1@gmail.com>
Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Florian Fainelli <f.fainelli@gmail.com>,
	Jakub Kicinski <kuba@kernel.org>,
	Jiawen Wu <jiawenwu@trustnetic.com>,
	Jose Abreu <joabreu@synopsys.com>,
	Jose Abreu <Jose.Abreu@synopsys.com>,
	linux-arm-kernel@lists.infradead.org,
	linux-stm32@st-md-mailman.stormreply.com,
	Maxime Coquelin <mcoquelin.stm32@gmail.com>,
	Mengyuan Lou <mengyuanlou@net-swift.com>,
	netdev@vger.kernel.org, Paolo Abeni <pabeni@redhat.com>,
	Vladimir Oltean <olteanv@gmail.com>
Subject: [PATCH net-next 11/13] net: pcs: xpcs: use dev_*() to print messages
Date: Fri, 04 Oct 2024 11:21:32 +0100
Message-ID: <E1swfRE-006Dfy-Hy@rmk-PC.armlinux.org.uk>
In-Reply-To: <Zv_BTd8UF7XbJF_e@shell.armlinux.org.uk>

Use the dev_*() family of functions to print all messages from the XPCS
driver, so that each message identifies which XPCS instance issued it.
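
For example, the error that xpcs_link_up_sgmii() previously printed as:

	pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));

is now reported against the instance's MDIO device, so the kernel log
shows which device produced it:

	dev_err(&xpcs->mdiodev->dev, "%s: xpcs_write returned %pe\n",
		__func__, ERR_PTR(ret));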

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
---
 drivers/net/pcs/pcs-xpcs.c | 44 +++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 06a495135418..d6e63f091995 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -356,7 +356,8 @@ static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed)
 	return;
 
 out:
-	pr_err("%s: XPCS access returned %pe\n", __func__, ERR_PTR(ret));
+	dev_err(&xpcs->mdiodev->dev, "%s: XPCS access returned %pe\n",
+		__func__, ERR_PTR(ret));
 }
 
 static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
@@ -1070,32 +1071,27 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
 		break;
 	case DW_AN_C73:
 		ret = xpcs_get_state_c73(xpcs, state, compat);
-		if (ret) {
-			pr_err("xpcs_get_state_c73 returned %pe\n",
-			       ERR_PTR(ret));
-			return;
-		}
+		if (ret)
+			dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
+				"xpcs_get_state_c73", ERR_PTR(ret));
 		break;
 	case DW_AN_C37_SGMII:
 		ret = xpcs_get_state_c37_sgmii(xpcs, state);
-		if (ret) {
-			pr_err("xpcs_get_state_c37_sgmii returned %pe\n",
-			       ERR_PTR(ret));
-		}
+		if (ret)
+			dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
+				"xpcs_get_state_c37_sgmii", ERR_PTR(ret));
 		break;
 	case DW_AN_C37_1000BASEX:
 		ret = xpcs_get_state_c37_1000basex(xpcs, state);
-		if (ret) {
-			pr_err("xpcs_get_state_c37_1000basex returned %pe\n",
-			       ERR_PTR(ret));
-		}
+		if (ret)
+			dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
+				"xpcs_get_state_c37_1000basex", ERR_PTR(ret));
 		break;
 	case DW_2500BASEX:
 		ret = xpcs_get_state_2500basex(xpcs, state);
-		if (ret) {
-			pr_err("xpcs_get_state_2500basex returned %pe\n",
-			       ERR_PTR(ret));
-		}
+		if (ret)
+			dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
+				"xpcs_get_state_2500basex", ERR_PTR(ret));
 		break;
 	default:
 		return;
@@ -1113,7 +1109,8 @@ static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode,
 	val = mii_bmcr_encode_fixed(speed, duplex);
 	ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
 	if (ret)
-		pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));
+		dev_err(&xpcs->mdiodev->dev, "%s: xpcs_write returned %pe\n",
+			__func__, ERR_PTR(ret));
 }
 
 static void xpcs_link_up_1000basex(struct dw_xpcs *xpcs, unsigned int neg_mode,
@@ -1131,18 +1128,21 @@ static void xpcs_link_up_1000basex(struct dw_xpcs *xpcs, unsigned int neg_mode,
 	case SPEED_100:
 	case SPEED_10:
 	default:
-		pr_err("%s: speed = %d\n", __func__, speed);
+		dev_err(&xpcs->mdiodev->dev, "%s: speed = %d\n",
+			__func__, speed);
 		return;
 	}
 
 	if (duplex == DUPLEX_FULL)
 		val |= BMCR_FULLDPLX;
 	else
-		pr_err("%s: half duplex not supported\n", __func__);
+		dev_err(&xpcs->mdiodev->dev, "%s: half duplex not supported\n",
+			__func__);
 
 	ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
 	if (ret)
-		pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));
+		dev_err(&xpcs->mdiodev->dev, "%s: xpcs_write returned %pe\n",
+			__func__, ERR_PTR(ret));
 }
 
 static void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
-- 
2.30.2


