From: Julian Wiedmann <jwi@linux.ibm.com>
To: David Miller <davem@davemloft.net>, Jakub Kicinski <kuba@kernel.org>
Cc: linux-netdev <netdev@vger.kernel.org>,
linux-s390 <linux-s390@vger.kernel.org>,
Heiko Carstens <hca@linux.ibm.com>,
Karsten Graul <kgraul@linux.ibm.com>,
Julian Wiedmann <jwi@linux.ibm.com>
Subject: [PATCH net-next 3/9] s390/qeth: unify the tracking of active cmds on ccw device
Date: Fri, 11 Jun 2021 09:33:35 +0200
Message-ID: <20210611073341.1634501-4-jwi@linux.ibm.com>
In-Reply-To: <20210611073341.1634501-1-jwi@linux.ibm.com>
We have one field to track _whether_ a cmd is active on a ccw device
('irq_pending'), and one to track _which_ cmd it is ('active_cmd').
Get rid of the irq_pending field by testing active_cmd for NULL instead.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
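For readers outside the kernel tree, here is a minimal userspace C11
sketch of the locking pattern this patch switches to (names are
illustrative stand-ins, not the qeth API): the active_cmd pointer
itself serves as the lock word, with NULL meaning "idle", so a
separate irq_pending flag is redundant. The kernel code uses
cmpxchg()/xchg() where this sketch uses C11 atomics.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct cmd;	/* opaque stand-in for struct qeth_cmd_buffer */

	struct channel {
		/* NULL == no cmd active, i.e. the channel is free */
		_Atomic(struct cmd *) active_cmd;
	};

	/* Claim the channel for @cmd; succeeds only if it was idle. */
	static bool trylock_channel(struct channel *ch, struct cmd *cmd)
	{
		struct cmd *expected = NULL;

		return atomic_compare_exchange_strong(&ch->active_cmd,
						      &expected, cmd);
	}

	/* Release the channel; the next trylock_channel() can succeed. */
	static void unlock_channel(struct channel *ch)
	{
		atomic_store(&ch->active_cmd, NULL);
	}

Testing the pointer for NULL collapses the two pieces of state
("is a cmd pending?" and "which cmd?") into one word, which also
removes any chance of the two fields getting out of sync.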
drivers/s390/net/qeth_core.h | 14 +++++++-------
drivers/s390/net/qeth_core_main.c | 12 ++++--------
2 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4d29801bcf41..ad0e86aa99b2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -614,7 +614,6 @@ struct qeth_channel {
struct ccw_device *ccwdev;
struct qeth_cmd_buffer *active_cmd;
enum qeth_channel_states state;
- atomic_t irq_pending;
};
struct qeth_reply {
@@ -664,11 +663,6 @@ static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
}
-static inline bool qeth_trylock_channel(struct qeth_channel *channel)
-{
- return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0;
-}
-
/**
* OSA card related definitions
*/
@@ -896,10 +890,16 @@ static inline bool qeth_use_tx_irqs(struct qeth_card *card)
static inline void qeth_unlock_channel(struct qeth_card *card,
struct qeth_channel *channel)
{
- atomic_set(&channel->irq_pending, 0);
+ xchg(&channel->active_cmd, NULL);
wake_up(&card->wait_q);
}
+static inline bool qeth_trylock_channel(struct qeth_channel *channel,
+ struct qeth_cmd_buffer *cmd)
+{
+ return cmpxchg(&channel->active_cmd, NULL, cmd) == NULL;
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f22f223a4a6c..83d540f8b527 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1268,7 +1268,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
- channel->active_cmd = NULL;
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
@@ -1715,11 +1714,10 @@ static int qeth_stop_channel(struct qeth_channel *channel)
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
- if (channel->active_cmd) {
+ if (channel->active_cmd)
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
- channel->active_cmd = NULL;
- }
+
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
@@ -1732,7 +1730,7 @@ static int qeth_start_channel(struct qeth_channel *channel)
int rc;
channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
+ xchg(&channel->active_cmd, NULL);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
@@ -2039,7 +2037,7 @@ static int qeth_send_control_data(struct qeth_card *card,
reply->param = reply_param;
timeout = wait_event_interruptible_timeout(card->wait_q,
- qeth_trylock_channel(channel),
+ qeth_trylock_channel(channel, iob),
timeout);
if (timeout <= 0) {
qeth_put_cmd(iob);
@@ -2059,8 +2057,6 @@ static int qeth_send_control_data(struct qeth_card *card,
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
- if (!rc)
- channel->active_cmd = iob;
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
--
2.25.1