From: Niklas Schnelle <schnelle@linux.ibm.com>
To: Paolo Abeni <pabeni@redhat.com>,
Alexandra Winter <wintera@linux.ibm.com>,
Wenjia Zhang <wenjia@linux.ibm.com>,
Jan Karcher <jaka@linux.ibm.com>,
Stefan Raspl <raspl@linux.ibm.com>,
"David S. Miller" <davem@davemloft.net>
Cc: linux-s390@vger.kernel.org, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: [PATCH net v2 2/3] s390/ism: Fix and simplify add()/remove() callback handling
Date: Fri, 7 Jul 2023 12:56:21 +0200
Message-ID: <20230707105622.3332261-3-schnelle@linux.ibm.com>
In-Reply-To: <20230707105622.3332261-1-schnelle@linux.ibm.com>
Previously the clients_lock protected the clients array against
concurrent addition/removal of clients but was also taken in IRQ
context. This meant that it had to be a spinlock and that the add() and
remove() callbacks, in which clients need to allocate memory and take
mutexes, could not be called under the clients_lock. To work around this,
these callbacks were moved to workqueues. This not only introduced
significant complexity but is also subtly broken in at least one way.
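To make the constraint concrete, here is a minimal sketch (hypothetical
example_* names, not code from this driver) of why such callbacks cannot
simply be invoked while holding a spinlock with interrupts disabled:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_MUTEX(example_client_mutex);

/* Stand-in for a client's add()/remove() callback: it may sleep. */
static void example_client_add(void)
{
	void *priv = kzalloc(64, GFP_KERNEL);	/* may sleep */

	mutex_lock(&example_client_mutex);	/* may sleep */
	/* ... set up per-device client state ... */
	mutex_unlock(&example_client_mutex);
	kfree(priv);
}

static void example_broken_caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_client_add();	/* BUG: sleeping call in atomic context */
	spin_unlock_irqrestore(&example_lock, flags);
}

Hence the old code deferred the calls to workqueues instead of making
them directly under the spinlock.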
In ism_dev_init() and ism_dev_exit() clients[i]->tgt_ism is used to
communicate the added/removed ISM device to the work function. While
write access to clients[i]->tgt_ism is protected by the clients_lock,
and the code waits until there is no pending add/remove work before and
after setting clients[i]->tgt_ism, this is not enough. The problem is
that the wait is based on per ISM device counters. Thus a concurrent
ism_dev_init()/ism_dev_exit() for a different ISM device may overwrite
clients[i]->tgt_ism between the unlock of the clients_lock and the
subsequent wait for the work to finish.
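One possible interleaving that illustrates the overwrite (sketched from
the description above, with hypothetical devices ism_A and ism_B):

CPU A, ism_dev_init(ism_A): clients[i]->tgt_ism = ism_A; schedule add_work
CPU A:                      spin_unlock_irqrestore(&clients_lock, flags)
CPU B, ism_dev_init(ism_B): clients[i]->tgt_ism = ism_B; schedule add_work
worker:                     A's add_work runs with tgt_ism == ism_B,
                            i.e. for the wrong device
CPU A:                      wait_event(ism_A->waitq, ...)

In such a case ism_A may never see its add() callback and its
add_dev_cnt may never drop back to zero.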
Thankfully, with the clients_lock no longer held in IRQ context, it can
be turned into a mutex that can be held across the calls to
add()/remove(). This completely removes the need for the workqueues and
the associated broken housekeeping, including the per ISM device
counters and clients[i]->tgt_ism.
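With the mutex held across the callbacks, ism_dev_init() reduces to the
straightforward pattern below (excerpted from the diff that follows):

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

ism_dev_exit() uses the same nesting order, ism_dev_list.mutex outside
clients_lock, for its remove() calls.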
Fixes: 89e7d2ba61b7 ("net/ism: Add new API for client registration")
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
---
drivers/s390/net/ism_drv.c | 90 +++++++++++---------------------------
include/linux/ism.h | 6 ---
2 files changed, 26 insertions(+), 70 deletions(-)
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index b664e4a08645..54091b7aea16 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -36,7 +36,7 @@ static const struct smcd_ops ism_ops;
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
/* a list for fast mapping */
static u8 max_client;
-static DEFINE_SPINLOCK(clients_lock);
+static DEFINE_MUTEX(clients_lock);
struct ism_dev_list {
struct list_head list;
struct mutex mutex; /* protects ism device list */
@@ -59,11 +59,10 @@ static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
int ism_register_client(struct ism_client *client)
{
struct ism_dev *ism;
- unsigned long flags;
int i, rc = -ENOSPC;
mutex_lock(&ism_dev_list.mutex);
- spin_lock_irqsave(&clients_lock, flags);
+ mutex_lock(&clients_lock);
for (i = 0; i < MAX_CLIENTS; ++i) {
if (!clients[i]) {
clients[i] = client;
@@ -74,7 +73,8 @@ int ism_register_client(struct ism_client *client)
break;
}
}
- spin_unlock_irqrestore(&clients_lock, flags);
+ mutex_unlock(&clients_lock);
+
if (i < MAX_CLIENTS) {
/* initialize with all devices that we got so far */
list_for_each_entry(ism, &ism_dev_list.list, list) {
@@ -96,11 +96,11 @@ int ism_unregister_client(struct ism_client *client)
int rc = 0;
mutex_lock(&ism_dev_list.mutex);
- spin_lock_irqsave(&clients_lock, flags);
+ mutex_lock(&clients_lock);
clients[client->id] = NULL;
if (client->id + 1 == max_client)
max_client--;
- spin_unlock_irqrestore(&clients_lock, flags);
+ mutex_unlock(&clients_lock);
list_for_each_entry(ism, &ism_dev_list.list, list) {
spin_lock_irqsave(&ism->lock, flags);
/* Stop forwarding IRQs and events */
@@ -571,21 +571,9 @@ static u64 ism_get_local_gid(struct ism_dev *ism)
return ism->local_gid;
}
-static void ism_dev_add_work_func(struct work_struct *work)
-{
- struct ism_client *client = container_of(work, struct ism_client,
- add_work);
-
- client->add(client->tgt_ism);
- ism_setup_forwarding(client, client->tgt_ism);
- atomic_dec(&client->tgt_ism->add_dev_cnt);
- wake_up(&client->tgt_ism->waitq);
-}
-
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
- unsigned long flags;
int i, ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
@@ -618,25 +606,16 @@ static int ism_dev_init(struct ism_dev *ism)
/* hardware is V2 capable */
ism_create_system_eid();
- init_waitqueue_head(&ism->waitq);
- atomic_set(&ism->free_clients_cnt, 0);
- atomic_set(&ism->add_dev_cnt, 0);
-
- wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
- spin_lock_irqsave(&clients_lock, flags);
- for (i = 0; i < max_client; ++i)
- if (clients[i]) {
- INIT_WORK(&clients[i]->add_work,
- ism_dev_add_work_func);
- clients[i]->tgt_ism = ism;
- atomic_inc(&ism->add_dev_cnt);
- schedule_work(&clients[i]->add_work);
- }
- spin_unlock_irqrestore(&clients_lock, flags);
-
- wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
-
mutex_lock(&ism_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i]) {
+ clients[i]->add(ism);
+ ism_setup_forwarding(clients[i], ism);
+ }
+ }
+ mutex_unlock(&clients_lock);
+
list_add(&ism->list, &ism_dev_list.list);
mutex_unlock(&ism_dev_list.mutex);
@@ -711,40 +690,24 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static void ism_dev_remove_work_func(struct work_struct *work)
-{
- struct ism_client *client = container_of(work, struct ism_client,
- remove_work);
- unsigned long flags;
-
- spin_lock_irqsave(&client->tgt_ism->lock, flags);
- client->tgt_ism->subs[client->id] = NULL;
- spin_unlock_irqrestore(&client->tgt_ism->lock, flags);
- client->remove(client->tgt_ism);
- atomic_dec(&client->tgt_ism->free_clients_cnt);
- wake_up(&client->tgt_ism->waitq);
-}
-
-/* Callers must hold ism_dev_list.mutex */
static void ism_dev_exit(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
unsigned long flags;
int i;
- wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
- spin_lock_irqsave(&clients_lock, flags);
+ spin_lock_irqsave(&ism->lock, flags);
for (i = 0; i < max_client; ++i)
- if (clients[i]) {
- INIT_WORK(&clients[i]->remove_work,
- ism_dev_remove_work_func);
- clients[i]->tgt_ism = ism;
- atomic_inc(&ism->free_clients_cnt);
- schedule_work(&clients[i]->remove_work);
- }
- spin_unlock_irqrestore(&clients_lock, flags);
+ ism->subs[i] = NULL;
+ spin_unlock_irqrestore(&ism->lock, flags);
- wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
+ mutex_lock(&ism_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i])
+ clients[i]->remove(ism);
+ }
+ mutex_unlock(&clients_lock);
if (SYSTEM_EID.serial_number[0] != '0' ||
SYSTEM_EID.type[0] != '0')
@@ -755,15 +718,14 @@ static void ism_dev_exit(struct ism_dev *ism)
kfree(ism->sba_client_arr);
pci_free_irq_vectors(pdev);
list_del_init(&ism->list);
+ mutex_unlock(&ism_dev_list.mutex);
}
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
- mutex_lock(&ism_dev_list.mutex);
ism_dev_exit(ism);
- mutex_unlock(&ism_dev_list.mutex);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
diff --git a/include/linux/ism.h b/include/linux/ism.h
index 5160d47e5ea9..9a4c204df3da 100644
--- a/include/linux/ism.h
+++ b/include/linux/ism.h
@@ -45,9 +45,6 @@ struct ism_dev {
int ieq_idx;
struct ism_client *subs[MAX_CLIENTS];
- atomic_t free_clients_cnt;
- atomic_t add_dev_cnt;
- wait_queue_head_t waitq;
};
struct ism_event {
@@ -69,9 +66,6 @@ struct ism_client {
*/
void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
/* Private area - don't touch! */
- struct work_struct remove_work;
- struct work_struct add_work;
- struct ism_dev *tgt_ism;
u8 id;
};
--
2.39.2