From: "Eugenio Pérez" <eperezma@redhat.com>
To: "Michael S . Tsirkin " <mst@redhat.com>
Cc: "Yongji Xie" <xieyongji@bytedance.com>,
linux-kernel@vger.kernel.org,
"Maxime Coquelin" <mcoqueli@redhat.com>,
"Eugenio Pérez" <eperezma@redhat.com>,
"Stefano Garzarella" <sgarzare@redhat.com>,
"Xuan Zhuo" <xuanzhuo@linux.alibaba.com>,
"Cindy Lu" <lulu@redhat.com>,
virtualization@lists.linux.dev,
"Laurent Vivier" <lvivier@redhat.com>,
jasowang@redhat.com
Subject: [PATCH v5 1/6] vduse: make domain_lock an rwlock
Date: Fri, 26 Sep 2025 12:14:27 +0200
Message-ID: <20250926101432.2251301-2-eperezma@redhat.com>
In-Reply-To: <20250926101432.2251301-1-eperezma@redhat.com>
The domain lock will be taken in a few more read-only paths in upcoming
patches, so convert it to an rwlock to make it more scalable.
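
For illustration only, a minimal sketch of the locking pattern this
conversion enables (the demo_dev structure and demo_* helpers below are
hypothetical and not part of this patch): read-only lookups of the domain
pointer can run concurrently, while writers that install or tear it down
remain exclusive.

  #include <linux/spinlock.h>   /* rwlock_t, read_lock(), write_lock() */
  #include <linux/types.h>

  struct demo_dev {
          rwlock_t domain_lock;
          void *domain;
  };

  /* Read side: any number of these can run concurrently. */
  static bool demo_domain_present(struct demo_dev *dev)
  {
          bool present;

          read_lock(&dev->domain_lock);
          present = dev->domain != NULL;
          read_unlock(&dev->domain_lock);

          return present;
  }

  /* Write side: installing or clearing the domain excludes all readers. */
  static void demo_domain_set(struct demo_dev *dev, void *domain)
  {
          write_lock(&dev->domain_lock);
          dev->domain = domain;
          write_unlock(&dev->domain_lock);
  }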
Suggested-by: Xie Yongji <xieyongji@bytedance.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Xie Yongji <xieyongji@bytedance.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v2: New in v2
---
drivers/vdpa/vdpa_user/vduse_dev.c | 41 +++++++++++++++---------------
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index e7bced0b5542..2b6a8958ffe0 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -14,6 +14,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/eventfd.h>
+#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dma-map-ops.h>
@@ -117,7 +118,7 @@ struct vduse_dev {
struct vduse_umem *umem;
struct mutex mem_lock;
unsigned int bounce_size;
- struct mutex domain_lock;
+ rwlock_t domain_lock;
};
struct vduse_dev_msg {
@@ -1176,9 +1177,9 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
if (entry.start > entry.last)
break;
- mutex_lock(&dev->domain_lock);
+ read_lock(&dev->domain_lock);
if (!dev->domain) {
- mutex_unlock(&dev->domain_lock);
+ read_unlock(&dev->domain_lock);
break;
}
spin_lock(&dev->domain->iotlb_lock);
@@ -1193,7 +1194,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
entry.perm = map->perm;
}
spin_unlock(&dev->domain->iotlb_lock);
- mutex_unlock(&dev->domain_lock);
+ read_unlock(&dev->domain_lock);
ret = -EINVAL;
if (!f)
break;
@@ -1346,10 +1347,10 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
sizeof(umem.reserved)))
break;
- mutex_lock(&dev->domain_lock);
+ write_lock(&dev->domain_lock);
ret = vduse_dev_reg_umem(dev, umem.iova,
umem.uaddr, umem.size);
- mutex_unlock(&dev->domain_lock);
+ write_unlock(&dev->domain_lock);
break;
}
case VDUSE_IOTLB_DEREG_UMEM: {
@@ -1363,10 +1364,10 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
if (!is_mem_zero((const char *)umem.reserved,
sizeof(umem.reserved)))
break;
- mutex_lock(&dev->domain_lock);
+ write_lock(&dev->domain_lock);
ret = vduse_dev_dereg_umem(dev, umem.iova,
umem.size);
- mutex_unlock(&dev->domain_lock);
+ write_unlock(&dev->domain_lock);
break;
}
case VDUSE_IOTLB_GET_INFO: {
@@ -1385,9 +1386,9 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
sizeof(info.reserved)))
break;
- mutex_lock(&dev->domain_lock);
+ read_lock(&dev->domain_lock);
if (!dev->domain) {
- mutex_unlock(&dev->domain_lock);
+ read_unlock(&dev->domain_lock);
break;
}
spin_lock(&dev->domain->iotlb_lock);
@@ -1402,7 +1403,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
info.capability |= VDUSE_IOVA_CAP_UMEM;
}
spin_unlock(&dev->domain->iotlb_lock);
- mutex_unlock(&dev->domain_lock);
+ read_unlock(&dev->domain_lock);
if (!map)
break;
@@ -1425,10 +1426,10 @@ static int vduse_dev_release(struct inode *inode, struct file *file)
{
struct vduse_dev *dev = file->private_data;
- mutex_lock(&dev->domain_lock);
+ write_lock(&dev->domain_lock);
if (dev->domain)
vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
- mutex_unlock(&dev->domain_lock);
+ write_unlock(&dev->domain_lock);
spin_lock(&dev->msg_lock);
/* Make sure the inflight messages can processed after reconncection */
list_splice_init(&dev->recv_list, &dev->send_list);
@@ -1647,7 +1648,7 @@ static struct vduse_dev *vduse_dev_create(void)
mutex_init(&dev->lock);
mutex_init(&dev->mem_lock);
- mutex_init(&dev->domain_lock);
+ rwlock_init(&dev->domain_lock);
spin_lock_init(&dev->msg_lock);
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
@@ -1805,7 +1806,7 @@ static ssize_t bounce_size_store(struct device *device,
int ret;
ret = -EPERM;
- mutex_lock(&dev->domain_lock);
+ write_lock(&dev->domain_lock);
if (dev->domain)
goto unlock;
@@ -1821,7 +1822,7 @@ static ssize_t bounce_size_store(struct device *device,
dev->bounce_size = bounce_size & PAGE_MASK;
ret = count;
unlock:
- mutex_unlock(&dev->domain_lock);
+ write_unlock(&dev->domain_lock);
return ret;
}
@@ -2045,11 +2046,11 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
if (ret)
return ret;
- mutex_lock(&dev->domain_lock);
+ write_lock(&dev->domain_lock);
if (!dev->domain)
dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
dev->bounce_size);
- mutex_unlock(&dev->domain_lock);
+ write_unlock(&dev->domain_lock);
if (!dev->domain) {
put_device(&dev->vdev->vdpa.dev);
return -ENOMEM;
@@ -2059,10 +2060,10 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
if (ret) {
put_device(&dev->vdev->vdpa.dev);
- mutex_lock(&dev->domain_lock);
+ write_lock(&dev->domain_lock);
vduse_domain_destroy(dev->domain);
dev->domain = NULL;
- mutex_unlock(&dev->domain_lock);
+ write_unlock(&dev->domain_lock);
return ret;
}
--
2.51.0