From: Sungho Bae <baver.bae@gmail.com>
To: mst@redhat.com, jasowang@redhat.com
Cc: xuanzhuo@linux.alibaba.com, eperezma@redhat.com,
virtualization@lists.linux.dev, linux-kernel@vger.kernel.org,
Sungho Bae <baver.bae@lge.com>
Subject: [RFC PATCH v4 4/4] virtio-mmio: wire up noirq system sleep PM callbacks
Date: Fri, 24 Apr 2026 02:40:39 +0900
Message-ID: <20260423174039.276-5-baver.bae@gmail.com>
In-Reply-To: <20260423174039.276-1-baver.bae@gmail.com>
From: Sungho Bae <baver.bae@lge.com>

Add noirq system-sleep PM support to the virtio-mmio transport.

Wire noirq freeze/restore callbacks into virtio-mmio, and hook virtqueue
reset/reactivation into the transport config ops so that virtqueues can
be reinitialized and reused across suspend/resume rather than torn down
and recreated.

For legacy (v1) devices, program GUEST_PAGE_SIZE in the noirq restore
path, and skip the duplicate write in the normal restore path when the
noirq restore has already run.

This lets virtio-mmio based devices participate safely in the noirq PM
phase, which is required for users that must be restored early in
resume, before device interrupts are re-enabled.
Signed-off-by: Sungho Bae <baver.bae@lge.com>
---
drivers/virtio/virtio_mmio.c | 134 ++++++++++++++++++++++++-----------
1 file changed, 94 insertions(+), 40 deletions(-)
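
Note for reviewers reading this patch in isolation: the core-side caller
of the new .reset_vqs op is introduced in patch 3/4 and is not shown
here. The sketch below only illustrates the calling convention this
transport implementation assumes; the names taken from this series
(.reset_vqs, .noirq_safe, virtqueue_reinit_vring) are real, but the body
is an assumption, not the actual core code.

/* Illustrative sketch only -- the real logic lives in patch 3/4. */
static int sketch_core_restore_noirq(struct virtio_device *vdev)
{
	int ret;

	/* Transports opt in explicitly via .noirq_safe. */
	if (!vdev->config->noirq_safe)
		return 0;

	/*
	 * Ask the transport to reprogram every virtqueue while
	 * interrupts are still disabled.  For virtio-mmio this ends
	 * up in vm_reset_vqs() below: reinit the vring state, select
	 * each queue via QUEUE_SEL, then rewrite QUEUE_NUM and the
	 * ring addresses (QUEUE_PFN on v1; DESC/AVAIL/USED plus
	 * QUEUE_READY on v2).
	 */
	if (vdev->config->reset_vqs) {
		ret = vdev->config->reset_vqs(vdev);
		if (ret)
			return ret;
	}

	return 0;
}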
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 595c2274fbb5..1cd262f9f8b6 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -336,6 +336,75 @@ static void vm_del_vqs(struct virtio_device *vdev)
free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
+static int vm_active_vq(struct virtio_device *vdev, struct virtqueue *vq)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ int q_num = virtqueue_get_vring_size(vq);
+
+ writel(q_num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+ if (vm_dev->version == 1) {
+ u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;
+
+ /*
+ * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
+ * that doesn't fit in 32bit, fail the setup rather than
+ * pretending to be successful.
+ */
+ if (q_pfn >> 32) {
+ dev_err(&vdev->dev,
+ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ 0x1ULL << (32 + PAGE_SHIFT - 30));
+ return -E2BIG;
+ }
+
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
+ writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ u64 addr;
+
+ addr = virtqueue_get_desc_addr(vq);
+ writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
+ writel((u32)(addr >> 32),
+ vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
+
+ addr = virtqueue_get_avail_addr(vq);
+ writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
+ writel((u32)(addr >> 32),
+ vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
+
+ addr = virtqueue_get_used_addr(vq);
+ writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
+ writel((u32)(addr >> 32),
+ vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
+
+ writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
+ }
+
+ return 0;
+}
+
+static int vm_reset_vqs(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ struct virtqueue *vq;
+ int err;
+
+ virtio_device_for_each_vq(vdev, vq) {
+ /* Re-initialize vring state */
+ virtqueue_reinit_vring(vq);
+
+ /* Select the queue we're interested in */
+ writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+
+ /* Activate the queue */
+ err = vm_active_vq(vdev, vq);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static void vm_synchronize_cbs(struct virtio_device *vdev)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
@@ -388,45 +457,9 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
vq->num_max = num;
/* Activate the queue */
- writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
- if (vm_dev->version == 1) {
- u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;
-
- /*
- * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
- * that doesn't fit in 32bit, fail the setup rather than
- * pretending to be successful.
- */
- if (q_pfn >> 32) {
- dev_err(&vdev->dev,
- "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
- 0x1ULL << (32 + PAGE_SHIFT - 30));
- err = -E2BIG;
- goto error_bad_pfn;
- }
-
- writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
- writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
- } else {
- u64 addr;
-
- addr = virtqueue_get_desc_addr(vq);
- writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
- writel((u32)(addr >> 32),
- vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
-
- addr = virtqueue_get_avail_addr(vq);
- writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
- writel((u32)(addr >> 32),
- vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
-
- addr = virtqueue_get_used_addr(vq);
- writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
- writel((u32)(addr >> 32),
- vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
-
- writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
- }
+ err = vm_active_vq(vdev, vq);
+ if (err < 0)
+ goto error_bad_pfn;
return vq;
@@ -528,11 +561,13 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
.reset = vm_reset,
.find_vqs = vm_find_vqs,
.del_vqs = vm_del_vqs,
+ .reset_vqs = vm_reset_vqs,
.get_features = vm_get_features,
.finalize_features = vm_finalize_features,
.bus_name = vm_bus_name,
.get_shm_region = vm_get_shm_region,
.synchronize_cbs = vm_synchronize_cbs,
+ .noirq_safe = true,
};
#ifdef CONFIG_PM_SLEEP
@@ -547,14 +582,33 @@ static int virtio_mmio_restore(struct device *dev)
{
struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
- if (vm_dev->version == 1)
+ if (vm_dev->version == 1 && !vm_dev->vdev.noirq_restore_done)
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
return virtio_device_restore(&vm_dev->vdev);
}
+static int virtio_mmio_freeze_noirq(struct device *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+ return virtio_device_freeze_noirq(&vm_dev->vdev);
+}
+
+static int virtio_mmio_restore_noirq(struct device *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+ if (vm_dev->version == 1)
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+ return virtio_device_restore_noirq(&vm_dev->vdev);
+}
+
static const struct dev_pm_ops virtio_mmio_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze_noirq,
+ virtio_mmio_restore_noirq)
};
#endif
--
2.43.0
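
One detail from the v1 branch of vm_active_vq() that may be worth
spelling out: the legacy 32-bit QUEUE_PFN register caps how high in RAM
a ring may live, and the cap scales with the page size. A worked
instance, assuming 4 KiB pages (PAGE_SHIFT == 12):

/*
 * A 32-bit PFN shifted left by PAGE_SHIFT covers 2^(32 + PAGE_SHIFT)
 * bytes.  With PAGE_SHIFT == 12 that is 2^44 bytes == 16 TiB, which
 * matches the value the dev_err() above prints:
 *
 *   0x1ULL << (32 + 12 - 30) == 1 << 14 == 16384 GB == 16 TiB
 */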
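
For context, this is how the four callbacks interleave across a
hibernation cycle once both SET_SYSTEM_SLEEP_PM_OPS and
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS are populated. The ordering below is
standard PM core behavior; the function names and the
noirq_restore_done check are the ones added by this patch.

/*
 *   freeze:        virtio_mmio_freeze()         device IRQs still on
 *   freeze_noirq:  virtio_mmio_freeze_noirq()   IRQs disabled
 *       ... image written, system powered off, kernel boots ...
 *   restore_noirq: virtio_mmio_restore_noirq()  IRQs still disabled
 *                    -> rewrites GUEST_PAGE_SIZE on v1
 *                    -> virtio_device_restore_noirq()
 *   restore:       virtio_mmio_restore()        IRQs enabled again
 *                    -> skips GUEST_PAGE_SIZE when
 *                       vdev.noirq_restore_done is set
 *                    -> virtio_device_restore()
 */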
Thread overview: 5+ messages
2026-04-23 17:40 [RFC PATCH v4 0/4] virtio: add noirq system sleep PM callbacks for virtio-mmio Sungho Bae
2026-04-23 17:40 ` [RFC PATCH v4 1/4] virtio: separate PM restore and reset_done paths Sungho Bae
2026-04-23 17:40 ` [RFC PATCH v4 2/4] virtio_ring: export virtqueue_reinit_vring() for noirq restore Sungho Bae
2026-04-23 17:40 ` [RFC PATCH v4 3/4] virtio: add noirq system sleep PM infrastructure Sungho Bae
2026-04-23 17:40 ` [RFC PATCH v4 4/4] virtio-mmio: wire up noirq system sleep PM callbacks Sungho Bae [this message]