public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] virtio: console: fix use-after-free after failed restore
@ 2026-04-16 23:37 physicalmtea
  0 siblings, 0 replies; only message in thread
From: physicalmtea @ 2026-04-16 23:37 UTC (permalink / raw)
  To: amit; +Cc: arnd, gregkh, virtualization, linux-kernel, JiaJia

From: JiaJia <physicalmtea@gmail.com>

virtcons_freeze() tears down the virtqueues through remove_vqs(), but
remove_vqs() only deletes the virtqueues and frees portdev->in_vqs and
portdev->out_vqs. It leaves port->in_vq, port->out_vq, portdev->c_ivq,
and portdev->c_ovq pointing at the old queues.

Those stale pointers survive a failed restore:

  virtcons_freeze()
    -> remove_vqs()
       -> del_vqs()
          -> vring_del_virtqueue()
             -> kfree(vq)

  virtcons_restore()
    -> init_vqs()
       -> virtio_find_vqs()

If init_vqs() fails, virtcons_restore() returns without rebinding the
per-port queue pointers. Later get_chars(), __send_control_msg(),
control_work_handler(), get_inbuf(), reclaim_consumed_buffers(),
__send_to_port(), fill_readbuf(), and port_fops_splice_write() can still
reach those cached pointers and trigger a use-after-free or Oops.

A fault-injection-assisted hibernation run that forces init_vqs() to
fail reproduces this as a KASAN slab-use-after-free in
virtqueue_disable_cb() from virtcons_freeze() on the subsequent PM
path.

Clear the cached virtqueue pointers when queues are deleted, guard the
direct queue users that can still run after a failed restore, and make
queue-less open/write paths fail immediately instead of sleeping or
succeeding against a broken port state.

Signed-off-by: JiaJia <physicalmtea@gmail.com>
---
Runtime evidence from a fault-injection-assisted hibernation run that
forces init_vqs() to fail with -ENOMEM:

  virtio_console virtio1: virtcons_restore: init_vqs failed: -12
  virtio-pci 0000:00:02.0: PM: failed to thaw: error -12

  KASAN report: slab-use-after-free in virtqueue_disable_cb+0x20/0xb0
  Read of size 4 at addr fff000000417c848 by task virtio-console-/1
  The buggy address belongs to the object at fff000000417c800
    which belongs to the cache kmalloc-256 of size 256
  The buggy address is located 72 bytes inside of
    freed 256-byte region [fff000000417c800, fff000000417c900)

  Call trace:
    virtqueue_disable_cb+0x20/0xb0
    virtcons_freeze+0x1f8/0x208
    virtio_device_freeze+0x98/0xd0
    virtio_pci_freeze+0x28/0x60
    pci_pm_freeze+0x94/0x1a8
    dpm_run_callback+0x118/0x4e0
    device_suspend+0x210/0xa40
    dpm_suspend+0x258/0x628
    dpm_suspend_start+0x78/0x88
    hibernation_restore+0x28/0x188
    load_image_and_restore+0xd0/0x118
    hibernate+0x2cc/0x430

 drivers/char/virtio_console.c | 57 ++++++++++++++++++++++++++++-------
 1 file changed, 46 insertions(+), 11 deletions(-)

diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 9a33217c6..05887bdb5 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -464,6 +464,8 @@ static struct port_buffer *get_inbuf(struct port *port)
 
 	if (port->inbuf)
 		return port->inbuf;
+	if (!port->in_vq)
+		return NULL;
 
 	buf = virtqueue_get_buf(port->in_vq, &len);
 	if (buf) {
@@ -547,6 +549,8 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 		return 0;
 
 	vq = portdev->c_ovq;
+	if (!vq)
+		return -ENODEV;
 
 	spin_lock(&portdev->c_ovq_lock);
 
@@ -583,8 +587,8 @@ static void reclaim_consumed_buffers(struct port *port)
 	struct port_buffer *buf;
 	unsigned int len;
 
-	if (!port->portdev) {
-		/* Device has been unplugged.  vqs are already gone. */
+	if (!port->portdev || !port->out_vq) {
+		/* Device has been unplugged or the queues have been torn down. */
 		return;
 	}
 	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
@@ -603,6 +607,8 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 	unsigned int len;
 
 	out_vq = port->out_vq;
+	if (!out_vq)
+		return -ENODEV;
 
 	spin_lock_irqsave(&port->outvq_lock, flags);
 
@@ -684,7 +690,9 @@ static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf,
 		spin_lock_irqsave(&port->inbuf_lock, flags);
 		port->inbuf = NULL;
 
-		if (add_inbuf(port->in_vq, buf) < 0)
+		if (!port->in_vq)
+			free_buf(buf, false);
+		else if (add_inbuf(port->in_vq, buf) < 0)
 			dev_warn(port->dev, "failed add_buf\n");
 
 		spin_unlock_irqrestore(&port->inbuf_lock, flags);
@@ -777,6 +785,9 @@ static int wait_port_writable(struct port *port, bool nonblock)
 {
 	int ret;
 
+	if (!port->out_vq)
+		return -ENODEV;
+
 	if (will_write_block(port)) {
 		if (nonblock)
 			return -EAGAIN;
@@ -786,8 +797,8 @@ static int wait_port_writable(struct port *port, bool nonblock)
 		if (ret < 0)
 			return ret;
 	}
-	/* Port got hot-unplugged. */
-	if (!port->guest_connected)
+	/* Port got hot-unplugged or lost its queues. */
+	if (!port->guest_connected || !port->out_vq)
 		return -ENODEV;
 
 	return 0;
@@ -919,6 +930,8 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 	 * regular pages to dma pages. And alloc_buf and free_buf must
 	 * support allocating and freeing such a list of dma-buffers.
 	 */
+	if (!port->portdev || !port->out_vq)
+		return -ENODEV;
 	if (is_rproc_serial(port->out_vq->vdev))
 		return -EINVAL;
 
@@ -1039,7 +1052,6 @@ static int port_fops_open(struct inode *inode, struct file *filp)
 		ret = -ENXIO;
 		goto out;
 	}
-
 	/* Allow only one process to open a particular port at a time */
 	spin_lock_irq(&port->inbuf_lock);
 	if (port->guest_connected) {
@@ -1047,6 +1059,11 @@ static int port_fops_open(struct inode *inode, struct file *filp)
 		ret = -EBUSY;
 		goto out;
 	}
+	if (!port->in_vq || !port->out_vq) {
+		spin_unlock_irq(&port->inbuf_lock);
+		ret = -ENODEV;
+		goto out;
+	}
 
 	port->guest_connected = true;
 	spin_unlock_irq(&port->inbuf_lock);
@@ -1141,7 +1158,8 @@ static ssize_t get_chars(u32 vtermno, u8 *buf, size_t count)
 		return -EPIPE;
 
 	/* If we don't have an input queue yet, we can't get input. */
-	BUG_ON(!port->in_vq);
+	if (!port->in_vq)
+		return -EPIPE;
 
 	return fill_readbuf(port, (__force u8 __user *)buf, count, false);
 }
@@ -1666,6 +1684,8 @@ static void control_work_handler(struct work_struct *work)
 
 	portdev = container_of(work, struct ports_device, control_work);
 	vq = portdev->c_ivq;
+	if (!vq)
+		return;
 
 	spin_lock(&portdev->c_ivq_lock);
 	while ((buf = virtqueue_get_buf(vq, &len))) {
@@ -1869,7 +1889,11 @@ static int init_vqs(struct ports_device *portdev)
 
 free:
 	kfree(portdev->out_vqs);
+	portdev->out_vqs = NULL;
 	kfree(portdev->in_vqs);
+	portdev->in_vqs = NULL;
+	portdev->c_ivq = NULL;
+	portdev->c_ovq = NULL;
 	kfree(vqs_info);
 	kfree(vqs);
 
@@ -1882,6 +1906,7 @@ static const struct file_operations portdev_fops = {
 
 static void remove_vqs(struct ports_device *portdev)
 {
+	struct port *port;
 	struct virtqueue *vq;
 
 	virtio_device_for_each_vq(portdev->vdev, vq) {
@@ -1892,9 +1917,17 @@ static void remove_vqs(struct ports_device *portdev)
 			free_buf(buf, true);
 		cond_resched();
 	}
+	list_for_each_entry(port, &portdev->ports, list) {
+		port->in_vq = NULL;
+		port->out_vq = NULL;
+	}
+	portdev->c_ivq = NULL;
+	portdev->c_ovq = NULL;
 	portdev->vdev->config->del_vqs(portdev->vdev);
 	kfree(portdev->in_vqs);
+	portdev->in_vqs = NULL;
 	kfree(portdev->out_vqs);
+	portdev->out_vqs = NULL;
 }
 
 static void virtcons_remove(struct virtio_device *vdev)
@@ -2094,7 +2127,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	virtio_reset_device(vdev);
 
-	if (use_multiport(portdev))
+	if (use_multiport(portdev) && portdev->c_ivq)
 		virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
 	cancel_work_sync(&portdev->config_work);
@@ -2102,12 +2135,14 @@ static int virtcons_freeze(struct virtio_device *vdev)
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
 	 */
-	if (use_multiport(portdev))
+	if (use_multiport(portdev) && portdev->c_ivq)
 		virtqueue_disable_cb(portdev->c_ivq);
 
 	list_for_each_entry(port, &portdev->ports, list) {
-		virtqueue_disable_cb(port->in_vq);
-		virtqueue_disable_cb(port->out_vq);
+		if (port->in_vq)
+			virtqueue_disable_cb(port->in_vq);
+		if (port->out_vq)
+			virtqueue_disable_cb(port->out_vq);
 		/*
 		 * We'll ask the host later if the new invocation has
 		 * the port opened or closed.
-- 
2.34.1

^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2026-04-16 23:38 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-16 23:37 [PATCH] virtio: console: fix use-after-free after failed restore physicalmtea

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox