qemu-devel.nongnu.org archive mirror
* [PATCH v1] softmmu/physmem: fix memory leak in dirty_memory_extend()
@ 2024-08-27  8:37 David Hildenbrand
From: David Hildenbrand @ 2024-08-27  8:37 UTC (permalink / raw)
  To: qemu-devel
  Cc: David Hildenbrand, Peter Maydell, qemu-stable, Stefan Hajnoczi,
	Paolo Bonzini, Peter Xu, Philippe Mathieu-Daudé

As reported by Peter, we might be leaking memory when removing the
highest RAMBlock (in the weird ram_addr_t space) and adding a new one.

We will fail to realize that we already allocated bitmaps for more
dirty memory blocks, and effectively discard the pointers to them.
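
For illustration only -- a minimal, stand-alone sketch of the pattern
described above, using made-up names (BlocksSketch, extend_buggy), not the
QEMU code: the "old" block count is recomputed from the current RAM layout
instead of being remembered, so after the highest RAMBlock is removed it can
undercount the bitmaps that were really allocated, and the copy silently
drops the pointers to the rest:

    #include <glib.h>
    #include <string.h>

    typedef struct {
        size_t pad;               /* stand-in for the rcu_head member */
        unsigned long *blocks[];  /* one separately allocated bitmap per slot */
    } BlocksSketch;

    static BlocksSketch *extend_buggy(BlocksSketch *old, size_t old_n,
                                      size_t new_n)
    {
        BlocksSketch *new_b = g_malloc(sizeof(*new_b) +
                                       sizeof(new_b->blocks[0]) * new_n);

        /* If old_n undercounts the allocated bitmaps, everything past
         * old_n is copied nowhere and leaked once the old container goes. */
        memcpy(new_b->blocks, old->blocks, old_n * sizeof(old->blocks[0]));
        for (size_t i = old_n; i < new_n; i++) {
            new_b->blocks[i] = g_malloc0(4096);  /* fresh, zeroed bitmap */
        }
        g_free(old);  /* frees the container only, not the lost bitmaps */
        return new_b;
    }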

Fix it by getting rid of last_ram_page() and simply storing the number
of dirty memory blocks that have been allocated. We'll store the number
of blocks along with the actual pointer to keep it simple.

It looks like this leak was introduced when we switched from using a single
bitmap_zero_extend() to allocating multiple bitmaps: bitmap_zero_extend()
relies on g_renew(), which should have taken care of this.
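
To illustrate the contrast drawn above -- a sketch with a made-up name
(grow_bitmap), not QEMU's actual bitmap_zero_extend(): growing one flat
bitmap via g_renew()/g_realloc() keeps a single allocation whose contents
are preserved, so there is no set of sub-allocations whose count could be
misjudged and leaked:

    #include <glib.h>
    #include <string.h>

    static unsigned long *grow_bitmap(unsigned long *bitmap,
                                      size_t old_longs, size_t new_longs)
    {
        /* g_renew() preserves the existing contents up to old_longs... */
        bitmap = g_renew(unsigned long, bitmap, new_longs);
        /* ...so only the newly added tail needs to be zeroed. */
        memset(bitmap + old_longs, 0,
               (new_longs - old_longs) * sizeof(*bitmap));
        return bitmap;
    }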

Resolves: https://lkml.kernel.org/r/CAFEAcA-k7a+VObGAfCFNygQNfCKL=AfX6A4kScq=VSSK0peqPg@mail.gmail.com
Reported-by: Peter Maydell <peter.maydell@linaro.org>
Fixes: 5b82b703b69a ("memory: RCU ram_list.dirty_memory[] for safe RAM hotplug")
Cc: qemu-stable@nongnu.org
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: "Philippe Mathieu-Daudé" <philmd@linaro.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/exec/ramlist.h |  1 +
 system/physmem.c       | 44 ++++++++++++++----------------------------
 2 files changed, 16 insertions(+), 29 deletions(-)

diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index 2ad2a81acc..f2a965f293 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -41,6 +41,7 @@ typedef struct RAMBlockNotifier RAMBlockNotifier;
 #define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
 typedef struct {
     struct rcu_head rcu;
+    unsigned int num_blocks;
     unsigned long *blocks[];
 } DirtyMemoryBlocks;
 
diff --git a/system/physmem.c b/system/physmem.c
index 94600a33ec..fa48ff8333 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -1534,18 +1534,6 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
     return offset;
 }
 
-static unsigned long last_ram_page(void)
-{
-    RAMBlock *block;
-    ram_addr_t last = 0;
-
-    RCU_READ_LOCK_GUARD();
-    RAMBLOCK_FOREACH(block) {
-        last = MAX(last, block->offset + block->max_length);
-    }
-    return last >> TARGET_PAGE_BITS;
-}
-
 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
 {
     int ret;
@@ -1799,28 +1787,31 @@ void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
 }
 
 /* Called with ram_list.mutex held */
-static void dirty_memory_extend(ram_addr_t old_ram_size,
-                                ram_addr_t new_ram_size)
+static void dirty_memory_extend(ram_addr_t new_ram_size)
 {
-    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
-                                             DIRTY_MEMORY_BLOCK_SIZE);
     ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                              DIRTY_MEMORY_BLOCK_SIZE);
     int i;
 
-    /* Only need to extend if block count increased */
-    if (new_num_blocks <= old_num_blocks) {
-        return;
-    }
-
     for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
         DirtyMemoryBlocks *old_blocks;
         DirtyMemoryBlocks *new_blocks;
+        ram_addr_t old_num_blocks = 0;
         int j;
 
         old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
+        if (old_blocks) {
+            old_num_blocks = old_blocks->num_blocks;
+
+            /* Only need to extend if block count increased */
+            if (new_num_blocks <= old_num_blocks) {
+                return;
+            }
+        }
+
         new_blocks = g_malloc(sizeof(*new_blocks) +
                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
+        new_blocks->num_blocks = new_num_blocks;
 
         if (old_num_blocks) {
             memcpy(new_blocks->blocks, old_blocks->blocks,
@@ -1846,11 +1837,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
     RAMBlock *block;
     RAMBlock *last_block = NULL;
     bool free_on_error = false;
-    ram_addr_t old_ram_size, new_ram_size;
+    ram_addr_t ram_size;
     Error *err = NULL;
 
-    old_ram_size = last_ram_page();
-
     qemu_mutex_lock_ramlist();
     new_block->offset = find_ram_offset(new_block->max_length);
 
@@ -1901,11 +1890,8 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
         }
     }
 
-    new_ram_size = MAX(old_ram_size,
-              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
-    if (new_ram_size > old_ram_size) {
-        dirty_memory_extend(old_ram_size, new_ram_size);
-    }
+    ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
+    dirty_memory_extend(ram_size);
     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
      * QLIST (which has an RCU-friendly variant) does not have insertion at
      * tail, so save the last element in last_block.
-- 
2.46.0



Thread overview: 11+ messages
2024-08-27  8:37 [PATCH v1] softmmu/physmem: fix memory leak in dirty_memory_extend() David Hildenbrand
2024-08-27 16:16 ` Peter Maydell
2024-08-27 16:52 ` Stefan Hajnoczi
2024-08-27 17:23   ` David Hildenbrand
2024-08-27 17:28     ` Stefan Hajnoczi
2024-08-27 17:50       ` Peter Xu
2024-08-27 17:55         ` David Hildenbrand
2024-08-27 17:57 ` Peter Xu
2024-08-27 18:00   ` David Hildenbrand
2024-08-27 18:41     ` Peter Xu
2024-08-28  7:20       ` David Hildenbrand
