From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Peter Maydell" <peter.maydell@linaro.org>,
"Jagannathan Raman" <jag.raman@oracle.com>,
qemu-ppc@nongnu.org, "Ilya Leoshkevich" <iii@linux.ibm.com>,
"Thomas Huth" <thuth@redhat.com>,
"Jason Herne" <jjherne@linux.ibm.com>,
"Peter Xu" <peterx@redhat.com>,
"Cédric Le Goater" <clg@redhat.com>,
kvm@vger.kernel.org,
"Christian Borntraeger" <borntraeger@linux.ibm.com>,
"Halil Pasic" <pasic@linux.ibm.com>,
"Matthew Rosato" <mjrosato@linux.ibm.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Elena Ufimtseva" <elena.ufimtseva@oracle.com>,
"Richard Henderson" <richard.henderson@linaro.org>,
"Harsh Prateek Bora" <harshpb@linux.ibm.com>,
"Fabiano Rosas" <farosas@suse.de>,
"Eric Farman" <farman@linux.ibm.com>,
qemu-arm@nongnu.org, qemu-s390x@nongnu.org,
"David Hildenbrand" <david@redhat.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Nicholas Piggin" <npiggin@gmail.com>
Subject: [PATCH v2 09/18] system/physmem: Un-inline cpu_physical_memory_range_includes_clean()
Date: Wed, 1 Oct 2025 19:54:38 +0200 [thread overview]
Message-ID: <20251001175448.18933-10-philmd@linaro.org> (raw)
In-Reply-To: <20251001175448.18933-1-philmd@linaro.org>
Avoid maintaining large functions in headers; rely on the
linker to optimize at link time.
cpu_physical_memory_all_dirty() doesn't involve any CPU,
remove the 'cpu_' prefix.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
include/system/ram_addr.h | 62 ++-------------------------------------
system/physmem.c | 60 +++++++++++++++++++++++++++++++++++++
2 files changed, 63 insertions(+), 59 deletions(-)
diff --git a/include/system/ram_addr.h b/include/system/ram_addr.h
index cdf25c315be..2dcca260b2b 100644
--- a/include/system/ram_addr.h
+++ b/include/system/ram_addr.h
@@ -142,69 +142,13 @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
-static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
- ram_addr_t length,
- unsigned client)
-{
- DirtyMemoryBlocks *blocks;
- unsigned long end, page;
- unsigned long idx, offset, base;
- bool dirty = true;
-
- assert(client < DIRTY_MEMORY_NUM);
-
- end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
- page = start >> TARGET_PAGE_BITS;
-
- RCU_READ_LOCK_GUARD();
-
- blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
-
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- unsigned long num = next - base;
- unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
- if (found < num) {
- dirty = false;
- break;
- }
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
- }
-
- return dirty;
-}
-
bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client);
bool cpu_physical_memory_is_clean(ram_addr_t addr);
-static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
- ram_addr_t length,
- uint8_t mask)
-{
- uint8_t ret = 0;
-
- if (mask & (1 << DIRTY_MEMORY_VGA) &&
- !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
- ret |= (1 << DIRTY_MEMORY_VGA);
- }
- if (mask & (1 << DIRTY_MEMORY_CODE) &&
- !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
- ret |= (1 << DIRTY_MEMORY_CODE);
- }
- if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
- !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
- ret |= (1 << DIRTY_MEMORY_MIGRATION);
- }
- return ret;
-}
+uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask);
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
unsigned client)
diff --git a/system/physmem.c b/system/physmem.c
index fb6a7378ff7..2667f289044 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -954,6 +954,66 @@ bool cpu_physical_memory_is_clean(ram_addr_t addr)
return !(vga && code && migration);
}
+static bool physical_memory_all_dirty(ram_addr_t start, ram_addr_t length,
+ unsigned client)
+{
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = true;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ RCU_READ_LOCK_GUARD();
+
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_zero_bit(blocks->blocks[idx],
+ num, offset);
+ if (found < num) {
+ dirty = false;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+
+ return dirty;
+}
+
+uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ uint8_t ret = 0;
+
+ if (mask & (1 << DIRTY_MEMORY_VGA) &&
+ !physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
+ ret |= (1 << DIRTY_MEMORY_VGA);
+ }
+ if (mask & (1 << DIRTY_MEMORY_CODE) &&
+ !physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
+ ret |= (1 << DIRTY_MEMORY_CODE);
+ }
+ if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
+ !physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
+ ret |= (1 << DIRTY_MEMORY_MIGRATION);
+ }
+ return ret;
+}
+
/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
--
2.51.0
next prev parent reply other threads:[~2025-10-01 17:58 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-01 17:54 [PATCH v2 00/18] system/physmem: Extract API out of 'system/ram_addr.h' header Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 01/18] system/ram_addr: Remove unnecessary 'exec/cpu-common.h' header Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 02/18] accel/kvm: Include missing 'exec/target_page.h' header Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 03/18] hw/s390x/s390-stattrib: " Philippe Mathieu-Daudé
2025-10-02 14:18 ` Eric Farman
2025-10-01 17:54 ` [PATCH v2 04/18] hw/vfio/listener: " Philippe Mathieu-Daudé
2025-10-02 8:31 ` Cédric Le Goater
2025-10-01 17:54 ` [PATCH v2 05/18] target/arm/tcg/mte: " Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 06/18] hw: Remove unnecessary 'system/ram_addr.h' header Philippe Mathieu-Daudé
2025-10-01 19:32 ` Jag Raman
2025-10-02 8:37 ` Cédric Le Goater
2025-10-02 15:59 ` Harsh Prateek Bora
2025-10-02 17:38 ` Philippe Mathieu-Daudé
2025-10-02 16:45 ` Eric Farman
2025-10-01 17:54 ` [PATCH v2 07/18] system/physmem: Un-inline cpu_physical_memory_get_dirty_flag() Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 08/18] system/physmem: Un-inline cpu_physical_memory_is_clean() Philippe Mathieu-Daudé
2025-10-01 17:54 ` Philippe Mathieu-Daudé [this message]
2025-10-01 17:54 ` [PATCH v2 10/18] system/physmem: Un-inline cpu_physical_memory_set_dirty_flag() Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 11/18] system/physmem: Un-inline cpu_physical_memory_set_dirty_range() Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 12/18] system/physmem: Remove _WIN32 #ifdef'ry Philippe Mathieu-Daudé
2025-10-01 21:00 ` Richard Henderson
2025-10-01 17:54 ` [PATCH v2 13/18] system/physmem: Un-inline cpu_physical_memory_set_dirty_lebitmap() Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 14/18] system/physmem: Un-inline cpu_physical_memory_dirty_bits_cleared() Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 15/18] system/physmem: Reduce cpu_physical_memory_clear_dirty_range() scope Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 16/18] system/physmem: Reduce cpu_physical_memory_sync_dirty_bitmap() scope Philippe Mathieu-Daudé
2025-10-01 17:54 ` [PATCH v2 17/18] system/physmem: Drop 'cpu_' prefix in Physical Memory API Philippe Mathieu-Daudé
2025-10-02 8:39 ` Cédric Le Goater
2025-10-01 17:54 ` [PATCH v2 18/18] system/physmem: Extract API out of 'system/ram_addr.h' header Philippe Mathieu-Daudé
2025-10-02 8:31 ` Cédric Le Goater
2025-10-02 8:40 ` Cédric Le Goater
2025-10-03 20:54 ` [PATCH v2 00/18] " Philippe Mathieu-Daudé
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251001175448.18933-10-philmd@linaro.org \
--to=philmd@linaro.org \
--cc=alex.williamson@redhat.com \
--cc=borntraeger@linux.ibm.com \
--cc=clg@redhat.com \
--cc=david@redhat.com \
--cc=elena.ufimtseva@oracle.com \
--cc=farman@linux.ibm.com \
--cc=farosas@suse.de \
--cc=harshpb@linux.ibm.com \
--cc=iii@linux.ibm.com \
--cc=jag.raman@oracle.com \
--cc=jjherne@linux.ibm.com \
--cc=kvm@vger.kernel.org \
--cc=mjrosato@linux.ibm.com \
--cc=mst@redhat.com \
--cc=npiggin@gmail.com \
--cc=pasic@linux.ibm.com \
--cc=pbonzini@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=peterx@redhat.com \
--cc=qemu-arm@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=qemu-ppc@nongnu.org \
--cc=qemu-s390x@nongnu.org \
--cc=richard.henderson@linaro.org \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).