qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
To: qemu-arm@nongnu.org,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	qemu-devel@nongnu.org
Cc: "Anthony Perard" <anthony.perard@citrix.com>,
	"Paul Durrant" <paul@xen.org>,
	"David Woodhouse" <dwmw@amazon.co.uk>,
	"Thomas Huth" <thuth@redhat.com>,
	qemu-arm@nongnu.org,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	xen-devel@lists.xenproject.org,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Richard Henderson" <richard.henderson@linaro.org>,
	"Eduardo Habkost" <eduardo@habkost.net>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>
Subject: Re: [PATCH-for-9.0 3/9] hw/xen/hvm: Get target page size at runtime
Date: Thu, 07 Mar 2024 13:49:11 +0200	[thread overview]
Message-ID: <9z7qq.40e122bkrid@linaro.org> (raw)
In-Reply-To: <20231114163123.74888-4-philmd@linaro.org>

On Tue, 14 Nov 2023 18:31, Philippe Mathieu-Daudé <philmd@linaro.org> wrote:
>In order to build this file once for all targets, replace:
>
>  TARGET_PAGE_BITS -> qemu_target_page_bits()
>  TARGET_PAGE_SIZE -> qemu_target_page_size()
>  TARGET_PAGE_MASK -> -qemu_target_page_size()
>
>Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
>---
> hw/i386/xen/xen-hvm.c | 62 +++++++++++++++++++++++++++----------------
> 1 file changed, 39 insertions(+), 23 deletions(-)
>
>diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
>index 3b10425986..b64204ea94 100644
>--- a/hw/i386/xen/xen-hvm.c
>+++ b/hw/i386/xen/xen-hvm.c
>@@ -22,6 +22,7 @@
> 
> #include "hw/xen/xen-hvm-common.h"
> #include <xen/hvm/e820.h>
>+#include "exec/target_page.h"
> #include "cpu.h"
> 
> static MemoryRegion ram_640k, ram_lo, ram_hi;
>@@ -247,6 +248,9 @@ static int xen_add_to_physmap(XenIOState *state,
>                               MemoryRegion *mr,
>                               hwaddr offset_within_region)
> {
>+    unsigned target_page_bits = qemu_target_page_bits();
>+    int page_size = qemu_target_page_size();
>+    int page_mask = -page_size;
>     unsigned long nr_pages;
>     int rc = 0;
>     XenPhysmap *physmap = NULL;
>@@ -254,7 +258,7 @@ static int xen_add_to_physmap(XenIOState *state,
>     hwaddr phys_offset = memory_region_get_ram_addr(mr);
>     const char *mr_name;
> 
>-    if (get_physmapping(start_addr, size, TARGET_PAGE_MASK)) {
>+    if (get_physmapping(start_addr, size, page_mask)) {
>         return 0;
>     }
>     if (size <= 0) {
>@@ -294,9 +298,9 @@ go_physmap:
>         return 0;
>     }
> 
>-    pfn = phys_offset >> TARGET_PAGE_BITS;
>-    start_gpfn = start_addr >> TARGET_PAGE_BITS;
>-    nr_pages = size >> TARGET_PAGE_BITS;
>+    pfn = phys_offset >> target_page_bits;
>+    start_gpfn = start_addr >> target_page_bits;
>+    nr_pages = size >> target_page_bits;
>     rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
>                                         start_gpfn);
>     if (rc) {
>@@ -310,8 +314,8 @@ go_physmap:
>     }
> 
>     rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
>-                                   start_addr >> TARGET_PAGE_BITS,
>-                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
>+                                   start_addr >> target_page_bits,
>+                                   (start_addr + size - 1) >> target_page_bits,
>                                    XEN_DOMCTL_MEM_CACHEATTR_WB);
>     if (rc) {
>         error_report("pin_memory_cacheattr failed: %s", strerror(errno));
>@@ -323,11 +327,14 @@ static int xen_remove_from_physmap(XenIOState *state,
>                                    hwaddr start_addr,
>                                    ram_addr_t size)
> {
>+    unsigned target_page_bits = qemu_target_page_bits();
>+    int page_size = qemu_target_page_size();
>+    int page_mask = -page_size;
>     int rc = 0;
>     XenPhysmap *physmap = NULL;
>     hwaddr phys_offset = 0;
> 
>-    physmap = get_physmapping(start_addr, size, TARGET_PAGE_MASK);
>+    physmap = get_physmapping(start_addr, size, page_mask);
>     if (physmap == NULL) {
>         return -1;
>     }
>@@ -338,9 +345,9 @@ static int xen_remove_from_physmap(XenIOState *state,
>     DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
>             "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);
> 
>-    size >>= TARGET_PAGE_BITS;
>-    start_addr >>= TARGET_PAGE_BITS;
>-    phys_offset >>= TARGET_PAGE_BITS;
>+    size >>= target_page_bits;
>+    start_addr >>= target_page_bits;
>+    phys_offset >>= target_page_bits;
>     rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
>                                         phys_offset);
>     if (rc) {
>@@ -369,13 +376,16 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
>                                   hwaddr start_addr,
>                                   ram_addr_t size)
> {
>-    hwaddr npages = size >> TARGET_PAGE_BITS;
>+    unsigned target_page_bits = qemu_target_page_bits();
>+    int page_size = qemu_target_page_size();
>+    int page_mask = -page_size;
>+    hwaddr npages = size >> target_page_bits;
>     const int width = sizeof(unsigned long) * 8;
>     size_t bitmap_size = DIV_ROUND_UP(npages, width);
>     int rc, i, j;
>     const XenPhysmap *physmap = NULL;
> 
>-    physmap = get_physmapping(start_addr, size, TARGET_PAGE_MASK);
>+    physmap = get_physmapping(start_addr, size, page_mask);
>     if (physmap == NULL) {
>         /* not handled */
>         return;
>@@ -389,7 +399,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
>         return;
>     }
> 
>-    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
>+    rc = xen_track_dirty_vram(xen_domid, start_addr >> target_page_bits,
>                               npages, dirty_bitmap);
>     if (rc < 0) {
> #ifndef ENODATA
>@@ -410,8 +420,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
>             j = ctzl(map);
>             map &= ~(1ul << j);
>             memory_region_set_dirty(framebuffer,
>-                                    (i * width + j) * TARGET_PAGE_SIZE,
>-                                    TARGET_PAGE_SIZE);
>+                                    (i * width + j) * page_size, page_size);
>         };
>     }
> }
>@@ -631,17 +640,21 @@ void xen_register_framebuffer(MemoryRegion *mr)
> 
> void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
> {
>+    unsigned target_page_bits = qemu_target_page_bits();
>+    int page_size = qemu_target_page_size();
>+    int page_mask = -page_size;
>+
>     if (unlikely(xen_in_migration)) {
>         int rc;
>         ram_addr_t start_pfn, nb_pages;
> 
>-        start = xen_phys_offset_to_gaddr(start, length, TARGET_PAGE_MASK);
>+        start = xen_phys_offset_to_gaddr(start, length, page_mask);
> 
>         if (length == 0) {
>-            length = TARGET_PAGE_SIZE;
>+            length = page_size;
>         }
>-        start_pfn = start >> TARGET_PAGE_BITS;
>-        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
>+        start_pfn = start >> target_page_bits;
>+        nb_pages = ((start + length + page_size - 1) >> target_page_bits)
>             - start_pfn;
>         rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
>         if (rc) {
>@@ -664,6 +677,9 @@ void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
> void xen_arch_set_memory(XenIOState *state, MemoryRegionSection *section,
>                          bool add)
> {
>+    unsigned target_page_bits = qemu_target_page_bits();
>+    int page_size = qemu_target_page_size();
>+    int page_mask = -page_size;
>     hwaddr start_addr = section->offset_within_address_space;
>     ram_addr_t size = int128_get64(section->size);
>     bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
>@@ -679,8 +695,8 @@ void xen_arch_set_memory(XenIOState *state, MemoryRegionSection *section,
> 
>     trace_xen_client_set_memory(start_addr, size, log_dirty);
> 
>-    start_addr &= TARGET_PAGE_MASK;
>-    size = ROUND_UP(size, TARGET_PAGE_SIZE);
>+    start_addr &= page_mask;
>+    size = ROUND_UP(size, page_size);
> 
>     if (add) {
>         if (!memory_region_is_rom(section->mr)) {
>@@ -689,8 +705,8 @@ void xen_arch_set_memory(XenIOState *state, MemoryRegionSection *section,
>         } else {
>             mem_type = HVMMEM_ram_ro;
>             if (xen_set_mem_type(xen_domid, mem_type,
>-                                 start_addr >> TARGET_PAGE_BITS,
>-                                 size >> TARGET_PAGE_BITS)) {
>+                                 start_addr >> target_page_bits,
>+                                 size >> target_page_bits)) {
>                 DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n",
>                         start_addr);
>             }
>-- 
>2.41.0
>
>


Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>


  reply	other threads:[~2024-03-07 11:55 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-11-14 16:31 [RFC PATCH-for-9.0 0/9] hw/xen: Have ARM targets use common xen_memory_listener Philippe Mathieu-Daudé
2023-11-14 16:31 ` [PATCH-for-9.0 1/9] hw/xen/hvm: Inline TARGET_PAGE_ALIGN() macro Philippe Mathieu-Daudé
2024-03-07 11:43   ` Manos Pitsidianakis
2023-11-14 16:31 ` [PATCH-for-9.0 2/9] hw/xen/hvm: Propagate page_mask to a pair of functions Philippe Mathieu-Daudé
2024-03-07 11:46   ` Manos Pitsidianakis
2023-11-14 16:31 ` [PATCH-for-9.0 3/9] hw/xen/hvm: Get target page size at runtime Philippe Mathieu-Daudé
2024-03-07 11:49   ` Manos Pitsidianakis [this message]
2023-11-14 16:31 ` [PATCH-for-9.0 4/9] hw/xen/hvm: Expose xen_memory_listener declaration Philippe Mathieu-Daudé
2024-03-07 11:54   ` Manos Pitsidianakis
2023-11-14 16:31 ` [PATCH-for-9.0 5/9] hw/xen/hvm: Expose xen_read_physmap() prototype Philippe Mathieu-Daudé
2024-03-07 11:55   ` Manos Pitsidianakis
2023-11-14 16:31 ` [RFC PATCH-for-9.0 6/9] hw/xen/hvm: Initialize xen_physmap QLIST in xen_read_physmap() Philippe Mathieu-Daudé
2024-03-07 11:58   ` Manos Pitsidianakis
2023-11-14 16:31 ` [PATCH-for-9.0 7/9] hw/xen/hvm: Extract common code to xen-hvm-common.c Philippe Mathieu-Daudé
2024-03-07 12:01   ` Manos Pitsidianakis
2023-11-14 16:31 ` [RFC PATCH-for-9.0 8/9] hw/xen/hvm: Merge xen-hvm-common.c files Philippe Mathieu-Daudé
2024-03-07 12:03   ` Manos Pitsidianakis
2023-11-14 16:31 ` [PATCH-for-9.0 9/9] hw/xen/hvm: Inline xen_arch_set_memory() Philippe Mathieu-Daudé
2024-03-07 12:11   ` Manos Pitsidianakis
2023-12-13 17:00 ` [RFC PATCH-for-9.0 0/9] hw/xen: Have ARM targets use common xen_memory_listener Philippe Mathieu-Daudé
2024-03-06 17:03   ` Philippe Mathieu-Daudé

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=9z7qq.40e122bkrid@linaro.org \
    --to=manos.pitsidianakis@linaro.org \
    --cc=alex.bennee@linaro.org \
    --cc=anthony.perard@citrix.com \
    --cc=dwmw@amazon.co.uk \
    --cc=eduardo@habkost.net \
    --cc=marcel.apfelbaum@gmail.com \
    --cc=mst@redhat.com \
    --cc=paul@xen.org \
    --cc=pbonzini@redhat.com \
    --cc=philmd@linaro.org \
    --cc=qemu-arm@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=richard.henderson@linaro.org \
    --cc=sstabellini@kernel.org \
    --cc=thuth@redhat.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).