From: Cornelia Huck <cohuck@redhat.com>
To: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-s390x@nongnu.org, qemu-devel@nongnu.org, David Hildenbrand <david@redhat.com>, Cornelia Huck <cohuck@redhat.com>
Subject: [Qemu-devel] [PULL 19/19] exec: Introduce qemu_maxrampagesize() and rename qemu_getrampagesize()
Date: Thu, 25 Apr 2019 15:21:34 +0200
Message-ID: <20190425132134.2839-20-cohuck@redhat.com>
In-Reply-To: <20190425132134.2839-1-cohuck@redhat.com>

From: David Hildenbrand <david@redhat.com>

Rename qemu_getrampagesize() to qemu_minrampagesize(). While at it,
properly rename find_max_supported_pagesize() to
find_min_backend_pagesize().

s390x is actually interested in the maximum RAM page size, so introduce
and use qemu_maxrampagesize().

Add a TODO, indicating that looking at any mapped memory backends is not
100% correct in some cases.

Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20190417113143.5551-3-david@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
 exec.c                     | 44 ++++++++++++++++++++++++++++++++++----
 hw/ppc/spapr_caps.c        |  4 ++--
 hw/s390x/s390-virtio-ccw.c |  2 +-
 hw/vfio/spapr.c            |  2 +-
 include/exec/ram_addr.h    |  3 ++-
 target/ppc/kvm.c           |  2 +-
 6 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/exec.c b/exec.c
index 2646207661d4..bb1dff423aaf 100644
--- a/exec.c
+++ b/exec.c
@@ -1688,7 +1688,7 @@ void ram_block_dump(Monitor *mon)
  * when we actually open and map them. Iterate over the file
  * descriptors instead, and use qemu_fd_getpagesize().
  */
-static int find_max_supported_pagesize(Object *obj, void *opaque)
+static int find_min_backend_pagesize(Object *obj, void *opaque)
 {
     long *hpsize_min = opaque;
 
@@ -1704,7 +1704,27 @@ static int find_max_supported_pagesize(Object *obj, void *opaque)
     return 0;
 }
 
-long qemu_getrampagesize(void)
+static int find_max_backend_pagesize(Object *obj, void *opaque)
+{
+    long *hpsize_max = opaque;
+
+    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
+        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
+        long hpsize = host_memory_backend_pagesize(backend);
+
+        if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
+            *hpsize_max = hpsize;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * TODO: We assume right now that all mapped host memory backends are
+ * used as RAM, however some might be used for different purposes.
+ */
+long qemu_minrampagesize(void)
 {
     long hpsize = LONG_MAX;
     long mainrampagesize;
@@ -1724,7 +1744,7 @@ long qemu_getrampagesize(void)
      */
     memdev_root = object_resolve_path("/objects", NULL);
     if (memdev_root) {
-        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
+        object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
     }
     if (hpsize == LONG_MAX) {
         /* No additional memory regions found ==> Report main RAM page size */
@@ -1747,8 +1767,24 @@ long qemu_getrampagesize(void)
     return hpsize;
 }
+
+long qemu_maxrampagesize(void)
+{
+    long pagesize = qemu_mempath_getpagesize(mem_path);
+    Object *memdev_root = object_resolve_path("/objects", NULL);
+
+    if (memdev_root) {
+        object_child_foreach(memdev_root, find_max_backend_pagesize,
+                             &pagesize);
+    }
+    return pagesize;
+}
 #else
-long qemu_getrampagesize(void)
+long qemu_minrampagesize(void)
+{
+    return getpagesize();
+}
+long qemu_maxrampagesize(void)
 {
     return getpagesize();
 }
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index edc5ed0e0c4e..9b1c10baa6c5 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -347,7 +347,7 @@ static void cap_hpt_maxpagesize_apply(SpaprMachineState *spapr,
         warn_report("Many guests require at least 64kiB hpt-max-page-size");
     }
 
-    spapr_check_pagesize(spapr, qemu_getrampagesize(), errp);
+    spapr_check_pagesize(spapr, qemu_minrampagesize(), errp);
 }
 
 static bool spapr_pagesize_cb(void *opaque, uint32_t seg_pshift,
@@ -609,7 +609,7 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr,
         uint8_t mps;
 
         if (kvmppc_hpt_needs_host_contiguous_pages()) {
-            mps = ctz64(qemu_getrampagesize());
+            mps = ctz64(qemu_minrampagesize());
         } else {
             mps = 34; /* allow everything up to 16GiB, i.e. everything */
         }
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 3be567965736..7e256d3d31b8 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -188,7 +188,7 @@ static void s390_memory_init(ram_addr_t mem_size)
      * Configure the maximum page size. As no memory devices were created
      * yet, this is the page size of initial memory only.
      */
-    s390_set_max_pagesize(qemu_getrampagesize(), &local_err);
+    s390_set_max_pagesize(qemu_maxrampagesize(), &local_err);
     if (local_err) {
         error_report_err(local_err);
         exit(EXIT_FAILURE);
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index 57fe758e5441..96c0ad9d9b7e 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -148,7 +148,7 @@ int vfio_spapr_create_window(VFIOContainer *container,
     uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr);
     unsigned entries, bits_total, bits_per_level, max_levels;
     struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
-    long rampagesize = qemu_getrampagesize();
+    long rampagesize = qemu_minrampagesize();
 
     /*
      * The host might not support the guest supported IOMMU page size,
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9ecd911c3e78..139ad79390fa 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -73,7 +73,8 @@ static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
 
 bool ramblock_is_pmem(RAMBlock *rb);
 
-long qemu_getrampagesize(void);
+long qemu_minrampagesize(void);
+long qemu_maxrampagesize(void);
 
 /**
  * qemu_ram_alloc_from_file,
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 2427c8ee13ae..59d92c42751e 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2136,7 +2136,7 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
     /* Find the largest hardware supported page size that's less than
      * or equal to the (logical) backing page size of guest RAM */
     kvm_get_smmu_info(&info, &error_fatal);
-    rampagesize = qemu_getrampagesize();
+    rampagesize = qemu_minrampagesize();
     best_page_shift = 0;
 
     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
--
2.17.2
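
A rough usage sketch of the two helpers this patch introduces. Everything below is illustrative only and not part of the patch: the function name, the 64 KiB and 1 GiB thresholds, and the error messages are invented for the example; only qemu_minrampagesize(), qemu_maxrampagesize(), error_setg() and the KiB/GiB macros are real QEMU interfaces.

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/ram_addr.h"

/*
 * Illustration only: qemu_minrampagesize() returns the smallest page size
 * backing any mapped RAM, the right bound when every backend must satisfy
 * a constraint (as in the spapr and VFIO callers above), while
 * qemu_maxrampagesize() returns the largest, which is what the s390x
 * s390_set_max_pagesize() caller needs.
 */
static void example_check_backing_pagesizes(Error **errp)
{
    long min_ps = qemu_minrampagesize();
    long max_ps = qemu_maxrampagesize();

    if (min_ps < 64 * KiB) {
        /* a spapr-like lower bound: every backend needs at least 64k pages */
        error_setg(errp, "backing page size %ld is too small", min_ps);
        return;
    }
    if (max_ps > 1 * GiB) {
        /* an s390x-like upper bound on the largest backing page size */
        error_setg(errp, "backing page size %ld is not supported", max_ps);
    }
}

The point is only the choice of helper: the minimum when all backends must meet a lower bound, the maximum when the largest backing page is what constrains the machine.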