From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini <sstabellini@kernel.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Julien Grall <julien.grall@arm.com>,
Jan Beulich <jbeulich@suse.com>,
boris.ostrovsky@oracle.com,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH v2 14/30] xen/mm: add a ceil suffix to current page calculation routine
Date: Tue, 27 Sep 2016 17:57:09 +0200 [thread overview]
Message-ID: <1474991845-27962-15-git-send-email-roger.pau@citrix.com> (raw)
In-Reply-To: <1474991845-27962-1-git-send-email-roger.pau@citrix.com>
... and introduce a floor variant.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
xen/arch/arm/domain.c | 2 +-
xen/arch/arm/domain_build.c | 16 +++++-----------
xen/arch/arm/kernel.c | 4 ++--
xen/arch/arm/percpu.c | 3 ++-
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/domain_build.c | 4 ++--
xen/arch/x86/hvm/svm/nestedsvm.c | 8 ++++----
xen/arch/x86/hvm/svm/vmcb.c | 5 +++--
xen/arch/x86/percpu.c | 3 ++-
xen/arch/x86/smpboot.c | 4 ++--
xen/common/kexec.c | 2 +-
xen/common/page_alloc.c | 2 +-
xen/common/tmem_xen.c | 2 +-
xen/common/xmalloc_tlsf.c | 6 +++---
xen/drivers/char/console.c | 6 +++---
xen/drivers/char/serial.c | 2 +-
xen/drivers/passthrough/amd/iommu_init.c | 17 +++++++++--------
xen/drivers/passthrough/pci.c | 2 +-
xen/include/asm-x86/flushtlb.h | 2 +-
xen/include/xen/mm.h | 12 +++++++++++-
20 files changed, 56 insertions(+), 48 deletions(-)
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 20bb2ba..1f6b0a4 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -661,7 +661,7 @@ void arch_domain_destroy(struct domain *d)
free_xenheap_page(d->shared_info);
#ifdef CONFIG_ACPI
free_xenheap_pages(d->arch.efi_acpi_table,
- get_order_from_bytes(d->arch.efi_acpi_len));
+ get_order_from_bytes_ceil(d->arch.efi_acpi_len));
#endif
domain_io_free(d);
}
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 35ab08d..cabe030 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -73,14 +73,8 @@ struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
static unsigned int get_11_allocation_size(paddr_t size)
{
- /*
- * get_order_from_bytes returns the order greater than or equal to
- * the given size, but we need less than or equal. Adding one to
- * the size pushes an evenly aligned size into the next order, so
- * we can then unconditionally subtract 1 from the order which is
- * returned.
- */
- return get_order_from_bytes(size + 1) - 1;
+
+ return get_order_from_bytes_floor(size);
}
/*
@@ -238,8 +232,8 @@ fail:
static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
{
const unsigned int min_low_order =
- get_order_from_bytes(min_t(paddr_t, dom0_mem, MB(128)));
- const unsigned int min_order = get_order_from_bytes(MB(4));
+ get_order_from_bytes_ceil(min_t(paddr_t, dom0_mem, MB(128)));
+ const unsigned int min_order = get_order_from_bytes_ceil(MB(4));
struct page_info *pg;
unsigned int order = get_11_allocation_size(kinfo->unassigned_mem);
int i;
@@ -1828,7 +1822,7 @@ static int prepare_acpi(struct domain *d, struct kernel_info *kinfo)
if ( rc != 0 )
return rc;
- order = get_order_from_bytes(d->arch.efi_acpi_len);
+ order = get_order_from_bytes_ceil(d->arch.efi_acpi_len);
d->arch.efi_acpi_table = alloc_xenheap_pages(order, 0);
if ( d->arch.efi_acpi_table == NULL )
{
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 3f6cce3..0d9986b 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -291,7 +291,7 @@ static __init int kernel_decompress(struct bootmodule *mod)
return -EFAULT;
output_size = output_length(input, size);
- kernel_order_out = get_order_from_bytes(output_size);
+ kernel_order_out = get_order_from_bytes_ceil(output_size);
pages = alloc_domheap_pages(NULL, kernel_order_out, 0);
if ( pages == NULL )
{
@@ -463,7 +463,7 @@ static int kernel_elf_probe(struct kernel_info *info,
memset(&info->elf.elf, 0, sizeof(info->elf.elf));
- info->elf.kernel_order = get_order_from_bytes(size);
+ info->elf.kernel_order = get_order_from_bytes_ceil(size);
info->elf.kernel_img = alloc_xenheap_pages(info->elf.kernel_order, 0);
if ( info->elf.kernel_img == NULL )
panic("Cannot allocate temporary buffer for kernel");
diff --git a/xen/arch/arm/percpu.c b/xen/arch/arm/percpu.c
index e545024..954e92f 100644
--- a/xen/arch/arm/percpu.c
+++ b/xen/arch/arm/percpu.c
@@ -7,7 +7,8 @@
unsigned long __per_cpu_offset[NR_CPUS];
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER \
+ (get_order_from_bytes_ceil(__per_cpu_data_end-__per_cpu_start))
void __init percpu_init_areas(void)
{
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 332e7f0..3d70720 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -236,7 +236,7 @@ static unsigned int __init noinline _domain_struct_bits(void)
struct domain *alloc_domain_struct(void)
{
struct domain *d;
- unsigned int order = get_order_from_bytes(sizeof(*d));
+ unsigned int order = get_order_from_bytes_ceil(sizeof(*d));
#ifdef CONFIG_BIGMEM
const unsigned int bits = 0;
#else
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 78980ae..982bb5f 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -290,7 +290,7 @@ static unsigned long __init compute_dom0_nr_pages(
/* Reserve memory for further dom0 vcpu-struct allocations... */
avail -= (d->max_vcpus - 1UL)
- << get_order_from_bytes(sizeof(struct vcpu));
+ << get_order_from_bytes_ceil(sizeof(struct vcpu));
/* ...and compat_l4's, if needed. */
if ( is_pv_32bit_domain(d) )
avail -= d->max_vcpus - 1;
@@ -1172,7 +1172,7 @@ static int __init construct_dom0_pv(
count = v_end - v_start;
if ( vinitrd_start )
count -= PAGE_ALIGN(initrd_len);
- order = get_order_from_bytes(count);
+ order = get_order_from_bytes_ceil(count);
if ( (1UL << order) + PFN_UP(initrd_len) > nr_pages )
panic("Domain 0 allocation is too small for kernel image");
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index f9b38ab..7b3af39 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -101,13 +101,13 @@ int nsvm_vcpu_initialise(struct vcpu *v)
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+ msrpm = alloc_xenheap_pages(get_order_from_bytes_ceil(MSRPM_SIZE), 0);
svm->ns_cached_msrpm = msrpm;
if (msrpm == NULL)
goto err;
memset(msrpm, 0x0, MSRPM_SIZE);
- msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+ msrpm = alloc_xenheap_pages(get_order_from_bytes_ceil(MSRPM_SIZE), 0);
svm->ns_merged_msrpm = msrpm;
if (msrpm == NULL)
goto err;
@@ -141,12 +141,12 @@ void nsvm_vcpu_destroy(struct vcpu *v)
if (svm->ns_cached_msrpm) {
free_xenheap_pages(svm->ns_cached_msrpm,
- get_order_from_bytes(MSRPM_SIZE));
+ get_order_from_bytes_ceil(MSRPM_SIZE));
svm->ns_cached_msrpm = NULL;
}
if (svm->ns_merged_msrpm) {
free_xenheap_pages(svm->ns_merged_msrpm,
- get_order_from_bytes(MSRPM_SIZE));
+ get_order_from_bytes_ceil(MSRPM_SIZE));
svm->ns_merged_msrpm = NULL;
}
hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 9ea014f..c763b75 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -98,7 +98,8 @@ static int construct_vmcb(struct vcpu *v)
CR_INTERCEPT_CR8_WRITE);
/* I/O and MSR permission bitmaps. */
- arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+ arch_svm->msrpm = alloc_xenheap_pages(
+ get_order_from_bytes_ceil(MSRPM_SIZE), 0);
if ( arch_svm->msrpm == NULL )
return -ENOMEM;
memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
@@ -268,7 +269,7 @@ void svm_destroy_vmcb(struct vcpu *v)
if ( arch_svm->msrpm != NULL )
{
free_xenheap_pages(
- arch_svm->msrpm, get_order_from_bytes(MSRPM_SIZE));
+ arch_svm->msrpm, get_order_from_bytes_ceil(MSRPM_SIZE));
arch_svm->msrpm = NULL;
}
diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c
index 1c1dad9..d44e7e2 100644
--- a/xen/arch/x86/percpu.c
+++ b/xen/arch/x86/percpu.c
@@ -14,7 +14,8 @@ unsigned long __per_cpu_offset[NR_CPUS];
* context of PV guests.
*/
#define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER \
+ (get_order_from_bytes_ceil(__per_cpu_data_end-__per_cpu_start))
void __init percpu_init_areas(void)
{
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 3a9dd3e..5597675 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -669,7 +669,7 @@ static void cpu_smpboot_free(unsigned int cpu)
free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
- order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+ order = get_order_from_bytes_ceil(IDT_ENTRIES * sizeof(idt_entry_t));
free_xenheap_pages(idt_tables[cpu], order);
idt_tables[cpu] = NULL;
@@ -710,7 +710,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
memcpy(gdt, boot_cpu_compat_gdt_table, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
- order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+ order = get_order_from_bytes_ceil(IDT_ENTRIES * sizeof(idt_entry_t));
idt_tables[cpu] = alloc_xenheap_pages(order, memflags);
if ( idt_tables[cpu] == NULL )
goto oom;
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index c83d48f..f557475 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -556,7 +556,7 @@ static int __init kexec_init(void)
crash_heap_size = PAGE_ALIGN(crash_heap_size);
crash_heap_current = alloc_xenheap_pages(
- get_order_from_bytes(crash_heap_size),
+ get_order_from_bytes_ceil(crash_heap_size),
MEMF_bits(crashinfo_maxaddr_bits) );
if ( ! crash_heap_current )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ae2476d..7f0381e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -553,7 +553,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
*use_tail = 0;
}
#endif
- else if ( get_order_from_bytes(sizeof(**_heap)) ==
+ else if ( get_order_from_bytes_ceil(sizeof(**_heap)) ==
get_order_from_pages(needed) )
{
_heap[node] = alloc_xenheap_pages(get_order_from_pages(needed), 0);
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 71cb7d5..6c630b6 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -292,7 +292,7 @@ int __init tmem_init(void)
unsigned int cpu;
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
- workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
+ workmem_order = get_order_from_bytes_ceil(LZO1X_1_MEM_COMPRESS);
for_each_online_cpu ( cpu )
{
diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index 6c1b882..32800e1 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -298,7 +298,7 @@ struct xmem_pool *xmem_pool_create(
BUG_ON(max_size && (max_size < init_size));
pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
- pool_order = get_order_from_bytes(pool_bytes);
+ pool_order = get_order_from_bytes_ceil(pool_bytes);
pool = (void *)alloc_xenheap_pages(pool_order, 0);
if ( pool == NULL )
@@ -371,7 +371,7 @@ void xmem_pool_destroy(struct xmem_pool *pool)
spin_unlock(&pool_list_lock);
pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
- pool_order = get_order_from_bytes(pool_bytes);
+ pool_order = get_order_from_bytes_ceil(pool_bytes);
free_xenheap_pages(pool,pool_order);
}
@@ -530,7 +530,7 @@ static void *xmalloc_whole_pages(unsigned long size, unsigned long align)
unsigned int i, order;
void *res, *p;
- order = get_order_from_bytes(max(align, size));
+ order = get_order_from_bytes_ceil(max(align, size));
res = alloc_xenheap_pages(order, 0);
if ( res == NULL )
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 55ae31a..605639e 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -301,7 +301,7 @@ static void dump_console_ring_key(unsigned char key)
/* create a buffer in which we'll copy the ring in the correct
order and NUL terminate */
- order = get_order_from_bytes(conring_size + 1);
+ order = get_order_from_bytes_ceil(conring_size + 1);
buf = alloc_xenheap_pages(order, 0);
if ( buf == NULL )
{
@@ -759,7 +759,7 @@ void __init console_init_ring(void)
if ( !opt_conring_size )
return;
- order = get_order_from_bytes(max(opt_conring_size, conring_size));
+ order = get_order_from_bytes_ceil(max(opt_conring_size, conring_size));
memflags = MEMF_bits(crashinfo_maxaddr_bits);
while ( (ring = alloc_xenheap_pages(order, memflags)) == NULL )
{
@@ -1080,7 +1080,7 @@ static int __init debugtrace_init(void)
if ( bytes == 0 )
return 0;
- order = get_order_from_bytes(bytes);
+ order = get_order_from_bytes_ceil(bytes);
debugtrace_buf = alloc_xenheap_pages(order, 0);
ASSERT(debugtrace_buf != NULL);
diff --git a/xen/drivers/char/serial.c b/xen/drivers/char/serial.c
index 0fc5ced..5ac75bb 100644
--- a/xen/drivers/char/serial.c
+++ b/xen/drivers/char/serial.c
@@ -577,7 +577,7 @@ void __init serial_async_transmit(struct serial_port *port)
while ( serial_txbufsz & (serial_txbufsz - 1) )
serial_txbufsz &= serial_txbufsz - 1;
port->txbuf = alloc_xenheap_pages(
- get_order_from_bytes(serial_txbufsz), 0);
+ get_order_from_bytes_ceil(serial_txbufsz), 0);
}
/*
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index ea9f7e7..696ff1a 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -136,7 +136,8 @@ static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
+ power_of2_entries =
+ get_order_from_bytes_ceil(iommu->cmd_buffer.alloc_size) +
IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
@@ -164,7 +165,7 @@ static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+ power_of2_entries = get_order_from_bytes_ceil(iommu->event_log.alloc_size) +
IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
@@ -192,7 +193,7 @@ static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu)
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) +
+ power_of2_entries = get_order_from_bytes_ceil(iommu->ppr_log.alloc_size) +
IOMMU_PPR_LOG_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
@@ -918,7 +919,7 @@ static void __init deallocate_buffer(void *buf, uint32_t sz)
int order = 0;
if ( buf )
{
- order = get_order_from_bytes(sz);
+ order = get_order_from_bytes_ceil(sz);
__free_amd_iommu_tables(buf, order);
}
}
@@ -940,7 +941,7 @@ static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
{
void * buffer;
- int order = get_order_from_bytes(alloc_size);
+ int order = get_order_from_bytes_ceil(alloc_size);
buffer = __alloc_amd_iommu_tables(order);
@@ -963,8 +964,8 @@ static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
spin_lock_init(&ring_buf->lock);
- ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
- entry_size);
+ ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes_ceil(entries *
+ entry_size);
ring_buf->entries = ring_buf->alloc_size / entry_size;
ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
return ring_buf->buffer;
@@ -1163,7 +1164,7 @@ static int __init amd_iommu_setup_device_table(
/* allocate 'device table' on a 4K boundary */
device_table.alloc_size = PAGE_SIZE <<
- get_order_from_bytes(
+ get_order_from_bytes_ceil(
PAGE_ALIGN(ivrs_bdf_entries *
IOMMU_DEV_TABLE_ENTRY_SIZE));
device_table.entries = device_table.alloc_size /
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 338d6b4..dd291a2 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -460,7 +460,7 @@ int __init pci_ro_device(int seg, int bus, int devfn)
{
size_t sz = BITS_TO_LONGS(PCI_BDF(-1, -1, -1) + 1) * sizeof(long);
- pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes(sz), 0);
+ pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes_ceil(sz), 0);
if ( !pseg->ro_map )
return -ENOMEM;
memset(pseg->ro_map, 0, sz);
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 2e7ed6b..45d6b0a 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -125,7 +125,7 @@ static inline int invalidate_dcache_va_range(const void *p,
static inline int clean_and_invalidate_dcache_va_range(const void *p,
unsigned long size)
{
- unsigned int order = get_order_from_bytes(size);
+ unsigned int order = get_order_from_bytes_ceil(size);
/* sub-page granularity support needs to be added if necessary */
flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
return 0;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 76fbb82..5357a08 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -519,7 +519,7 @@ page_list_splice(struct page_list_head *list, struct page_list_head *head)
list_for_each_entry_safe_reverse(pos, tmp, head, list)
#endif
-static inline unsigned int get_order_from_bytes(paddr_t size)
+static inline unsigned int get_order_from_bytes_ceil(paddr_t size)
{
unsigned int order;
@@ -530,6 +530,16 @@ static inline unsigned int get_order_from_bytes(paddr_t size)
return order;
}
+static inline unsigned int get_order_from_bytes_floor(paddr_t size)
+{
+ unsigned int order;
+
+ size >>= PAGE_SHIFT;
+ for ( order = 0; size >= (1 << (order + 1)); order++ );
+
+ return order;
+}
+
static inline unsigned int get_order_from_pages(unsigned long nr_pages)
{
unsigned int order;
--
2.7.4 (Apple Git-66)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-09-27 15:58 UTC|newest]
Thread overview: 146+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-09-27 15:56 [PATCH v2 00/30] PVHv2 Dom0 Roger Pau Monne
2016-09-27 15:56 ` [PATCH v2 01/30] xen/x86: move setup of the VM86 TSS to the domain builder Roger Pau Monne
2016-09-28 15:35 ` Jan Beulich
2016-09-29 12:57 ` Roger Pau Monne
2016-09-27 15:56 ` [PATCH v2 02/30] xen/x86: remove XENFEAT_hvm_pirqs for PVHv2 guests Roger Pau Monne
2016-09-28 16:03 ` Jan Beulich
2016-09-29 14:17 ` Roger Pau Monne
2016-09-29 16:07 ` Jan Beulich
2016-09-27 15:56 ` [PATCH v2 03/30] xen/x86: fix parameters and return value of *_set_allocation functions Roger Pau Monne
2016-09-28 9:34 ` Tim Deegan
2016-09-29 10:39 ` Jan Beulich
2016-09-29 14:33 ` Roger Pau Monne
2016-09-29 16:09 ` Jan Beulich
2016-09-30 16:48 ` George Dunlap
2016-10-03 8:05 ` Paul Durrant
2016-10-06 11:33 ` Roger Pau Monne
2016-09-27 15:56 ` [PATCH v2 04/30] xen/x86: allow calling {sh/hap}_set_allocation with the idle domain Roger Pau Monne
2016-09-29 10:43 ` Jan Beulich
2016-09-29 14:37 ` Roger Pau Monne
2016-09-29 16:10 ` Jan Beulich
2016-09-30 16:56 ` George Dunlap
2016-09-30 16:56 ` George Dunlap
2016-09-27 15:57 ` [PATCH v2 05/30] xen/x86: assert that local_events_need_delivery is not called by " Roger Pau Monne
2016-09-29 10:45 ` Jan Beulich
2016-09-30 8:32 ` Roger Pau Monne
2016-09-30 8:59 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 06/30] x86/paging: introduce paging_set_allocation Roger Pau Monne
2016-09-29 10:51 ` Jan Beulich
2016-09-29 14:51 ` Roger Pau Monne
2016-09-29 16:12 ` Jan Beulich
2016-09-29 16:57 ` Roger Pau Monne
2016-09-30 17:00 ` George Dunlap
2016-09-27 15:57 ` [PATCH v2 07/30] xen/x86: split the setup of Dom0 permissions to a function Roger Pau Monne
2016-09-29 13:47 ` Jan Beulich
2016-09-29 15:53 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 08/30] xen/x86: do the PCI scan unconditionally Roger Pau Monne
2016-09-29 13:55 ` Jan Beulich
2016-09-29 15:11 ` Roger Pau Monne
2016-09-29 16:14 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 09/30] x86/vtd: fix and simplify mapping RMRR regions Roger Pau Monne
2016-09-29 14:18 ` Jan Beulich
2016-09-30 11:27 ` Roger Pau Monne
2016-09-30 13:21 ` Jan Beulich
2016-09-30 15:02 ` Roger Pau Monne
2016-09-30 15:09 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 10/30] xen/x86: allow the emulated APICs to be enabled for the hardware domain Roger Pau Monne
2016-09-29 14:26 ` Jan Beulich
2016-09-30 15:44 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 11/30] xen/x86: split Dom0 build into PV and PVHv2 Roger Pau Monne
2016-09-30 15:03 ` Jan Beulich
2016-10-03 10:09 ` Roger Pau Monne
2016-10-04 6:54 ` Jan Beulich
2016-10-04 7:09 ` Andrew Cooper
2016-09-27 15:57 ` [PATCH v2 12/30] xen/x86: make print_e820_memory_map global Roger Pau Monne
2016-09-30 15:04 ` Jan Beulich
2016-10-03 16:23 ` Roger Pau Monne
2016-10-04 6:47 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 13/30] xen: introduce a new format specifier to print sizes in human-readable form Roger Pau Monne
2016-09-28 8:24 ` Juergen Gross
2016-09-28 11:56 ` Roger Pau Monne
2016-09-28 12:01 ` Andrew Cooper
2016-10-03 8:36 ` Paul Durrant
2016-10-11 10:27 ` Roger Pau Monne
2016-09-27 15:57 ` Roger Pau Monne [this message]
2016-09-30 15:20 ` [PATCH v2 14/30] xen/mm: add a ceil suffix to current page calculation routine Jan Beulich
2016-09-27 15:57 ` [PATCH v2 15/30] xen/x86: populate PVHv2 Dom0 physical memory map Roger Pau Monne
2016-09-30 15:52 ` Jan Beulich
2016-10-04 9:12 ` Roger Pau Monne
2016-10-04 11:16 ` Jan Beulich
2016-10-11 14:01 ` Roger Pau Monne
2016-10-12 11:51 ` Jan Beulich
2016-10-11 14:06 ` Roger Pau Monne
2016-10-12 11:58 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 16/30] xen/x86: parse Dom0 kernel for PVHv2 Roger Pau Monne
2016-10-06 15:14 ` Jan Beulich
2016-10-11 15:02 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 17/30] xen/x86: setup PVHv2 Dom0 CPUs Roger Pau Monne
2016-10-06 15:20 ` Jan Beulich
2016-10-12 11:06 ` Roger Pau Monne
2016-10-12 11:32 ` Andrew Cooper
2016-10-12 12:02 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 18/30] xen/x86: setup PVHv2 Dom0 ACPI tables Roger Pau Monne
2016-10-06 15:40 ` Jan Beulich
2016-10-06 15:48 ` Andrew Cooper
2016-10-12 15:35 ` Roger Pau Monne
2016-10-12 15:55 ` Jan Beulich
2016-10-26 11:35 ` Roger Pau Monne
2016-10-26 14:10 ` Jan Beulich
2016-10-26 15:08 ` Roger Pau Monne
2016-10-26 15:16 ` Jan Beulich
2016-10-26 16:03 ` Roger Pau Monne
2016-10-27 7:25 ` Jan Beulich
2016-10-27 11:08 ` Roger Pau Monne
2016-10-26 17:14 ` Boris Ostrovsky
2016-10-27 7:27 ` Jan Beulich
2016-10-27 11:13 ` Roger Pau Monne
2016-10-27 11:25 ` Jan Beulich
2016-10-27 13:51 ` Boris Ostrovsky
2016-10-27 14:02 ` Jan Beulich
2016-10-27 14:15 ` Boris Ostrovsky
2016-10-27 14:30 ` Jan Beulich
2016-10-27 14:40 ` Boris Ostrovsky
2016-10-27 15:04 ` Roger Pau Monne
2016-10-27 15:20 ` Jan Beulich
2016-10-27 15:37 ` Roger Pau Monne
2016-10-28 13:51 ` Boris Ostrovsky
2016-09-27 15:57 ` [PATCH v2 19/30] xen/dpci: add a dpci passthrough handler for hardware domain Roger Pau Monne
2016-10-03 9:02 ` Paul Durrant
2016-10-06 14:31 ` Roger Pau Monne
2016-10-06 15:44 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 20/30] xen/x86: add the basic infrastructure to import QEMU passthrough code Roger Pau Monne
2016-10-03 9:54 ` Paul Durrant
2016-10-06 15:08 ` Roger Pau Monne
2016-10-06 15:52 ` Lars Kurth
2016-10-07 9:13 ` Jan Beulich
2016-10-06 15:47 ` Jan Beulich
2016-10-10 12:41 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 21/30] xen/pci: split code to size BARs from pci_add_device Roger Pau Monne
2016-10-06 16:00 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 22/30] xen/x86: support PVHv2 Dom0 BAR remapping Roger Pau Monne
2016-10-03 10:10 ` Paul Durrant
2016-10-06 15:25 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 23/30] xen/x86: route legacy PCI interrupts to Dom0 Roger Pau Monne
2016-10-10 13:37 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 24/30] x86/vmsi: add MSI emulation for hardware domain Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 25/30] xen/x86: add all PCI devices to PVHv2 Dom0 Roger Pau Monne
2016-10-10 13:44 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 26/30] xen/x86: add PCIe emulation Roger Pau Monne
2016-10-03 10:46 ` Paul Durrant
2016-10-06 15:53 ` Roger Pau Monne
2016-10-10 13:57 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 27/30] x86/msixtbl: disable MSI-X intercepts for domains without an ioreq server Roger Pau Monne
2016-10-10 14:18 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 28/30] xen/x86: add MSI-X emulation to PVHv2 Dom0 Roger Pau Monne
2016-10-03 10:57 ` Paul Durrant
2016-10-06 15:58 ` Roger Pau Monne
2016-10-10 16:15 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 29/30] xen/x86: allow PVHv2 to perform foreign memory mappings Roger Pau Monne
2016-09-30 17:36 ` George Dunlap
2016-10-10 14:21 ` Jan Beulich
2016-10-10 14:27 ` George Dunlap
2016-10-10 14:50 ` Jan Beulich
2016-10-10 14:58 ` George Dunlap
2016-09-27 15:57 ` [PATCH v2 30/30] xen: allow setting the store pfn HVM parameter Roger Pau Monne
2016-10-03 11:01 ` Paul Durrant
2016-09-28 12:22 ` [PATCH v2 00/30] PVHv2 Dom0 Roger Pau Monne
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1474991845-27962-15-git-send-email-roger.pau@citrix.com \
--to=roger.pau@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=boris.ostrovsky@oracle.com \
--cc=jbeulich@suse.com \
--cc=julien.grall@arm.com \
--cc=sstabellini@kernel.org \
--cc=suravee.suthikulpanit@amd.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).