* [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning
@ 2016-08-11 11:06 Juergen Gross
2016-08-11 11:06 ` [PATCH v4 17/19] mini-os: add support for ballooning up Juergen Gross
2016-08-11 11:54 ` [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning Samuel Thibault
0 siblings, 2 replies; 3+ messages in thread
From: Juergen Gross @ 2016-08-11 11:06 UTC (permalink / raw)
To: minios-devel, xen-devel; +Cc: Juergen Gross, samuel.thibault, wei.liu2
In case of CONFIG_BALLOON the page allocator's bitmap needs some space
to be able to grow. Remap it to kernel virtual area if the preallocated
area isn't large enough.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V4: - mm_bitmap* -> mm_alloc_bitmap* as requested by Samuel Thibault
V3: - add assertion as requested by Samuel Thibault
- rename functions to have mm_ prefix as requested by Samuel Thibault
---
balloon.c | 18 ++++++++++++++++++
include/balloon.h | 2 ++
include/mm.h | 6 ++++++
mm.c | 44 +++++++++++++++++++++++---------------------
4 files changed, 49 insertions(+), 21 deletions(-)
diff --git a/balloon.c b/balloon.c
index 1ec113d..0a3342c 100644
--- a/balloon.c
+++ b/balloon.c
@@ -44,3 +44,21 @@ void get_max_pages(void)
nr_max_pages = ret;
printk("Maximum memory size: %ld pages\n", nr_max_pages);
}
+
+void mm_alloc_bitmap_remap(void)
+{
+ unsigned long i;
+
+ if ( mm_alloc_bitmap_size >= ((nr_max_pages + 1) >> (PAGE_SHIFT + 3)) )
+ return;
+
+ for ( i = 0; i < mm_alloc_bitmap_size; i += PAGE_SIZE )
+ {
+ map_frame_rw(virt_kernel_area_end + i,
+ virt_to_mfn((unsigned long)(mm_alloc_bitmap) + i));
+ }
+
+ mm_alloc_bitmap = (unsigned long *)virt_kernel_area_end;
+ virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> (PAGE_SHIFT + 3));
+ ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
+}
diff --git a/include/balloon.h b/include/balloon.h
index b0d0ebf..9154f44 100644
--- a/include/balloon.h
+++ b/include/balloon.h
@@ -31,11 +31,13 @@ extern unsigned long virt_kernel_area_end;
void get_max_pages(void);
void arch_remap_p2m(unsigned long max_pfn);
+void mm_alloc_bitmap_remap(void);
#else /* CONFIG_BALLOON */
static inline void get_max_pages(void) { }
static inline void arch_remap_p2m(unsigned long max_pfn) { }
+static inline void mm_alloc_bitmap_remap(void) { }
#endif /* CONFIG_BALLOON */
#endif /* _BALLOON_H_ */
diff --git a/include/mm.h b/include/mm.h
index 6add683..fc3128b 100644
--- a/include/mm.h
+++ b/include/mm.h
@@ -42,8 +42,14 @@
#define STACK_SIZE_PAGE_ORDER __STACK_SIZE_PAGE_ORDER
#define STACK_SIZE __STACK_SIZE
+#define round_pgdown(_p) ((_p) & PAGE_MASK)
+#define round_pgup(_p) (((_p) + (PAGE_SIZE - 1)) & PAGE_MASK)
+
extern unsigned long nr_free_pages;
+extern unsigned long *mm_alloc_bitmap;
+extern unsigned long mm_alloc_bitmap_size;
+
void init_mm(void);
unsigned long alloc_pages(int order);
#define alloc_page() alloc_pages(0)
diff --git a/mm.c b/mm.c
index 707a3e0..9e3a479 100644
--- a/mm.c
+++ b/mm.c
@@ -48,11 +48,14 @@
* One bit per page of memory. Bit set => page is allocated.
*/
-static unsigned long *alloc_bitmap;
+unsigned long *mm_alloc_bitmap;
+unsigned long mm_alloc_bitmap_size;
+
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
#define allocated_in_map(_pn) \
-(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1UL<<((_pn)&(PAGES_PER_MAPWORD-1))))
+ (mm_alloc_bitmap[(_pn) / PAGES_PER_MAPWORD] & \
+ (1UL << ((_pn) & (PAGES_PER_MAPWORD - 1))))
unsigned long nr_free_pages;
@@ -61,8 +64,8 @@ unsigned long nr_free_pages;
* -(1<<n) sets all bits >= n.
* (1<<n)-1 sets all bits < n.
* Variable names in map_{alloc,free}:
- * *_idx == Index into `alloc_bitmap' array.
- * *_off == Bit offset within an element of the `alloc_bitmap' array.
+ * *_idx == Index into `mm_alloc_bitmap' array.
+ * *_off == Bit offset within an element of the `mm_alloc_bitmap' array.
*/
static void map_alloc(unsigned long first_page, unsigned long nr_pages)
@@ -76,13 +79,13 @@ static void map_alloc(unsigned long first_page, unsigned long nr_pages)
if ( curr_idx == end_idx )
{
- alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
+ mm_alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
}
else
{
- alloc_bitmap[curr_idx] |= -(1UL<<start_off);
- while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
- alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
+ mm_alloc_bitmap[curr_idx] |= -(1UL<<start_off);
+ while ( ++curr_idx < end_idx ) mm_alloc_bitmap[curr_idx] = ~0UL;
+ mm_alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
}
nr_free_pages -= nr_pages;
@@ -102,13 +105,13 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
if ( curr_idx == end_idx )
{
- alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
+ mm_alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
}
else
{
- alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
- while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
- alloc_bitmap[curr_idx] &= -(1UL<<end_off);
+ mm_alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
+ while ( ++curr_idx != end_idx ) mm_alloc_bitmap[curr_idx] = 0;
+ mm_alloc_bitmap[curr_idx] &= -(1UL<<end_off);
}
}
@@ -137,9 +140,6 @@ static chunk_head_t *free_head[FREELIST_SIZE];
static chunk_head_t free_tail[FREELIST_SIZE];
#define FREELIST_EMPTY(_l) ((_l)->next == NULL)
-#define round_pgdown(_p) ((_p)&PAGE_MASK)
-#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
-
/*
* Initialise allocator, placing addresses [@min,@max] in free pool.
* @min and @max are PHYSICAL addresses.
@@ -147,7 +147,7 @@ static chunk_head_t free_tail[FREELIST_SIZE];
static void init_page_allocator(unsigned long min, unsigned long max)
{
int i;
- unsigned long range, bitmap_size;
+ unsigned long range;
chunk_head_t *ch;
chunk_tail_t *ct;
for ( i = 0; i < FREELIST_SIZE; i++ )
@@ -161,14 +161,14 @@ static void init_page_allocator(unsigned long min, unsigned long max)
max = round_pgdown(max);
/* Allocate space for the allocation bitmap. */
- bitmap_size = (max+1) >> (PAGE_SHIFT+3);
- bitmap_size = round_pgup(bitmap_size);
- alloc_bitmap = (unsigned long *)to_virt(min);
- min += bitmap_size;
+ mm_alloc_bitmap_size = (max + 1) >> (PAGE_SHIFT + 3);
+ mm_alloc_bitmap_size = round_pgup(mm_alloc_bitmap_size);
+ mm_alloc_bitmap = (unsigned long *)to_virt(min);
+ min += mm_alloc_bitmap_size;
range = max - min;
/* All allocated by default. */
- memset(alloc_bitmap, ~0, bitmap_size);
+ memset(mm_alloc_bitmap, ~0, mm_alloc_bitmap_size);
/* Free up the memory we've been given to play with. */
map_free(PHYS_PFN(min), range>>PAGE_SHIFT);
@@ -198,6 +198,8 @@ static void init_page_allocator(unsigned long min, unsigned long max)
free_head[i] = ch;
ct->level = i;
}
+
+ mm_alloc_bitmap_remap();
}
--
2.6.6
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH v4 17/19] mini-os: add support for ballooning up
2016-08-11 11:06 [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning Juergen Gross
@ 2016-08-11 11:06 ` Juergen Gross
2016-08-11 11:54 ` [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning Samuel Thibault
1 sibling, 0 replies; 3+ messages in thread
From: Juergen Gross @ 2016-08-11 11:06 UTC (permalink / raw)
To: minios-devel, xen-devel; +Cc: Juergen Gross, samuel.thibault, wei.liu2
Add support for ballooning the domain up by a specified number of
pages. The following steps are performed:
- extending the p2m map
- extending the page allocator's bitmap
- getting new memory pages from the hypervisor
- adding the memory at the current end of guest memory
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
---
V3: change "if" to "while" in balloon_up() as requested by Samuel Thibault
---
arch/arm/balloon.c | 9 ++++++
arch/x86/balloon.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
balloon.c | 64 +++++++++++++++++++++++++++++++++++++
include/balloon.h | 5 +++
mm.c | 4 +++
5 files changed, 176 insertions(+)
diff --git a/arch/arm/balloon.c b/arch/arm/balloon.c
index 549e51b..7f35328 100644
--- a/arch/arm/balloon.c
+++ b/arch/arm/balloon.c
@@ -27,4 +27,13 @@
unsigned long virt_kernel_area_end; /* TODO: find a virtual area */
+int arch_expand_p2m(unsigned long max_pfn)
+{
+ return 0;
+}
+
+void arch_pfn_add(unsigned long pfn, unsigned long mfn)
+{
+}
+
#endif
diff --git a/arch/x86/balloon.c b/arch/x86/balloon.c
index a7f20e4..42389e4 100644
--- a/arch/x86/balloon.c
+++ b/arch/x86/balloon.c
@@ -23,6 +23,7 @@
#include <mini-os/os.h>
#include <mini-os/balloon.h>
+#include <mini-os/errno.h>
#include <mini-os/lib.h>
#include <mini-os/mm.h>
@@ -30,9 +31,36 @@
unsigned long virt_kernel_area_end = VIRT_KERNEL_AREA;
+static void p2m_invalidate(unsigned long *list, unsigned long start_idx)
+{
+ unsigned long idx;
+
+ for ( idx = start_idx; idx < P2M_ENTRIES; idx++ )
+ list[idx] = INVALID_P2M_ENTRY;
+}
+
+static inline unsigned long *p2m_l3list(void)
+{
+ return mfn_to_virt(HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list);
+}
+
+static inline unsigned long *p2m_to_virt(unsigned long p2m)
+{
+ return ( p2m == INVALID_P2M_ENTRY ) ? NULL : mfn_to_virt(p2m);
+}
+
void arch_remap_p2m(unsigned long max_pfn)
{
unsigned long pfn;
+ unsigned long *l3_list, *l2_list, *l1_list;
+
+ l3_list = p2m_l3list();
+ l2_list = p2m_to_virt(l3_list[L3_P2M_IDX(max_pfn - 1)]);
+ l1_list = p2m_to_virt(l2_list[L2_P2M_IDX(max_pfn - 1)]);
+
+ p2m_invalidate(l3_list, L3_P2M_IDX(max_pfn - 1) + 1);
+ p2m_invalidate(l2_list, L2_P2M_IDX(max_pfn - 1) + 1);
+ p2m_invalidate(l1_list, L1_P2M_IDX(max_pfn - 1) + 1);
if ( p2m_pages(nr_max_pages) <= p2m_pages(max_pfn) )
return;
@@ -50,4 +78,70 @@ void arch_remap_p2m(unsigned long max_pfn)
ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
}
+int arch_expand_p2m(unsigned long max_pfn)
+{
+ unsigned long pfn;
+ unsigned long *l1_list, *l2_list, *l3_list;
+
+ p2m_chk_pfn(max_pfn - 1);
+ l3_list = p2m_l3list();
+
+ for ( pfn = (HYPERVISOR_shared_info->arch.max_pfn + P2M_MASK) & ~P2M_MASK;
+ pfn < max_pfn; pfn += P2M_ENTRIES )
+ {
+ l2_list = p2m_to_virt(l3_list[L3_P2M_IDX(pfn)]);
+ if ( !l2_list )
+ {
+ l2_list = (unsigned long*)alloc_page();
+ if ( !l2_list )
+ return -ENOMEM;
+ p2m_invalidate(l2_list, 0);
+ l3_list[L3_P2M_IDX(pfn)] = virt_to_mfn(l2_list);
+ }
+ l1_list = p2m_to_virt(l2_list[L2_P2M_IDX(pfn)]);
+ if ( !l1_list )
+ {
+ l1_list = (unsigned long*)alloc_page();
+ if ( !l1_list )
+ return -ENOMEM;
+ p2m_invalidate(l1_list, 0);
+ l2_list[L2_P2M_IDX(pfn)] = virt_to_mfn(l1_list);
+
+ if ( map_frame_rw((unsigned long)(phys_to_machine_mapping + pfn),
+ l2_list[L2_P2M_IDX(pfn)]) )
+ return -ENOMEM;
+ }
+ }
+
+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
+
+ /* Make sure the new last page can be mapped. */
+ if ( !need_pgt((unsigned long)pfn_to_virt(max_pfn - 1)) )
+ return -ENOMEM;
+
+ return 0;
+}
+
+void arch_pfn_add(unsigned long pfn, unsigned long mfn)
+{
+ mmu_update_t mmu_updates[1];
+ pgentry_t *pgt;
+ int rc;
+
+ phys_to_machine_mapping[pfn] = mfn;
+
+ pgt = need_pgt((unsigned long)pfn_to_virt(pfn));
+ ASSERT(pgt);
+ mmu_updates[0].ptr = virt_to_mach(pgt) | MMU_NORMAL_PT_UPDATE;
+ mmu_updates[0].val = (pgentry_t)(mfn << PAGE_SHIFT) |
+ _PAGE_PRESENT | _PAGE_RW;
+ rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF);
+ if ( rc < 0 )
+ {
+ printk("ERROR: build_pagetable(): PTE could not be updated\n");
+ printk(" mmu_update failed with rc=%d\n", rc);
+ do_exit();
+ }
+}
+
#endif
diff --git a/balloon.c b/balloon.c
index 0a3342c..e1af778 100644
--- a/balloon.c
+++ b/balloon.c
@@ -23,11 +23,13 @@
#include <mini-os/os.h>
#include <mini-os/balloon.h>
+#include <mini-os/errno.h>
#include <mini-os/lib.h>
#include <xen/xen.h>
#include <xen/memory.h>
unsigned long nr_max_pages;
+unsigned long nr_mem_pages;
void get_max_pages(void)
{
@@ -62,3 +64,65 @@ void mm_alloc_bitmap_remap(void)
virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> (PAGE_SHIFT + 3));
ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
}
+
+#define N_BALLOON_FRAMES 64
+static unsigned long balloon_frames[N_BALLOON_FRAMES];
+
+int balloon_up(unsigned long n_pages)
+{
+ unsigned long page, pfn;
+ int rc;
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
+
+ if ( n_pages > nr_max_pages - nr_mem_pages )
+ n_pages = nr_max_pages - nr_mem_pages;
+ if ( n_pages > N_BALLOON_FRAMES )
+ n_pages = N_BALLOON_FRAMES;
+
+ /* Resize alloc_bitmap if necessary. */
+ while ( mm_alloc_bitmap_size * 8 < nr_mem_pages + n_pages )
+ {
+ page = alloc_page();
+ if ( !page )
+ return -ENOMEM;
+
+ memset((void *)page, ~0, PAGE_SIZE);
+ if ( map_frame_rw((unsigned long)mm_alloc_bitmap + mm_alloc_bitmap_size,
+ virt_to_mfn(page)) )
+ {
+ free_page((void *)page);
+ return -ENOMEM;
+ }
+
+ mm_alloc_bitmap_size += PAGE_SIZE;
+ }
+
+ rc = arch_expand_p2m(nr_mem_pages + n_pages);
+ if ( rc )
+ return rc;
+
+ /* Get new memory from hypervisor. */
+ for ( pfn = 0; pfn < n_pages; pfn++ )
+ {
+ balloon_frames[pfn] = nr_mem_pages + pfn;
+ }
+ set_xen_guest_handle(reservation.extent_start, balloon_frames);
+ reservation.nr_extents = n_pages;
+ rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+ if ( rc <= 0 )
+ return rc;
+
+ for ( pfn = 0; pfn < rc; pfn++ )
+ {
+ arch_pfn_add(nr_mem_pages + pfn, balloon_frames[pfn]);
+ free_page(pfn_to_virt(nr_mem_pages + pfn));
+ }
+
+ nr_mem_pages += rc;
+
+ return rc;
+}
diff --git a/include/balloon.h b/include/balloon.h
index 9154f44..5ec1bbb 100644
--- a/include/balloon.h
+++ b/include/balloon.h
@@ -28,10 +28,15 @@
extern unsigned long nr_max_pages;
extern unsigned long virt_kernel_area_end;
+extern unsigned long nr_mem_pages;
void get_max_pages(void);
+int balloon_up(unsigned long n_pages);
+
void arch_remap_p2m(unsigned long max_pfn);
void mm_alloc_bitmap_remap(void);
+int arch_expand_p2m(unsigned long max_pfn);
+void arch_pfn_add(unsigned long pfn, unsigned long mfn);
#else /* CONFIG_BALLOON */
diff --git a/mm.c b/mm.c
index 9e3a479..9a048c3 100644
--- a/mm.c
+++ b/mm.c
@@ -383,6 +383,10 @@ void init_mm(void)
arch_init_p2m(max_pfn);
arch_init_demand_mapping_area();
+
+#ifdef CONFIG_BALLOON
+ nr_mem_pages = max_pfn;
+#endif
}
void fini_mm(void)
--
2.6.6
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning
2016-08-11 11:06 [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning Juergen Gross
2016-08-11 11:06 ` [PATCH v4 17/19] mini-os: add support for ballooning up Juergen Gross
@ 2016-08-11 11:54 ` Samuel Thibault
1 sibling, 0 replies; 3+ messages in thread
From: Samuel Thibault @ 2016-08-11 11:54 UTC (permalink / raw)
To: Juergen Gross; +Cc: minios-devel, xen-devel, wei.liu2
Juergen Gross, on Thu 11 Aug 2016 13:06:36 +0200, wrote:
> In case of CONFIG_BALLOON the page allocator's bitmap needs some space
> to be able to grow. Remap it to kernel virtual area if the preallocated
> area isn't large enough.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
> ---
> V4: - mm_bitmap* -> mm_alloc_bitmap* as requested by Samuel Thibault
>
> V3: - add assertion as requested by Samuel Thibault
> - rename functions to have mm_ prefix as requested by Samuel Thibault
> ---
> balloon.c | 18 ++++++++++++++++++
> include/balloon.h | 2 ++
> include/mm.h | 6 ++++++
> mm.c | 44 +++++++++++++++++++++++---------------------
> 4 files changed, 49 insertions(+), 21 deletions(-)
>
> diff --git a/balloon.c b/balloon.c
> index 1ec113d..0a3342c 100644
> --- a/balloon.c
> +++ b/balloon.c
> @@ -44,3 +44,21 @@ void get_max_pages(void)
> nr_max_pages = ret;
> printk("Maximum memory size: %ld pages\n", nr_max_pages);
> }
> +
> +void mm_alloc_bitmap_remap(void)
> +{
> + unsigned long i;
> +
> + if ( mm_alloc_bitmap_size >= ((nr_max_pages + 1) >> (PAGE_SHIFT + 3)) )
> + return;
> +
> + for ( i = 0; i < mm_alloc_bitmap_size; i += PAGE_SIZE )
> + {
> + map_frame_rw(virt_kernel_area_end + i,
> + virt_to_mfn((unsigned long)(mm_alloc_bitmap) + i));
> + }
> +
> + mm_alloc_bitmap = (unsigned long *)virt_kernel_area_end;
> + virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> (PAGE_SHIFT + 3));
> + ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
> +}
> diff --git a/include/balloon.h b/include/balloon.h
> index b0d0ebf..9154f44 100644
> --- a/include/balloon.h
> +++ b/include/balloon.h
> @@ -31,11 +31,13 @@ extern unsigned long virt_kernel_area_end;
>
> void get_max_pages(void);
> void arch_remap_p2m(unsigned long max_pfn);
> +void mm_alloc_bitmap_remap(void);
>
> #else /* CONFIG_BALLOON */
>
> static inline void get_max_pages(void) { }
> static inline void arch_remap_p2m(unsigned long max_pfn) { }
> +static inline void mm_alloc_bitmap_remap(void) { }
>
> #endif /* CONFIG_BALLOON */
> #endif /* _BALLOON_H_ */
> diff --git a/include/mm.h b/include/mm.h
> index 6add683..fc3128b 100644
> --- a/include/mm.h
> +++ b/include/mm.h
> @@ -42,8 +42,14 @@
> #define STACK_SIZE_PAGE_ORDER __STACK_SIZE_PAGE_ORDER
> #define STACK_SIZE __STACK_SIZE
>
> +#define round_pgdown(_p) ((_p) & PAGE_MASK)
> +#define round_pgup(_p) (((_p) + (PAGE_SIZE - 1)) & PAGE_MASK)
> +
> extern unsigned long nr_free_pages;
>
> +extern unsigned long *mm_alloc_bitmap;
> +extern unsigned long mm_alloc_bitmap_size;
> +
> void init_mm(void);
> unsigned long alloc_pages(int order);
> #define alloc_page() alloc_pages(0)
> diff --git a/mm.c b/mm.c
> index 707a3e0..9e3a479 100644
> --- a/mm.c
> +++ b/mm.c
> @@ -48,11 +48,14 @@
> * One bit per page of memory. Bit set => page is allocated.
> */
>
> -static unsigned long *alloc_bitmap;
> +unsigned long *mm_alloc_bitmap;
> +unsigned long mm_alloc_bitmap_size;
> +
> #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
>
> #define allocated_in_map(_pn) \
> -(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1UL<<((_pn)&(PAGES_PER_MAPWORD-1))))
> + (mm_alloc_bitmap[(_pn) / PAGES_PER_MAPWORD] & \
> + (1UL << ((_pn) & (PAGES_PER_MAPWORD - 1))))
>
> unsigned long nr_free_pages;
>
> @@ -61,8 +64,8 @@ unsigned long nr_free_pages;
> * -(1<<n) sets all bits >= n.
> * (1<<n)-1 sets all bits < n.
> * Variable names in map_{alloc,free}:
> - * *_idx == Index into `alloc_bitmap' array.
> - * *_off == Bit offset within an element of the `alloc_bitmap' array.
> + * *_idx == Index into `mm_alloc_bitmap' array.
> + * *_off == Bit offset within an element of the `mm_alloc_bitmap' array.
> */
>
> static void map_alloc(unsigned long first_page, unsigned long nr_pages)
> @@ -76,13 +79,13 @@ static void map_alloc(unsigned long first_page, unsigned long nr_pages)
>
> if ( curr_idx == end_idx )
> {
> - alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
> + mm_alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
> }
> else
> {
> - alloc_bitmap[curr_idx] |= -(1UL<<start_off);
> - while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
> - alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
> + mm_alloc_bitmap[curr_idx] |= -(1UL<<start_off);
> + while ( ++curr_idx < end_idx ) mm_alloc_bitmap[curr_idx] = ~0UL;
> + mm_alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
> }
>
> nr_free_pages -= nr_pages;
> @@ -102,13 +105,13 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
>
> if ( curr_idx == end_idx )
> {
> - alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
> + mm_alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
> }
> else
> {
> - alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
> - while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
> - alloc_bitmap[curr_idx] &= -(1UL<<end_off);
> + mm_alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
> + while ( ++curr_idx != end_idx ) mm_alloc_bitmap[curr_idx] = 0;
> + mm_alloc_bitmap[curr_idx] &= -(1UL<<end_off);
> }
> }
>
> @@ -137,9 +140,6 @@ static chunk_head_t *free_head[FREELIST_SIZE];
> static chunk_head_t free_tail[FREELIST_SIZE];
> #define FREELIST_EMPTY(_l) ((_l)->next == NULL)
>
> -#define round_pgdown(_p) ((_p)&PAGE_MASK)
> -#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
> -
> /*
> * Initialise allocator, placing addresses [@min,@max] in free pool.
> * @min and @max are PHYSICAL addresses.
> @@ -147,7 +147,7 @@ static chunk_head_t free_tail[FREELIST_SIZE];
> static void init_page_allocator(unsigned long min, unsigned long max)
> {
> int i;
> - unsigned long range, bitmap_size;
> + unsigned long range;
> chunk_head_t *ch;
> chunk_tail_t *ct;
> for ( i = 0; i < FREELIST_SIZE; i++ )
> @@ -161,14 +161,14 @@ static void init_page_allocator(unsigned long min, unsigned long max)
> max = round_pgdown(max);
>
> /* Allocate space for the allocation bitmap. */
> - bitmap_size = (max+1) >> (PAGE_SHIFT+3);
> - bitmap_size = round_pgup(bitmap_size);
> - alloc_bitmap = (unsigned long *)to_virt(min);
> - min += bitmap_size;
> + mm_alloc_bitmap_size = (max + 1) >> (PAGE_SHIFT + 3);
> + mm_alloc_bitmap_size = round_pgup(mm_alloc_bitmap_size);
> + mm_alloc_bitmap = (unsigned long *)to_virt(min);
> + min += mm_alloc_bitmap_size;
> range = max - min;
>
> /* All allocated by default. */
> - memset(alloc_bitmap, ~0, bitmap_size);
> + memset(mm_alloc_bitmap, ~0, mm_alloc_bitmap_size);
> /* Free up the memory we've been given to play with. */
> map_free(PHYS_PFN(min), range>>PAGE_SHIFT);
>
> @@ -198,6 +198,8 @@ static void init_page_allocator(unsigned long min, unsigned long max)
> free_head[i] = ch;
> ct->level = i;
> }
> +
> + mm_alloc_bitmap_remap();
> }
>
>
> --
> 2.6.6
>
--
Samuel
"...Deep Hack Mode--that mysterious and frightening state of
consciousness where Mortal Users fear to tread."
(By Matt Welsh)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2016-08-11 11:54 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-11 11:06 [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning Juergen Gross
2016-08-11 11:06 ` [PATCH v4 17/19] mini-os: add support for ballooning up Juergen Gross
2016-08-11 11:54 ` [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning Samuel Thibault
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).