From: Konrad Rzeszutek Wilk
Subject: Re: Load increase after memory upgrade (part2)
Date: Wed, 15 Feb 2012 14:28:04 -0500
Message-ID: <20120215192804.GA21695@phenom.dumpdata.com>
To: Carsten Schiers
Cc: Konrad Rzeszutek Wilk, xen-devel, Jan Beulich, Sander Eikelenboom
List-Id: xen-devel@lists.xenproject.org

On Wed, Jan 25, 2012 at 08:06:12PM +0100, Carsten Schiers wrote:
> Some news: in order to prepare a clean setting, I upgraded to the 3.2.1 kernel. I noticed that the
> load increase is reduced a bit, but noticeably. It's only a simple test, running the DomU for 2
> minutes, but the idle load is approx.:
>
>   - 2.6.32 pvops      12-13%
>   - 3.2.1  pvops      10-11%
>   - 2.6.34 XenoLinux   7-8%

I took a stab at Jan's idea - it compiles, but I haven't been able to test it properly yet.
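For context, the gist of the idea: on a PV guest, vmalloc_32() only constrains the guest's
pseudo-physical frames (PFNs) to the low 4GB; the machine frames (MFNs) backing them can live
anywhere, so every DMA to such a page bounces through swiotlb, which is where the extra idle load
appears to come from. The patch below therefore exchanges the offending MFNs with the hypervisor.
The per-page test it relies on boils down to the following sketch (illustrative helper only, not
part of the patch):

#include <linux/mm.h>
#include <asm/xen/page.h>

/*
 * Hypothetical helper: returns true if @page's *machine* frame does not
 * fit within @address_bits bits, i.e. the page needs its MFN exchanged
 * before a 32-bit DMA engine can reach it. This is the same expression
 * the patch uses when constructing its per-page bitmap.
 */
static bool page_mfn_needs_exchange(struct page *page,
                                    unsigned int address_bits)
{
        /* On PV, PFN and MFN differ: a PFN below 4GB may well be
         * backed by an MFN above 4GB. */
        return pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT);
}

With address_bits = 32 this is non-zero exactly when the page's machine address crosses the 4GB
boundary.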
[attachment: vmalloc_using_xen_limit_pages.patch]

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 87f6673..6bb6f68 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -47,6 +47,7 @@
 #include <linux/gfp.h>
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 
 #include <trace/events/xen.h>
@@ -2073,6 +2074,7 @@ void __init xen_init_mmu_ops(void)
 
 /* Protected by xen_reservation_lock. */
 #define MAX_CONTIG_ORDER 9 /* 2MB */
 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];

[hunks adapting xen_zap_page_range() and xen_remap_exchanged_pages() did not survive archiving]

+int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
+                              unsigned int address_bits)
+{
+       unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
+       unsigned long _limit_map;
+       unsigned long *limit_map;
+       unsigned long flags;
+       struct page *page;
+       int success;
+       int i, n = 0;
+
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return -ENOMEM;
+
+       if (address_bits && (address_bits < PAGE_SHIFT))
+               return -EINVAL;
+
+       /* 0. Construct our per page bitmap lookup. */
+       if ((1U << order) > BITS_PER_LONG) {
+               limit_map = kzalloc(BITS_TO_LONGS(1U << order) *
+                                   sizeof(*limit_map), GFP_KERNEL);
+               if (unlikely(!limit_map))
+                       return -ENOMEM;
+       } else
+               limit_map = &_limit_map;
+
+       if (order)
+               bitmap_zero(limit_map, 1U << order);
+       else
+               __set_bit(0, limit_map);
+
+       /* 1. Clear the pages */
+       for (i = 0; i < 1ULL << order; i++) {
+               void *vaddr;
+               page = &pages[i];
+               vaddr = page_address(page);
+               if (address_bits) {
+                       /* Machine frame already below the boundary? */
+                       if (!(pfn_to_mfn(page_to_pfn(page)) >>
+                             (address_bits - PAGE_SHIFT)))
+                               continue;
+                       __set_bit(i, limit_map);
+               }
+               if (!PageHighMem(page))
+                       memset(vaddr, 0, PAGE_SIZE);
+               else {
+                       memset(kmap(page), 0, PAGE_SIZE);
+                       kunmap(page);
+                       ++n;
+               }
+       }
+       /* Check to see if we actually have to do any work. */
+       if (bitmap_empty(limit_map, 1U << order)) {
+               if (limit_map != &_limit_map)
+                       kfree(limit_map);
+               return 0;
+       }
+       if (n)
+               kmap_flush_unused();
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 2. Zap current PTEs. */
+       n = xen_zap_page_range(pages, order, in_frames, NULL /* out_frames */, limit_map);
+
+       /* 3. Do the exchange for non-contiguous MFNs. */
+       success = xen_exchange_memory(n, 0, in_frames,
+                                     n, 0, out_frames, address_bits);
+
+       /* 4. Map new pages in place of old pages. */
+       if (success)
+               xen_remap_exchanged_pages(pages, order, out_frames, 0, limit_map);
+       else
+               xen_remap_exchanged_pages(pages, order, NULL, *in_frames, limit_map);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
+       if (limit_map != &_limit_map)
+               kfree(limit_map);
+
+       return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
+
 #ifdef CONFIG_XEN_PVHVM
 static void xen_hvm_exit_mmap(struct mm_struct *mm)
 {
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 03c85d7..ae5b1ef 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -28,4 +28,6 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long mfn, int nr,
                               pgprot_t prot, unsigned domid);
 
+int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
+                              unsigned int address_bits);
 #endif /* INCLUDE_XEN_OPS_H */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 27be2f0..4fa2066 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,6 +31,8 @@
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
 
 /*** Page table manipulation functions ***/
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -1550,7 +1552,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        struct page **pages;
        unsigned int nr_pages, array_size, i;
        gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-
+       gfp_t dma_mask = gfp_mask & (__GFP_DMA | __GFP_DMA32);
+
+       if (xen_pv_domain()) {
+               if (dma_mask == (__GFP_DMA | __GFP_DMA32))
+                       gfp_mask &= ~(__GFP_DMA | __GFP_DMA32);
+       }
        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
@@ -1586,6 +1592,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                        goto fail;
                }
                area->pages[i] = page;
+               if (xen_pv_domain()) {
+                       if (dma_mask) {
+                               if (xen_limit_pages_to_max_mfn(page, 0, 32)) {
+                                       area->nr_pages = i + 1;
+                                       goto fail;
+                               }
+                               if (gfp_mask & __GFP_ZERO)
+                                       clear_highpage(page);
+                       }
+               }
        }
 
        if (map_vm_area(area, prot, &pages))
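For completeness, a rough sketch of how a driver could use the new export directly instead of going
through the vmalloc path (hypothetical function, not part of the patch; order 0 and 32 address bits
as in the vmalloc hunk above):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>

/* Hypothetical example: allocate one page whose machine address is
 * guaranteed to lie below 4GB on a PV guest. */
static struct page *alloc_page_dma32_mfn(gfp_t gfp)
{
        struct page *page = alloc_page(gfp);

        if (!page)
                return NULL;

        /* Swap the backing MFN for one below 1<<32 if necessary; note
         * that the page contents are not preserved across the exchange. */
        if (xen_pv_domain() && xen_limit_pages_to_max_mfn(page, 0, 32)) {
                __free_page(page);
                return NULL;
        }
        return page;
}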