From: Andrea Arcangeli <aarcange@redhat.com>
To: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
	Izik Eidus <ieidus@redhat.com>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
	Mel Gorman <mel@csn.ul.ie>, Dave Hansen <dave@linux.vnet.ibm.com>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Christoph Lameter <cl@linux-foundation.org>,
	Chris Wright <chrisw@sous-sol.org>,
	bpicco@redhat.com,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
	Chris Mason <chris.mason@oracle.com>
Subject: [PATCH 56 of 67] Memory compaction core
Date: Thu, 08 Apr 2010 03:51:39 +0200
Message-ID: <a86f1d01d86dffb4ab53.1270691499@v2.random>
In-Reply-To: <patchbomb.1270691443@v2.random>

From: Mel Gorman <mel@csn.ul.ie>

This patch is the core of a mechanism which compacts memory in a zone by
relocating movable pages towards the end of the zone.

A single compaction run involves a migration scanner and a free scanner.
Both scanners operate on pageblock-sized areas in the zone. The migration
scanner starts at the bottom of the zone and searches each area for movable
pages, isolating them onto a private list called migratelist. The free
scanner starts at the top of the zone, searches for suitable areas and
isolates the free pages within, making them available to the migration
scanner. The pages isolated for migration are then migrated to the newly
isolated free pages.
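
For illustration only, and not part of the patch itself: a caller such as
the /proc and per-node triggers added later in this series would be
expected to drive compact_zone() roughly as in the sketch below. The
function name compact_node_sketch() is a hypothetical example; any such
caller would have to live in mm/compaction.c since compact_control and
compact_zone() are static there.

/*
 * Hedged sketch, not part of this patch: compact every populated zone of
 * a node by setting up a compact_control and letting compact_zone() run
 * until the migrate and free scanners meet.
 */
static void compact_node_sketch(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
			.zone = zone,
		};

		if (!populated_zone(zone))
			continue;

		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		/* Walks both scanners; returns COMPACT_COMPLETE or _PARTIAL */
		compact_zone(zone, &cc);
	}
}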

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
---

diff --git a/include/linux/compaction.h b/include/linux/compaction.h
new file mode 100644
--- /dev/null
+++ b/include/linux/compaction.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_COMPACTION_H
+#define _LINUX_COMPACTION_H
+
+/* Return values for compact_zone() */
+#define COMPACT_INCOMPLETE	0
+#define COMPACT_PARTIAL		1
+#define COMPACT_COMPLETE	2
+
+#endif /* _LINUX_COMPACTION_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -384,6 +384,7 @@ void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function.  Provide a
diff --git a/include/linux/swap.h b/include/linux/swap.h
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -151,6 +151,7 @@ enum {
 };
 
 #define SWAP_CLUSTER_MAX 32
+#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -43,6 +43,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PS
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
diff --git a/mm/Makefile b/mm/Makefile
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_COMPACTION) += compaction.o
 ifdef CONFIG_SMP
 obj-y += percpu.o
 else
diff --git a/mm/compaction.c b/mm/compaction.c
new file mode 100644
--- /dev/null
+++ b/mm/compaction.c
@@ -0,0 +1,390 @@
+/*
+ * linux/mm/compaction.c
+ *
+ * Memory compaction for the reduction of external fragmentation. Note that
+ * this heavily depends upon page migration to do all the real heavy
+ * lifting
+ *
+ * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
+ */
+#include <linux/swap.h>
+#include <linux/migrate.h>
+#include <linux/compaction.h>
+#include <linux/mm_inline.h>
+#include <linux/backing-dev.h>
+#include "internal.h"
+
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+	struct list_head freepages;	/* List of free pages to migrate to */
+	struct list_head migratepages;	/* List of pages being migrated */
+	unsigned long nr_freepages;	/* Number of isolated free pages */
+	unsigned long nr_migratepages;	/* Number of pages to migrate */
+	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+
+	/* Account for isolated anon and file pages */
+	unsigned long nr_anon;
+	unsigned long nr_file;
+
+	struct zone *zone;
+};
+
+static unsigned long release_freepages(struct list_head *freelist)
+{
+	struct page *page, *next;
+	unsigned long count = 0;
+
+	list_for_each_entry_safe(page, next, freelist, lru) {
+		list_del(&page->lru);
+		__free_page(page);
+		count++;
+	}
+
+	return count;
+}
+
+/* Isolate free pages onto a private freelist. Must hold zone->lock */
+static unsigned long isolate_freepages_block(struct zone *zone,
+				unsigned long blockpfn,
+				struct list_head *freelist)
+{
+	unsigned long zone_end_pfn, end_pfn;
+	int total_isolated = 0;
+	struct page *cursor;
+
+	/* Get the last PFN we should scan for free pages at */
+	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
+
+	/* Find the first usable PFN in the block to initialise page cursor */
+	for (; blockpfn < end_pfn; blockpfn++) {
+		if (pfn_valid_within(blockpfn))
+			break;
+	}
+	cursor = pfn_to_page(blockpfn);
+
+	/* Isolate free pages. This assumes the block is valid */
+	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
+		int isolated, i;
+		struct page *page = cursor;
+
+		if (!pfn_valid_within(blockpfn))
+			continue;
+
+		if (!PageBuddy(page))
+			continue;
+
+		/* Found a free page, break it into order-0 pages */
+		isolated = split_free_page(page);
+		total_isolated += isolated;
+		for (i = 0; i < isolated; i++) {
+			list_add(&page->lru, freelist);
+			page++;
+		}
+
+		/* If a page was split, advance to the end of it */
+		if (isolated) {
+			blockpfn += isolated - 1;
+			cursor += isolated - 1;
+		}
+	}
+
+	return total_isolated;
+}
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE, allow migration */
+	if (migratetype == MIGRATE_MOVABLE)
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+				struct compact_control *cc)
+{
+	struct page *page;
+	unsigned long high_pfn, low_pfn, pfn;
+	unsigned long flags;
+	int nr_freepages = cc->nr_freepages;
+	struct list_head *freelist = &cc->freepages;
+
+	pfn = cc->free_pfn;
+	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+	high_pfn = low_pfn;
+
+	/*
+	 * Isolate free pages until enough are available to migrate the
+	 * pages on cc->migratepages. We stop searching if the migrate
+	 * and free page scanners meet or enough free pages are isolated.
+	 */
+	spin_lock_irqsave(&zone->lock, flags);
+	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+					pfn -= pageblock_nr_pages) {
+		unsigned long isolated;
+
+		if (!pfn_valid(pfn))
+			continue;
+
+		/* 
+		 * Check for overlapping nodes/zones. It's possible on some
+		 * configurations to have a setup like
+		 * node0 node1 node0
+	 * i.e. it's possible that all pages within a zone's range of
+		 * pages do not belong to a single zone.
+		 */
+		page = pfn_to_page(pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Check the block is suitable for migration */
+		if (!suitable_migration_target(page))
+			continue;
+
+		/* Found a block suitable for isolating free pages from */
+		isolated = isolate_freepages_block(zone, pfn, freelist);
+		nr_freepages += isolated;
+
+		/*
+		 * Record the highest PFN we isolated pages from. When next
+		 * looking for free pages, the search will restart here as
+		 * page migration may have returned some pages to the allocator
+		 */
+		if (isolated)
+			high_pfn = max(high_pfn, pfn);
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+
+	cc->free_pfn = high_pfn;
+	cc->nr_freepages = nr_freepages;
+}
+
+/* Update the number of anon and file isolated pages in the zone */
+static void acct_isolated(struct zone *zone, struct compact_control *cc)
+{
+	struct page *page;
+	unsigned int count[NR_LRU_LISTS] = { 0, };
+
+	list_for_each_entry(page, &cc->migratepages, lru) {
+		int lru = page_lru_base_type(page);
+		count[lru]++;
+	}
+
+	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
+}
+
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct zone *zone)
+{
+
+	unsigned long inactive, isolated;
+
+	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
+					zone_page_state(zone, NR_INACTIVE_ANON);
+	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
+					zone_page_state(zone, NR_ISOLATED_ANON);
+
+	return isolated > inactive;
+}
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static unsigned long isolate_migratepages(struct zone *zone,
+					struct compact_control *cc)
+{
+	unsigned long low_pfn, end_pfn;
+	struct list_head *migratelist = &cc->migratepages;
+
+	/* Do not scan outside zone boundaries */
+	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+	/* Only scan within a pageblock boundary */
+	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+	/* Do not cross the free scanner or scan within a memory hole */
+	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+		cc->migrate_pfn = end_pfn;
+		return 0;
+	}
+
+	/*
+	 * Ensure that there are not too many pages isolated from the LRU
+	 * list by either parallel reclaimers or compaction. If there are,
+	 * delay for some time until fewer pages are isolated
+	 */
+	while (unlikely(too_many_isolated(zone))) {
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+		if (fatal_signal_pending(current))
+			return 0;
+	}
+
+	/* Time to isolate some pages for migration */
+	spin_lock_irq(&zone->lru_lock);
+	for (; low_pfn < end_pfn; low_pfn++) {
+		struct page *page;
+		if (!pfn_valid_within(low_pfn))
+			continue;
+
+		/* Get the page and skip if free */
+		page = pfn_to_page(low_pfn);
+		if (PageBuddy(page)) {
+			low_pfn += (1 << page_order(page)) - 1;
+			continue;
+		}
+
+		/* Try isolate the page */
+		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
+			continue;
+
+		/* Successfully isolated */
+		del_page_from_lru_list(zone, page, page_lru(page));
+		list_add(&page->lru, migratelist);
+		mem_cgroup_del_lru(page);
+		cc->nr_migratepages++;
+
+		/* Avoid isolating too much */
+		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+			break;
+	}
+
+	acct_isolated(zone, cc);
+
+	spin_unlock_irq(&zone->lru_lock);
+	cc->migrate_pfn = low_pfn;
+
+	return cc->nr_migratepages;
+}
+
+/*
+ * This is a migrate-callback that "allocates" freepages by taking pages
+ * from the isolated freelists in the block we are migrating to.
+ */
+static struct page *compaction_alloc(struct page *migratepage,
+					unsigned long data,
+					int **result)
+{
+	struct compact_control *cc = (struct compact_control *)data;
+	struct page *freepage;
+
+	/* Isolate free pages if necessary */
+	if (list_empty(&cc->freepages)) {
+		isolate_freepages(cc->zone, cc);
+
+		if (list_empty(&cc->freepages))
+			return NULL;
+	}
+
+	freepage = list_entry(cc->freepages.next, struct page, lru);
+	list_del(&freepage->lru);
+	cc->nr_freepages--;
+
+	return freepage;
+}
+
+/*
+ * We cannot control nr_migratepages and nr_freepages fully when migration is
+ * running as migrate_pages() has no knowledge of compact_control. When
+ * migration is complete, we count the number of pages on the lists by hand.
+ */
+static void update_nr_listpages(struct compact_control *cc)
+{
+	int nr_migratepages = 0;
+	int nr_freepages = 0;
+	struct page *page;
+
+	list_for_each_entry(page, &cc->migratepages, lru)
+		nr_migratepages++;
+	list_for_each_entry(page, &cc->freepages, lru)
+		nr_freepages++;
+
+	cc->nr_migratepages = nr_migratepages;
+	cc->nr_freepages = nr_freepages;
+}
+
+static inline int compact_finished(struct zone *zone,
+						struct compact_control *cc)
+{
+	if (fatal_signal_pending(current))
+		return COMPACT_PARTIAL;
+
+	/* Compaction run completes if the migrate and free scanner meet */
+	if (cc->free_pfn <= cc->migrate_pfn)
+		return COMPACT_COMPLETE;
+
+	return COMPACT_INCOMPLETE;
+}
+
+static int compact_zone(struct zone *zone, struct compact_control *cc)
+{
+	int ret;
+
+	/* Setup to move all movable pages to the end of the zone */
+	cc->migrate_pfn = zone->zone_start_pfn;
+	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
+	cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+	migrate_prep();
+
+	while ((ret = compact_finished(zone, cc)) == COMPACT_INCOMPLETE) {
+		unsigned long nr_migrate, nr_remaining;
+
+		if (!isolate_migratepages(zone, cc))
+			continue;
+
+		nr_migrate = cc->nr_migratepages;
+		migrate_pages(&cc->migratepages, compaction_alloc,
+						(unsigned long)cc, 0);
+		update_nr_listpages(cc);
+		nr_remaining = cc->nr_migratepages;
+
+		count_vm_event(COMPACTBLOCKS);
+		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
+		if (nr_remaining)
+			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
+
+		/* Release LRU pages not migrated */
+		if (!list_empty(&cc->migratepages)) {
+			putback_lru_pages(&cc->migratepages);
+			cc->nr_migratepages = 0;
+		}
+
+	}
+
+	/* Release free pages and check accounting */
+	cc->nr_freepages -= release_freepages(&cc->freepages);
+	VM_BUG_ON(cc->nr_freepages != 0);
+
+	return ret;
+}
+
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1191,6 +1191,45 @@ void split_page(struct page *page, unsig
 }
 
 /*
+ * Similar to split_page except the page is already free. As this is only
+ * being used for migration, the migratetype of the block also changes.
+ */
+int split_free_page(struct page *page)
+{
+	unsigned int order;
+	unsigned long watermark;
+	struct zone *zone;
+
+	BUG_ON(!PageBuddy(page));
+
+	zone = page_zone(page);
+	order = page_order(page);
+
+	/* Obey watermarks as if the page was being allocated */
+	watermark = low_wmark_pages(zone) + (1 << order);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		return 0;
+
+	/* Remove page from free list */
+	list_del(&page->lru);
+	zone->free_area[order].nr_free--;
+	rmv_page_order(page);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+	/* Split into individual pages */
+	set_page_refcounted(page);
+	split_page(page, order);
+
+	if (order >= pageblock_order - 1) {
+		struct page *endpage = page + (1 << order) - 1;
+		for (; page < endpage; page += pageblock_nr_pages)
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+	}
+
+	return 1 << order;
+}
+
+/*
  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
diff --git a/mm/vmstat.c b/mm/vmstat.c
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -896,6 +896,11 @@ static const char * const vmstat_text[] 
 	"allocstall",
 
 	"pgrotated",
+
+	"compact_blocks_moved",
+	"compact_pages_moved",
+	"compact_pagemigrate_failed",
+
 #ifdef CONFIG_HUGETLB_PAGE
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
