linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/5] page migration: ifdef out code
@ 2006-05-09  6:51 Christoph Lameter
  2006-05-09  6:51 ` [PATCH 2/5] page migration: Update comments Christoph Lameter
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-05-09  6:51 UTC (permalink / raw)
  To: akpm; +Cc: linux-mm, Christoph Lameter

ifdef around migration code. Clean up various minor things.

Put #ifdef CONFIG_MIGRATION around two locations that would
generate code for the non migration case.

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.17-rc3-mm1/mm/mprotect.c
===================================================================
--- linux-2.6.17-rc3-mm1.orig/mm/mprotect.c	2006-05-01 09:48:43.582547283 -0700
+++ linux-2.6.17-rc3-mm1/mm/mprotect.c	2006-05-04 22:55:27.738093185 -0700
@@ -45,6 +45,7 @@ static void change_pte_range(struct mm_s
 			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
 			set_pte_at(mm, addr, pte, ptent);
 			lazy_mmu_prot_update(ptent);
+#ifdef CONFIG_MIGRATION
 		} else if (!pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
@@ -57,6 +58,7 @@ static void change_pte_range(struct mm_s
 				set_pte_at(mm, addr, pte,
 					swp_entry_to_pte(entry));
 			}
+#endif
 		}
 
 	} while (pte++, addr += PAGE_SIZE, addr != end);
Index: linux-2.6.17-rc3-mm1/mm/rmap.c
===================================================================
--- linux-2.6.17-rc3-mm1.orig/mm/rmap.c	2006-05-01 09:48:43.606959827 -0700
+++ linux-2.6.17-rc3-mm1/mm/rmap.c	2006-05-04 22:55:27.739069687 -0700
@@ -596,6 +596,7 @@ static int try_to_unmap_one(struct page 
 				spin_unlock(&mmlist_lock);
 			}
 			dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
 		} else {
 			/*
 			 * Store the pfn of the page in a special migration
@@ -604,17 +605,21 @@ static int try_to_unmap_one(struct page 
 			 */
 			BUG_ON(!migration);
 			entry = make_migration_entry(page, pte_write(pteval));
+#endif
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else if (!migration)
-		dec_mm_counter(mm, file_rss);
-	else {
+	} else
+#ifdef CONFIG_MIGRATION
+	if (migration) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
-	}
+	} else
+#endif
+		dec_mm_counter(mm, file_rss);
+
 
 	page_remove_rmap(page);
 	page_cache_release(page);

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 2/5] page migration: Update comments
  2006-05-09  6:51 [PATCH 1/5] page migration: ifdef out code Christoph Lameter
@ 2006-05-09  6:51 ` Christoph Lameter
  2006-05-09  6:51 ` [PATCH 3/5] page migration: Remove useless mapping checks Christoph Lameter
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-05-09  6:51 UTC (permalink / raw)
  To: akpm; +Cc: linux-mm, Christoph Lameter

Fix comments

Edit comments to be nicer and to better reflect the current state
of page migration.

Remove useless BUG_ON() in migration_entry_wait() since
migration_entry_to_page() already does a BUG_ON().

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.17-rc3-mm1/include/linux/swap.h
===================================================================
--- linux-2.6.17-rc3-mm1.orig/include/linux/swap.h	2006-05-01 09:48:43.178275544 -0700
+++ linux-2.6.17-rc3-mm1/include/linux/swap.h	2006-05-04 22:55:37.540218299 -0700
@@ -32,7 +32,7 @@ static inline int current_is_kswapd(void
 #ifndef CONFIG_MIGRATION
 #define MAX_SWAPFILES		(1 << MAX_SWAPFILES_SHIFT)
 #else
-/* Use last entry for page migration swap entries */
+/* Use last two entries for page migration swap entries */
 #define MAX_SWAPFILES		((1 << MAX_SWAPFILES_SHIFT)-2)
 #define SWP_MIGRATION_READ	MAX_SWAPFILES
 #define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + 1)
Index: linux-2.6.17-rc3-mm1/mm/migrate.c
===================================================================
--- linux-2.6.17-rc3-mm1.orig/mm/migrate.c	2006-05-01 09:48:43.581570781 -0700
+++ linux-2.6.17-rc3-mm1/mm/migrate.c	2006-05-04 22:55:37.541194801 -0700
@@ -121,8 +121,7 @@ static inline int is_swap_pte(pte_t pte)
 }
 
 /*
- * Restore a potential migration pte to a working pte entry for
- * anonymous pages.
+ * Restore a potential migration pte to a working pte entry
  */
 static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
 		struct page *old, struct page *new)
@@ -181,9 +180,8 @@ out:
 }
 
 /*
- * Note that remove_file_migration_ptes will only work on regular mappings
- * specialized other mappings will simply be unmapped and do not use
- * migration entries.
+ * Note that remove_file_migration_ptes will only work on regular mappings,
+ * Nonlinear mappings do not use migration entries.
  */
 static void remove_file_migration_ptes(struct page *old, struct page *new)
 {
@@ -269,9 +267,6 @@ void migration_entry_wait(struct mm_stru
 
 	page = migration_entry_to_page(entry);
 
-	/* Pages with migration entries are always locked */
-	BUG_ON(!PageLocked(page));
-
 	get_page(page);
 	pte_unmap_unlock(ptep, ptl);
 	wait_on_page_locked(page);
@@ -282,7 +277,7 @@ out:
 }
 
 /*
- * Remove or replace the page in the mapping.
+ * Replace the page in the mapping.
  *
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
@@ -466,19 +461,20 @@ int buffer_migrate_page(struct address_s
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 
+/*
+ * Default handling if a filesystem does not provide a migration function.
+ */
 static int fallback_migrate_page(struct address_space *mapping,
 	struct page *newpage, struct page *page)
 {
-	/*
-	 * Default handling if a filesystem does not provide
-	 * a migration function. We can only migrate clean
-	 * pages so try to write out any dirty pages first.
-	 */
 	if (PageDirty(page)) {
 		/*
-		 * Remove the migration entries because pageout() may
-		 * unlock which may result in migration entries pointing
-		 * to unlocked pages.
+		 * A dirty page may imply that the underlying filesystem has
+		 * the page on some queue. So the page must be clean for
+		 * migration. Writeout may mean we loose the lock and the
+		 * page state is no longer what we checked for earlier.
+		 * At this point we know that the migration attempt cannot
+		 * be successful.
 		 */
 		remove_migration_ptes(page, page);
 
@@ -490,7 +486,7 @@ static int fallback_migrate_page(struct 
 	}
 
 	/*
-	 * Buffers are managed in a filesystem specific way.
+	 * Buffers may be managed in a filesystem specific way.
 	 * We must have no buffers or drop them.
 	 */
 	if (page_has_buffers(page) &&
Index: linux-2.6.17-rc3-mm1/Documentation/vm/page_migration
===================================================================
--- linux-2.6.17-rc3-mm1.orig/Documentation/vm/page_migration	2006-04-26 19:19:25.000000000 -0700
+++ linux-2.6.17-rc3-mm1/Documentation/vm/page_migration	2006-05-07 22:34:33.385854538 -0700
@@ -62,15 +62,15 @@ A. In kernel use of migrate_pages()
    It also prevents the swapper or other scans to encounter
    the page.
 
-2. Generate a list of newly allocates page. These pages will contain the
+2. Generate a list of newly allocates pages. These pages will contain the
    contents of the pages from the first list after page migration is
    complete.
 
 3. The migrate_pages() function is called which attempts
    to do the migration. It returns the moved pages in the
    list specified as the third parameter and the failed
-   migrations in the fourth parameter. The first parameter
-   will contain the pages that could still be retried.
+   migrations in the fourth parameter. When the function
+   returns the first list will contain the pages that could still be retried.
 
 4. The leftover pages of various types are returned
    to the LRU using putback_to_lru_pages() or otherwise
@@ -93,83 +93,58 @@ Steps:
 
 2. Insure that writeback is complete.
 
-3. Make sure that the page has assigned swap cache entry if
-   it is an anonyous page. The swap cache reference is necessary
-   to preserve the information contain in the page table maps while
-   page migration occurs.
-
-4. Prep the new page that we want to move to. It is locked
+3. Prep the new page that we want to move to. It is locked
    and set to not being uptodate so that all accesses to the new
    page immediately lock while the move is in progress.
 
-5. All the page table references to the page are either dropped (file
-   backed pages) or converted to swap references (anonymous pages).
-   This should decrease the reference count.
+4. The new page is prepped with some settings from the old page so that
+   accesses to the new page will discover a page with the correct settings.
+
+5. All the page table references to the page are converted
+   to migration entries or dropped (nonlinear vmas).
+   This decrease the mapcount of a page. If the resulting
+   mapcount is not zero then we do not migrate the page.
+   All user space processes that attempt to access the page
+   will now wait on the page lock.
 
 6. The radix tree lock is taken. This will cause all processes trying
-   to reestablish a pte to block on the radix tree spinlock.
+   to access the page via the mapping to block on the radix tree spinlock.
 
 7. The refcount of the page is examined and we back out if references remain
    otherwise we know that we are the only one referencing this page.
 
 8. The radix tree is checked and if it does not contain the pointer to this
-   page then we back out because someone else modified the mapping first.
-
-9. The mapping is checked. If the mapping is gone then a truncate action may
-   be in progress and we back out.
+   page then we back out because someone else modified the radix tree.
 
-10. The new page is prepped with some settings from the old page so that
-   accesses to the new page will be discovered to have the correct settings.
+9. The radix tree is changed to point to the new page.
 
-11. The radix tree is changed to point to the new page.
+10. The reference count of the old page is dropped because the radix tree
+    reference is gone. A reference to the new page is established because
+    the new page is referenced to by the radix tree.
 
-12. The reference count of the old page is dropped because the radix tree
-    reference is gone.
+11. The radix tree lock is dropped. With that lookups in the mapping
+    become possible again. Processes will move from spinning on the tree_lock
+    to sleeping on the locked new page.
 
-13. The radix tree lock is dropped. With that lookups become possible again
-    and other processes will move from spinning on the tree lock to sleeping on
-    the locked new page.
+12. The page contents are copied to the new page.
 
-14. The page contents are copied to the new page.
+13. The remaining page flags are copied to the new page.
 
-15. The remaining page flags are copied to the new page.
+14. The old page flags are cleared to indicate that the page does
+    not provide any information anymore.
 
-16. The old page flags are cleared to indicate that the page does
-    not use any information anymore.
+15. Queued up writeback on the new page is triggered.
 
-17. Queued up writeback on the new page is triggered.
-
-18. If swap pte's were generated for the page then replace them with real
-    ptes. This will reenable access for processes not blocked by the page lock.
+16. If migration entries were page then replace them with real ptes. Doing
+    so will enable access for user space processes not already waiting for
+    the page lock.
 
 19. The page locks are dropped from the old and new page.
-    Processes waiting on the page lock can continue.
+    Processes waiting on the page lock will redo their page faults
+    and will reach the new page.
 
 20. The new page is moved to the LRU and can be scanned by the swapper
     etc again.
 
-TODO list
----------
-
-- Page migration requires the use of swap handles to preserve the
-  information of the anonymous page table entries. This means that swap
-  space is reserved but never used. The maximum number of swap handles used
-  is determined by CHUNK_SIZE (see mm/mempolicy.c) per ongoing migration.
-  Reservation of pages could be avoided by having a special type of swap
-  handle that does not require swap space and that would only track the page
-  references. Something like that was proposed by Marcelo Tosatti in the
-  past (search for migration cache on lkml or linux-mm@kvack.org).
-
-- Page migration unmaps ptes for file backed pages and requires page
-  faults to reestablish these ptes. This could be optimized by somehow
-  recording the references before migration and then reestablish them later.
-  However, there are several locking challenges that have to be overcome
-  before this is possible.
-
-- Page migration generates read ptes for anonymous pages. Dirty page
-  faults are required to make the pages writable again. It may be possible
-  to generate a pte marked dirty if it is known that the page is dirty and
-  that this process has the only reference to that page.
-
-Christoph Lameter, March 8, 2006.
+Christoph Lameter, May 8, 2006.
 

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 3/5] page migration: Remove useless mapping checks
  2006-05-09  6:51 [PATCH 1/5] page migration: ifdef out code Christoph Lameter
  2006-05-09  6:51 ` [PATCH 2/5] page migration: Update comments Christoph Lameter
@ 2006-05-09  6:51 ` Christoph Lameter
  2006-05-09  6:52 ` [PATCH 4/5] page migration: Fix up remove_migration_ptes() Christoph Lameter
  2006-05-09  6:52 ` [PATCH 5/5] page migration: Replace call to pageout() with writepage() Christoph Lameter
  3 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-05-09  6:51 UTC (permalink / raw)
  To: akpm; +Cc: linux-mm, Christoph Lameter

Remove another check for mapping

Page migration still checked for mapping being NULL after
taking the tree_lock. However the mapping never changes for a locked page.
Remove two more checks for mapping being NULL.

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.17-rc3/mm/migrate.c
===================================================================
--- linux-2.6.17-rc3.orig/mm/migrate.c	2006-04-30 22:45:53.794977846 -0700
+++ linux-2.6.17-rc3/mm/migrate.c	2006-05-03 21:49:52.957619975 -0700
@@ -291,7 +291,7 @@
 
 	if (!mapping) {
 		/* Anonymous page */
-		if (page_count(page) != 1 || !page->mapping)
+		if (page_count(page) != 1)
 			return -EAGAIN;
 		return 0;
 	}
@@ -302,8 +302,7 @@
 						&mapping->page_tree,
 						page_index(page));
 
-	if (!page_mapping(page) ||
-			page_count(page) != 2 + !!PagePrivate(page) ||
+	if (page_count(page) != 2 + !!PagePrivate(page) ||
 			*radix_pointer != page) {
 		write_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 4/5] page migration: Fix up remove_migration_ptes()
  2006-05-09  6:51 [PATCH 1/5] page migration: ifdef out code Christoph Lameter
  2006-05-09  6:51 ` [PATCH 2/5] page migration: Update comments Christoph Lameter
  2006-05-09  6:51 ` [PATCH 3/5] page migration: Remove useless mapping checks Christoph Lameter
@ 2006-05-09  6:52 ` Christoph Lameter
  2006-05-09  6:52 ` [PATCH 5/5] page migration: Replace call to pageout() with writepage() Christoph Lameter
  3 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-05-09  6:52 UTC (permalink / raw)
  To: akpm; +Cc: linux-mm, Christoph Lameter

Fix up remove_migration_ptes()

Add the update_mmu/lazy_mmu_update() calls that most arches need
and that IA64 needs for executable pages.

Also move the call to page_address_in_vma() into remove_migration_pte()
and check for the possible -EFAULT return code.

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.17-rc3-mm1/mm/migrate.c
===================================================================
--- linux-2.6.17-rc3-mm1.orig/mm/migrate.c	2006-05-08 01:46:23.369211137 -0700
+++ linux-2.6.17-rc3-mm1/mm/migrate.c	2006-05-08 23:11:42.859814459 -0700
@@ -123,7 +123,7 @@ static inline int is_swap_pte(pte_t pte)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
+static void remove_migration_pte(struct vm_area_struct *vma,
 		struct page *old, struct page *new)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -133,6 +133,10 @@ static void remove_migration_pte(struct 
  	pmd_t *pmd;
 	pte_t *ptep, pte;
  	spinlock_t *ptl;
+	unsigned long addr = page_address_in_vma(new, vma);
+
+	if (addr == -EFAULT)
+		return;
 
  	pgd = pgd_offset(mm, addr);
 	if (!pgd_present(*pgd))
@@ -175,6 +179,10 @@ static void remove_migration_pte(struct 
 	else
 		page_add_file_rmap(new);
 
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(vma, addr, pte);
+	lazy_mmu_prot_update(pte);
+
 out:
 	pte_unmap_unlock(ptep, ptl);
 }
@@ -196,7 +204,7 @@ static void remove_file_migration_ptes(s
 	spin_lock(&mapping->i_mmap_lock);
 
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
-		remove_migration_pte(vma, page_address_in_vma(new, vma), old, new);
+		remove_migration_pte(vma, old, new);
 
 	spin_unlock(&mapping->i_mmap_lock);
 }
@@ -223,8 +231,7 @@ static void remove_anon_migration_ptes(s
 	spin_lock(&anon_vma->lock);
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
-		remove_migration_pte(vma, page_address_in_vma(new, vma),
-					old, new);
+		remove_migration_pte(vma, old, new);
 
 	spin_unlock(&anon_vma->lock);
 }

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 5/5] page migration: Replace call to pageout() with writepage()
  2006-05-09  6:51 [PATCH 1/5] page migration: ifdef out code Christoph Lameter
                   ` (2 preceding siblings ...)
  2006-05-09  6:52 ` [PATCH 4/5] page migration: Fix up remove_migration_ptes() Christoph Lameter
@ 2006-05-09  6:52 ` Christoph Lameter
  3 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-05-09  6:52 UTC (permalink / raw)
  To: akpm; +Cc: linux-mm, Christoph Lameter

page migration: Do not use pageout() but writepage() for fallback.

Migration cannot use pageout for fallback since the migration entries have
to be removed before calling writepage. writepage (and therefore pageout)
may drop the lock and expose migration entries. Removing migration
entries in turn increases the mapcount which results in pageout()
not writing out the page. sigh.

This problem was re-introduced with the use of migration entries for file
backed pages.

Implement our own writeout() function (this approach was posted already last
week but not included in the patch reorg) and undo the export of pageout()
since page migration was the only user of pageout().

Also remove a definition for remove_vma_swap() that was somehow left over
from earlier changes.

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.17-rc3-mm1/mm/migrate.c
===================================================================
--- linux-2.6.17-rc3-mm1.orig/mm/migrate.c	2006-05-08 23:11:42.859814459 -0700
+++ linux-2.6.17-rc3-mm1/mm/migrate.c	2006-05-08 23:13:15.904821312 -0700
@@ -24,6 +24,7 @@
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/writeback.h>
 
 #include "internal.h"
 
@@ -468,28 +469,58 @@ int buffer_migrate_page(struct address_s
 EXPORT_SYMBOL(buffer_migrate_page);
 
 /*
- * Default handling if a filesystem does not provide a migration function.
+ * Writeback a page to clean the dirty state
  */
-static int fallback_migrate_page(struct address_space *mapping,
-	struct page *newpage, struct page *page)
+static int writeout(struct address_space *mapping, struct page *page)
 {
-	if (PageDirty(page)) {
-		/*
-		 * A dirty page may imply that the underlying filesystem has
-		 * the page on some queue. So the page must be clean for
-		 * migration. Writeout may mean we loose the lock and the
-		 * page state is no longer what we checked for earlier.
-		 * At this point we know that the migration attempt cannot
-		 * be successful.
-		 */
-		remove_migration_ptes(page, page);
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_NONE,
+		.nr_to_write = 1,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+		.nonblocking = 1,
+		.for_reclaim = 1
+	};
+	int rc;
 
-		if (pageout(page, mapping) == PAGE_SUCCESS)
-			/* unlocked. Relock */
-			lock_page(page);
+	if (!mapping->a_ops->writepage)
+		/* No write method for the address space */
+		return -EINVAL;
 
+	if (!clear_page_dirty_for_io(page))
+		/* Someone else already triggered a write */
 		return -EAGAIN;
-	}
+
+	/*
+	 * A dirty page may imply that the underlying filesystem has
+	 * the page on some queue. So the page must be clean for
+	 * migration. Writeout may mean we loose the lock and the
+	 * page state is no longer what we checked for earlier.
+	 * At this point we know that the migration attempt cannot
+	 * be successful.
+	 */
+	remove_migration_ptes(page, page);
+
+	rc = mapping->a_ops->writepage(page, &wbc);
+	if (rc < 0)
+		/* I/O Error writing */
+		return -EIO;
+
+	if (rc != AOP_WRITEPAGE_ACTIVATE)
+		/* unlocked. Relock */
+		lock_page(page);
+
+	return -EAGAIN;
+}
+
+/*
+ * Default handling if a filesystem does not provide a migration function.
+ */
+static int fallback_migrate_page(struct address_space *mapping,
+	struct page *newpage, struct page *page)
+{
+	if (PageDirty(page))
+		return writeout(mapping, page);
 
 	/*
 	 * Buffers may be managed in a filesystem specific way.
Index: linux-2.6.17-rc3-mm1/include/linux/swap.h
===================================================================
--- linux-2.6.17-rc3-mm1.orig/include/linux/swap.h	2006-05-08 01:44:39.629546798 -0700
+++ linux-2.6.17-rc3-mm1/include/linux/swap.h	2006-05-08 23:13:15.960481920 -0700
@@ -188,20 +188,6 @@ extern unsigned long shrink_all_memory(u
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
 
-/* possible outcome of pageout() */
-typedef enum {
-	/* failed to write page out, page is locked */
-	PAGE_KEEP,
-	/* move page to the active list, page is locked */
-	PAGE_ACTIVATE,
-	/* page has been sent to the disk successfully, page is unlocked */
-	PAGE_SUCCESS,
-	/* page is clean and locked */
-	PAGE_CLEAN,
-} pageout_t;
-
-extern pageout_t pageout(struct page *page, struct address_space *mapping);
-
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
 extern int zone_reclaim_interval;
@@ -264,7 +250,6 @@ extern int remove_exclusive_swap_page(st
 struct backing_dev_info;
 
 extern spinlock_t swap_lock;
-extern int remove_vma_swap(struct vm_area_struct *vma, struct page *page);
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
Index: linux-2.6.17-rc3-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.17-rc3-mm1.orig/mm/vmscan.c	2006-05-08 00:48:20.410392574 -0700
+++ linux-2.6.17-rc3-mm1/mm/vmscan.c	2006-05-08 23:13:15.975129449 -0700
@@ -291,11 +291,23 @@ static void handle_write_error(struct ad
 	unlock_page(page);
 }
 
+/* possible outcome of pageout() */
+typedef enum {
+	/* failed to write page out, page is locked */
+	PAGE_KEEP,
+	/* move page to the active list, page is locked */
+	PAGE_ACTIVATE,
+	/* page has been sent to the disk successfully, page is unlocked */
+	PAGE_SUCCESS,
+	/* page is clean and locked */
+	PAGE_CLEAN,
+} pageout_t;
+
 /*
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping)
 {
 	/*
 	 * If the page is dirty, only perform writeback if that write

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2006-05-09  6:52 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-05-09  6:51 [PATCH 1/5] page migration: ifdef out code Christoph Lameter
2006-05-09  6:51 ` [PATCH 2/5] page migration: Update comments Christoph Lameter
2006-05-09  6:51 ` [PATCH 3/5] page migration: Remove useless mapping checks Christoph Lameter
2006-05-09  6:52 ` [PATCH 4/5] page migration: Fix up remove_migration_ptes() Christoph Lameter
2006-05-09  6:52 ` [PATCH 5/5] page migration: Replace call to pageout() with writepage() Christoph Lameter

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).