linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Tim Chen <tim.c.chen@linux.intel.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Vladimir Davydov <vdavydov@virtuozzo.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Michal Hocko <mhocko@suse.cz>, Minchan Kim <minchan@kernel.org>,
	Hugh Dickins <hughd@google.com>
Cc: "Kirill A.Shutemov" <kirill.shutemov@linux.intel.com>,
	Andi Kleen <andi@firstfloor.org>, Aaron Lu <aaron.lu@intel.com>,
	Huang Ying <ying.huang@intel.com>, linux-mm <linux-mm@kvack.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH 2/7] mm: Group the processing of anonymous pages to be swapped in shrink_page_list
Date: Tue, 03 May 2016 14:01:38 -0700	[thread overview]
Message-ID: <1462309298.21143.9.camel@linux.intel.com> (raw)
In-Reply-To: <cover.1462306228.git.tim.c.chen@linux.intel.com>

This is a clean up patch to reorganize the processing of anonymous
pages in shrink_page_list.

We delay the processing of swapping anonymous pages in shrink_page_list
and put them together on a separate list.  This prepares for batching
of pages to be swapped.  The processing of the list of anonymous pages
to be swapped is consolidated in the function shrink_anon_page_list.

Functionally, there is no change in the logic of how pages are processed,
just the order of processing of the anonymous pages and file mapped
pages in shrink_page_list.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 mm/vmscan.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 77 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5542005..132ba02 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,6 +1083,58 @@ static void pg_finish(struct page *page,
 	}
 }
 
+static unsigned long shrink_anon_page_list(struct list_head *page_list,
+	struct zone *zone,
+	struct scan_control *sc,
+	struct list_head *swap_pages,
+	struct list_head *ret_pages,
+	struct list_head *free_pages,
+	enum ttu_flags ttu_flags,
+	int *pgactivate,
+	int n,
+	bool clean)
+{
+	unsigned long nr_reclaimed = 0;
+	enum pg_result pg_dispose;
+
+	while (n > 0) {
+		struct page *page;
+		int swap_ret = SWAP_SUCCESS;
+
+		--n;
+		if (list_empty(swap_pages))
+			return nr_reclaimed;
+
+		page = lru_to_page(swap_pages);
+
+		list_del(&page->lru);
+
+		/*
+		* Anonymous process memory has backing store?
+		* Try to allocate it some swap space here.
+		*/
+
+		if (!add_to_swap(page, page_list)) {
+			pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret, &nr_reclaimed,
+					pgactivate, ret_pages, free_pages);
+			continue;
+		}
+
+		if (clean)
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+				PAGEREF_RECLAIM_CLEAN, true, true, &swap_ret, page);
+		else
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+				PAGEREF_RECLAIM, true, true, &swap_ret, page);
+
+		pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
+				pgactivate, ret_pages, free_pages);
+	}
+	return nr_reclaimed;
+}
+
+
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1099,6 +1151,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
+	LIST_HEAD(swap_pages);
+	LIST_HEAD(swap_pages_clean);
 	int pgactivate = 0;
 	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_dirty = 0;
@@ -1106,6 +1160,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_writeback = 0;
 	unsigned long nr_immediate = 0;
+	unsigned long nr_swap = 0;
+	unsigned long nr_swap_clean = 0;
 
 	cond_resched();
 
@@ -1271,12 +1327,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				pg_dispose = PG_KEEP_LOCKED;
 				goto finish;
 			}
-			if (!add_to_swap(page, page_list)) {
-				pg_dispose = PG_ACTIVATE_LOCKED;
-				goto finish;
+			if (references == PAGEREF_RECLAIM_CLEAN) {
+				list_add(&page->lru, &swap_pages_clean);
+				++nr_swap_clean;
+			} else {
+				list_add(&page->lru, &swap_pages);
+				++nr_swap;
 			}
-			lazyfree = true;
-			may_enter_fs = 1;
+
+			pg_dispose = PG_NEXT;
+			goto finish;
+
 		}
 
 		pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
@@ -1288,6 +1349,17 @@ finish:
 
 	}
 
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+						&swap_pages_clean, &ret_pages,
+						&free_pages, ttu_flags,
+						&pgactivate, nr_swap_clean,
+						true);
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+						&swap_pages, &ret_pages,
+						&free_pages, ttu_flags,
+						&pgactivate, nr_swap,
+						false);
+
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
 	free_hot_cold_page_list(&free_pages, true);
-- 
2.5.5

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

  parent reply	other threads:[~2016-05-03 21:01 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <cover.1462306228.git.tim.c.chen@linux.intel.com>
2016-05-03 21:00 ` [PATCH 0/7] mm: Improve swap path scalability with batched operations Tim Chen
2016-05-04 12:45   ` Michal Hocko
2016-05-04 17:13     ` Tim Chen
2016-05-04 19:49       ` Michal Hocko
2016-05-04 21:05         ` Andi Kleen
2016-05-04 21:25         ` Johannes Weiner
2016-05-05  0:08           ` Minchan Kim
2016-05-05  7:49           ` Michal Hocko
2016-05-05 15:56             ` Tim Chen
2016-05-03 21:01 ` [PATCH 1/7] mm: Cleanup - Reorganize the shrink_page_list code into smaller functions Tim Chen
2016-05-27 16:40   ` Tim Chen
2016-05-30  8:48     ` Michal Hocko
2016-05-03 21:01 ` Tim Chen [this message]
2016-05-03 21:02 ` [PATCH 3/7] mm: Add new functions to allocate swap slots in batches Tim Chen
2016-05-03 21:02 ` [PATCH 4/7] mm: Shrink page list batch allocates swap slots for page swapping Tim Chen
2016-05-03 21:02 ` [PATCH 5/7] mm: Batch addtion of pages to swap cache Tim Chen
2016-05-03 21:03 ` [PATCH 6/7] mm: Cleanup - Reorganize code to group handling of page Tim Chen
2016-05-03 21:03 ` [PATCH 7/7] mm: Batch unmapping of pages that are in swap cache Tim Chen

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1462309298.21143.9.camel@linux.intel.com \
    --to=tim.c.chen@linux.intel.com \
    --cc=aaron.lu@intel.com \
    --cc=akpm@linux-foundation.org \
    --cc=andi@firstfloor.org \
    --cc=hannes@cmpxchg.org \
    --cc=hughd@google.com \
    --cc=kirill.shutemov@linux.intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mhocko@suse.cz \
    --cc=minchan@kernel.org \
    --cc=vdavydov@virtuozzo.com \
    --cc=ying.huang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).