From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1756635AbcECVBo (ORCPT );
	Tue, 3 May 2016 17:01:44 -0400
Received: from mga03.intel.com ([134.134.136.65]:13119 "EHLO mga03.intel.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1756478AbcECVBn (ORCPT );
	Tue, 3 May 2016 17:01:43 -0400
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.24,574,1455004800"; d="scan'208";a="798050804"
Message-ID: <1462309298.21143.9.camel@linux.intel.com>
Subject: [PATCH 2/7] mm: Group the processing of anonymous pages to be
 swapped in shrink_page_list
From: Tim Chen 
To: Andrew Morton , Vladimir Davydov , Johannes Weiner ,
	Michal Hocko , Minchan Kim , Hugh Dickins 
Cc: "Kirill A.Shutemov" , Andi Kleen , Aaron Lu , Huang Ying ,
	linux-mm , linux-kernel@vger.kernel.org
Date: Tue, 03 May 2016 14:01:38 -0700
In-Reply-To: 
References: 
Content-Type: text/plain; charset="UTF-8"
X-Mailer: Evolution 3.18.5.2 (3.18.5.2-1.fc23)
Mime-Version: 1.0
Content-Transfer-Encoding: 8bit
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

This is a cleanup patch that reorganizes the processing of anonymous
pages in shrink_page_list.  Instead of swapping out anonymous pages as
they are encountered, we defer them and gather them on a separate list.
This prepares for batching of pages to be swapped.  The processing of
the deferred anonymous pages is consolidated in the new function
shrink_anon_page_list.

There is no functional change in how individual pages are processed,
only in the order in which the anonymous and file-mapped pages are
handled in shrink_page_list.

Signed-off-by: Tim Chen 
---
 mm/vmscan.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 77 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5542005..132ba02 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,6 +1083,58 @@ static void pg_finish(struct page *page,
 	}
 }
 
+static unsigned long shrink_anon_page_list(struct list_head *page_list,
+				struct zone *zone,
+				struct scan_control *sc,
+				struct list_head *swap_pages,
+				struct list_head *ret_pages,
+				struct list_head *free_pages,
+				enum ttu_flags ttu_flags,
+				int *pgactivate,
+				int n,
+				bool clean)
+{
+	unsigned long nr_reclaimed = 0;
+	enum pg_result pg_dispose;
+
+	while (n > 0) {
+		struct page *page;
+		int swap_ret = SWAP_SUCCESS;
+
+		--n;
+		if (list_empty(swap_pages))
+			return nr_reclaimed;
+
+		page = lru_to_page(swap_pages);
+
+		list_del(&page->lru);
+
+		/*
+		 * Anonymous process memory has backing store?
+		 * Try to allocate it some swap space here.
+		 */
+
+		if (!add_to_swap(page, page_list)) {
+			pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret, &nr_reclaimed,
+					pgactivate, ret_pages, free_pages);
+			continue;
+		}
+
+		if (clean)
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+					PAGEREF_RECLAIM_CLEAN, true, true, &swap_ret, page);
+		else
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+					PAGEREF_RECLAIM, true, true, &swap_ret, page);
+
+		pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
+				pgactivate, ret_pages, free_pages);
+	}
+	return nr_reclaimed;
+}
+
+
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1099,6 +1151,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
+	LIST_HEAD(swap_pages);
+	LIST_HEAD(swap_pages_clean);
 	int pgactivate = 0;
 	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_dirty = 0;
@@ -1106,6 +1160,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_writeback = 0;
 	unsigned long nr_immediate = 0;
+	unsigned long nr_swap = 0;
+	unsigned long nr_swap_clean = 0;
 
 	cond_resched();
 
@@ -1271,12 +1327,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				pg_dispose = PG_KEEP_LOCKED;
 				goto finish;
 			}
-			if (!add_to_swap(page, page_list)) {
-				pg_dispose = PG_ACTIVATE_LOCKED;
-				goto finish;
+			if (references == PAGEREF_RECLAIM_CLEAN) {
+				list_add(&page->lru, &swap_pages_clean);
+				++nr_swap_clean;
+			} else {
+				list_add(&page->lru, &swap_pages);
+				++nr_swap;
 			}
-			lazyfree = true;
-			may_enter_fs = 1;
+
+			pg_dispose = PG_NEXT;
+			goto finish;
+
 		}
 
 		pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
@@ -1288,6 +1349,17 @@ finish:
 
 	}
 
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+					&swap_pages_clean, &ret_pages,
+					&free_pages, ttu_flags,
+					&pgactivate, nr_swap_clean,
+					true);
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+					&swap_pages, &ret_pages,
+					&free_pages, ttu_flags,
+					&pgactivate, nr_swap,
+					false);
+
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
 	free_hot_cold_page_list(&free_pages, true);
-- 
2.5.5
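
To illustrate the restructuring described in the changelog, the following
stand-alone userspace sketch mimics the same "defer, then batch" flow:
anonymous pages that would need swap are only queued during the main scan,
split into a clean and a dirty list, and reclaimed together per list
afterwards.  The names below (demo_page, list_push, reclaim_batch) are
invented for the example and do not exist in mm/vmscan.c; this is an analogy
for the control flow, not kernel code.

/*
 * Userspace analogy of the patch: collect deferred "anonymous" pages on
 * per-pass lists during the scan, then reclaim each list in one batch.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_page {
	int id;
	bool anon;               /* would need swap space to reclaim */
	bool referenced_clean;   /* analogue of PAGEREF_RECLAIM_CLEAN */
	struct demo_page *next;  /* simple singly linked list */
};

static void list_push(struct demo_page **head, struct demo_page *p)
{
	p->next = *head;
	*head = p;
}

/* Batch-process one deferred list; returns the number of pages "reclaimed". */
static unsigned long reclaim_batch(struct demo_page *head, bool clean)
{
	unsigned long nr = 0;

	for (struct demo_page *p = head; p; p = p->next) {
		/*
		 * In the kernel this is where add_to_swap()/handle_pgout()
		 * run; having the whole list here is what later patches
		 * exploit to allocate swap slots in a batch.
		 */
		printf("reclaim page %d (%s pass)\n", p->id,
		       clean ? "clean" : "dirty");
		nr++;
	}
	return nr;
}

int main(void)
{
	struct demo_page pages[6] = {
		{ .id = 0, .anon = true,  .referenced_clean = true  },
		{ .id = 1, .anon = false },
		{ .id = 2, .anon = true,  .referenced_clean = false },
		{ .id = 3, .anon = true,  .referenced_clean = true  },
		{ .id = 4, .anon = false },
		{ .id = 5, .anon = true,  .referenced_clean = false },
	};
	struct demo_page *swap_clean = NULL, *swap_dirty = NULL;
	unsigned long nr_reclaimed = 0;

	/*
	 * Main scan loop: file pages are handled inline, anonymous pages
	 * are only queued, mirroring the new shrink_page_list() flow.
	 */
	for (int i = 0; i < 6; i++) {
		struct demo_page *p = &pages[i];

		if (!p->anon) {
			printf("handle file page %d inline\n", p->id);
			nr_reclaimed++;
			continue;
		}
		list_push(p->referenced_clean ? &swap_clean : &swap_dirty, p);
	}

	/* Deferred anonymous pages are processed together, per list. */
	nr_reclaimed += reclaim_batch(swap_clean, true);
	nr_reclaimed += reclaim_batch(swap_dirty, false);

	printf("total reclaimed: %lu\n", nr_reclaimed);
	return 0;
}

Keeping clean and dirty pages on separate lists mirrors the
swap_pages_clean/swap_pages split in the patch, so each batch can be handled
with a single reclaim policy (PAGEREF_RECLAIM_CLEAN vs. PAGEREF_RECLAIM).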