From: Chen Yucong <slaoub@gmail.com>
To: akpm@linux-foundation.org
Cc: mgorman@suse.de, hannes@cmpxchg.org, mhocko@suse.cz,
riel@redhat.com, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, Chen Yucong <slaoub@gmail.com>
Subject: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
Date: Fri, 13 Jun 2014 13:58:08 +0800 [thread overview]
Message-ID: <1402639088-4845-1-git-send-email-slaoub@gmail.com> (raw)
shrink_page_list() has too many arguments - the count has already reached ten.
Some of those arguments and temporary variables introduce an extra 80 bytes
on the stack. This patch wraps five parameters into a writeback_stats structure
and removes some temporary variables, thus making the relevant functions
consume less stack space.
Before mm/vmscan.c is changed:
text data bss dec hex filename
6876698 957224 966656 8800578 864942 vmlinux-3.15
After mm/vmscan.c is changed:
text data bss dec hex filename
6876506 957224 966656 8800386 864882 vmlinux-3.15
scripts/checkstack.pl can be used for checking the change of the target function stack.
Before mm/vmscan.c is changed:
0xffffffff810af103 shrink_inactive_list []: 152
0xffffffff810af43d shrink_inactive_list []: 152
-------------------------------------------------------------
0xffffffff810aede8 reclaim_clean_pages_from_list []: 184
0xffffffff810aeef8 reclaim_clean_pages_from_list []: 184
-------------------------------------------------------------
0xffffffff810ae582 shrink_page_list []: 232
0xffffffff810aedb5 shrink_page_list []: 232
After mm/vmscan.c is changed:
0xffffffff810af078 shrink_inactive_list []: 120
0xffffffff810af36d shrink_inactive_list []: 120
-------------------------------------------------------------
With: struct writeback_stats dummy = {};
0xffffffff810aed6c reclaim_clean_pages_from_list []: 152
0xffffffff810aee68 reclaim_clean_pages_from_list []: 152
-------------------------------------------------------------
With: static struct writeback_stats dummy = {};
0xffffffff810aed69 reclaim_clean_pages_from_list []: 120
0xffffffff810aee4d reclaim_clean_pages_from_list []: 120
--------------------------------------------------------------------------------------
0xffffffff810ae586 shrink_page_list []: 184 ---> sub $0xb8,%rsp
0xffffffff810aed36 shrink_page_list []: 184 ---> add $0xb8,%rsp
From the above figures, we can see that the stack-size reduction is 32 bytes for
shrink_inactive_list and reclaim_clean_pages_from_list, and 48 (232-184) bytes
for shrink_page_list. Given the call hierarchy of these functions, the total
reduction from this change is 80 (32+48) bytes.
Changes since v1: https://lkml.org/lkml/2014/6/12/159
* Rename arg_container to writeback_stats
* Change the way of initializing the writeback_stats object.
Signed-off-by: Chen Yucong <slaoub@gmail.com>
---
mm/vmscan.c | 62 ++++++++++++++++++++++++++---------------------------------
1 file changed, 27 insertions(+), 35 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8ffe4e..3f28e39 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -791,28 +791,31 @@ static void page_check_dirty_writeback(struct page *page,
}
/*
+ * Callers pass a prezeroed writeback_stats into the shrink functions to gather
+ * statistics about how many pages of particular states were processed
+ */
+struct writeback_stats {
+ unsigned long nr_dirty;
+ unsigned long nr_unqueued_dirty;
+ unsigned long nr_congested;
+ unsigned long nr_writeback;
+ unsigned long nr_immediate;
+};
+
+/*
* shrink_page_list() returns the number of reclaimed pages
*/
static unsigned long shrink_page_list(struct list_head *page_list,
struct zone *zone,
struct scan_control *sc,
enum ttu_flags ttu_flags,
- unsigned long *ret_nr_dirty,
- unsigned long *ret_nr_unqueued_dirty,
- unsigned long *ret_nr_congested,
- unsigned long *ret_nr_writeback,
- unsigned long *ret_nr_immediate,
+ struct writeback_stats *ws,
bool force_reclaim)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
int pgactivate = 0;
- unsigned long nr_unqueued_dirty = 0;
- unsigned long nr_dirty = 0;
- unsigned long nr_congested = 0;
unsigned long nr_reclaimed = 0;
- unsigned long nr_writeback = 0;
- unsigned long nr_immediate = 0;
cond_resched();
@@ -858,10 +861,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
page_check_dirty_writeback(page, &dirty, &writeback);
if (dirty || writeback)
- nr_dirty++;
+ ws->nr_dirty++;
if (dirty && !writeback)
- nr_unqueued_dirty++;
+ ws->nr_unqueued_dirty++;
/*
* Treat this page as congested if the underlying BDI is or if
@@ -872,7 +875,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
mapping = page_mapping(page);
if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
(writeback && PageReclaim(page)))
- nr_congested++;
+ ws->nr_congested++;
/*
* If a page at the tail of the LRU is under writeback, there
@@ -916,7 +919,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (current_is_kswapd() &&
PageReclaim(page) &&
zone_is_reclaim_writeback(zone)) {
- nr_immediate++;
+ ws->nr_immediate++;
goto keep_locked;
/* Case 2 above */
@@ -934,7 +937,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* and it's also appropriate in global reclaim.
*/
SetPageReclaim(page);
- nr_writeback++;
+ ws->nr_writeback++;
goto keep_locked;
@@ -1132,11 +1135,6 @@ keep:
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);
mem_cgroup_uncharge_end();
- *ret_nr_dirty += nr_dirty;
- *ret_nr_congested += nr_congested;
- *ret_nr_unqueued_dirty += nr_unqueued_dirty;
- *ret_nr_writeback += nr_writeback;
- *ret_nr_immediate += nr_immediate;
return nr_reclaimed;
}
@@ -1148,7 +1146,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
.priority = DEF_PRIORITY,
.may_unmap = 1,
};
- unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+ unsigned long ret;
+ static struct writeback_stats dummy = { };
struct page *page, *next;
LIST_HEAD(clean_pages);
@@ -1161,8 +1160,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
}
ret = shrink_page_list(&clean_pages, zone, &sc,
- TTU_UNMAP|TTU_IGNORE_ACCESS,
- &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+ TTU_UNMAP|TTU_IGNORE_ACCESS, &dummy, true);
list_splice(&clean_pages, page_list);
mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
return ret;
@@ -1469,11 +1467,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
- unsigned long nr_dirty = 0;
- unsigned long nr_congested = 0;
- unsigned long nr_unqueued_dirty = 0;
- unsigned long nr_writeback = 0;
- unsigned long nr_immediate = 0;
+ struct writeback_stats ws = { };
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
struct zone *zone = lruvec_zone(lruvec);
@@ -1515,9 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
return 0;
nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
- &nr_dirty, &nr_unqueued_dirty, &nr_congested,
- &nr_writeback, &nr_immediate,
- false);
+ &ws, false);
spin_lock_irq(&zone->lru_lock);
@@ -1554,7 +1546,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* of pages under pages flagged for immediate reclaim and stall if any
* are encountered in the nr_immediate check below.
*/
- if (nr_writeback && nr_writeback == nr_taken)
+ if (ws.nr_writeback && ws.nr_writeback == nr_taken)
zone_set_flag(zone, ZONE_WRITEBACK);
/*
@@ -1566,7 +1558,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* Tag a zone as congested if all the dirty pages scanned were
* backed by a congested BDI and wait_iff_congested will stall.
*/
- if (nr_dirty && nr_dirty == nr_congested)
+ if (ws.nr_dirty && ws.nr_dirty == ws.nr_congested)
zone_set_flag(zone, ZONE_CONGESTED);
/*
@@ -1576,7 +1568,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* pages from reclaim context. It will forcibly stall in the
* next check.
*/
- if (nr_unqueued_dirty == nr_taken)
+ if (ws.nr_unqueued_dirty == nr_taken)
zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
/*
@@ -1585,7 +1577,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* implies that pages are cycling through the LRU faster than
* they are written so also forcibly stall.
*/
- if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
+ if ((ws.nr_unqueued_dirty == nr_taken || ws.nr_immediate) &&
current_may_throttle())
congestion_wait(BLK_RW_ASYNC, HZ/10);
}
--
1.7.10.4
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next reply other threads:[~2014-06-13 5:59 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-06-13 5:58 Chen Yucong [this message]
2014-06-13 13:47 ` [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption Chen Yucong
2014-06-13 14:17 ` Johannes Weiner
2014-06-25 6:59 ` Chen Yucong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1402639088-4845-1-git-send-email-slaoub@gmail.com \
--to=slaoub@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=hannes@cmpxchg.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mgorman@suse.de \
--cc=mhocko@suse.cz \
--cc=riel@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).