From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from mail-wm0-f65.google.com ([74.125.82.65]:41569 "EHLO
	mail-wm0-f65.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1752322AbdKXJak (ORCPT );
	Fri, 24 Nov 2017 04:30:40 -0500
Received: by mail-wm0-f65.google.com with SMTP id b189so21130337wmd.0
	for ; Fri, 24 Nov 2017 01:30:39 -0800 (PST)
From: Michal Hocko 
To: Greg KH 
Cc: Jiri Slaby , jaewon31.kim@samsung.com, akpm@linux-foundation.org,
	js1304@gmail.com, minchan@kernel.org, stable@vger.kernel.org,
	torvalds@linux-foundation.org, Yang Shi , Arnd Bergmann ,
	Joonsoo Kim , Michal Hocko 
Subject: [PATCH] mm: check the return value of lookup_page_ext for all call sites
Date: Fri, 24 Nov 2017 10:30:32 +0100
Message-Id: <20171124093032.32071-1-mhocko@kernel.org>
In-Reply-To: <20171124092832.esq7pxxfeepok4yt@dhcp22.suse.cz>
References: <20171124092832.esq7pxxfeepok4yt@dhcp22.suse.cz>
Sender: stable-owner@vger.kernel.org
List-ID: 

From: Yang Shi 

commit f86e4271978bd93db466d6a95dad4b0fdcdb04f6 upstream.

Per the discussion with Joonsoo Kim [1], we need to check the return
value of lookup_page_ext() for all call sites, since it might return
NULL in some cases, although it is unlikely, i.e. memory hotplug.

Tested with ltp with "page_owner=0".

[1] http://lkml.kernel.org/r/20160519002809.GA10245@js1304-P5Q-DELUXE

[akpm@linux-foundation.org: fix build-breaking typos]
[arnd@arndb.de: fix build problems from lookup_page_ext]
  Link: http://lkml.kernel.org/r/6285269.2CksypHdYp@wuerfel
[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/1464023768-31025-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi 
Signed-off-by: Arnd Bergmann 
Cc: Joonsoo Kim 
Signed-off-by: Andrew Morton 
Signed-off-by: Linus Torvalds 
Signed-off-by: Michal Hocko 
---
 include/linux/page_idle.h | 43 ++++++++++++++++++++++++++++++++++++-------
 mm/debug-pagealloc.c      |  6 ++++++
 mm/page_alloc.c           |  6 ++++++
 mm/page_owner.c           | 16 ++++++++++++++++
 mm/vmstat.c               |  2 ++
 5 files changed, 66 insertions(+), 7 deletions(-)

diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-	return test_and_clear_bit(PAGE_EXT_YOUNG,
-				  &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index 5bf5906ce13b..3b8f1b83610e 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -34,6 +34,8 @@ static inline void set_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (!page_ext)
+		return;
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -42,6 +44,8 @@ static inline void clear_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (!page_ext)
+		return;
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -50,6 +54,8 @@ static inline bool page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (!page_ext)
+		return false;
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b5421ae86c6..38aca81deeaf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -560,6 +560,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -577,6 +580,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 983c3a10fa07..dd6b9cebf981 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -53,6 +53,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -60,6 +62,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -67,6 +70,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -79,6 +85,12 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp
+		 * So return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -194,6 +206,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -257,6 +271,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 				continue;
 
 			page_ext = lookup_page_ext(page);
+			if (unlikely(!page_ext))
+				continue;
 
 			/* Maybe overraping zone */
 			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c54fd2924f25..c344e3609c53 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1091,6 +1091,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 				continue;
 
 			page_ext = lookup_page_ext(page);
+			if (unlikely(!page_ext))
+				continue;
 
 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 				continue;
-- 
2.15.0
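
The pattern the patch applies is the same at every call site: treat
lookup_page_ext() as fallible and bail out, or fall back to a neutral
value, when it returns NULL (for instance while the page_ext storage for
a hot-added memory section has not been allocated yet). The code below is
a minimal userspace sketch of that pattern, not kernel code; lookup_record(),
record_table and record_is_young() are hypothetical stand-ins for
lookup_page_ext(), the per-section page_ext storage and a flag helper such
as page_is_young().

/*
 * Standalone illustration of "check the lookup result before
 * dereferencing". All names are hypothetical; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct record {                         /* stand-in for struct page_ext */
	unsigned long flags;
};

#define REC_YOUNG 0x1UL

/*
 * Stand-in for the per-section page_ext storage; a NULL slot models a
 * hot-added section whose extension array has not been allocated yet.
 */
static struct record *record_table[4];

/* Stand-in for lookup_page_ext(): may legitimately return NULL. */
static struct record *lookup_record(unsigned int idx)
{
	if (idx >= 4)
		return NULL;
	return record_table[idx];       /* may be NULL */
}

/*
 * Stand-in for a helper such as page_is_young(): the early NULL check
 * turns a missing record into "flag not set" instead of a NULL
 * dereference.
 */
static bool record_is_young(unsigned int idx)
{
	struct record *rec = lookup_record(idx);

	if (!rec)                       /* the check the patch adds */
		return false;

	return rec->flags & REC_YOUNG;
}

int main(void)
{
	static struct record present = { .flags = REC_YOUNG };

	record_table[0] = &present;     /* slot with a backing record */
	record_table[1] = NULL;         /* slot without one (hotplug) */

	printf("idx 0 young: %d\n", record_is_young(0));  /* prints 1 */
	printf("idx 1 young: %d\n", record_is_young(1));  /* prints 0 */
	return 0;
}

Returning false (or 0) when the record is missing mirrors what the patch
does in page_is_young(), page_poison() and __get_page_owner_gfp(), while
the void helpers simply return early.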