From: Vlastimil Babka <vbabka@suse.cz>
To: Mel Gorman <mgorman@suse.de>, Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>, Jan Kara <jack@suse.cz>,
Michal Hocko <mhocko@suse.cz>, Hugh Dickins <hughd@google.com>,
Peter Zijlstra <peterz@infradead.org>,
Dave Hansen <dave.hansen@intel.com>,
Linux Kernel <linux-kernel@vger.kernel.org>,
Linux-MM <linux-mm@kvack.org>,
Linux-FSDevel <linux-fsdevel@vger.kernel.org>
Subject: Re: [PATCH 10/19] mm: page_alloc: Reduce number of times page_to_pfn is called
Date: Tue, 13 May 2014 15:27:29 +0200
Message-ID: <53721DC1.1040006@suse.cz>
In-Reply-To: <1399974350-11089-11-git-send-email-mgorman@suse.de>

On 05/13/2014 11:45 AM, Mel Gorman wrote:
> In the free path we calculate page_to_pfn multiple times. Reduce that.
>
> Signed-off-by: Mel Gorman <mgorman@suse.de>
> Acked-by: Rik van Riel <riel@redhat.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Just two comments.

I just don't like the #define, but I can live with that.
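
For the record, a static inline would look like the sketch below; my
guess (the patch doesn't say) is that the #define is there because
page_to_pfn() isn't declared yet at this point of mmzone.h, so the
inline version simply wouldn't build:

static inline int get_pageblock_migratetype(struct page *page)
{
	/*
	 * Sketch only - assumes page_to_pfn() is visible here, which it
	 * presumably is not; that would explain the macro in the patch.
	 */
	return get_pfnblock_flags_mask(page, page_to_pfn(page),
				       PB_migrate_end, MIGRATETYPE_MASK);
}
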
__free_one_page() is marked inline, so presumably it would reuse the
page_to_pfn() result from its caller already. But it looks quite large
to me, so I wonder whether it actually gets inlined, and whether the
attribute still makes sense...
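
To illustrate: if GCC does honour the inline, then in a caller like the
one below (sketch, simplified from free_one_page() in this patch) the
compiler could reuse the caller's pfn even without the new parameter:

static void free_one_page(struct zone *zone, struct page *page,
			  unsigned long pfn, int order, int migratetype)
{
	spin_lock(&zone->lock);
	/*
	 * pfn was computed once by our caller; with __free_one_page()
	 * inlined here, a page_to_pfn(page) inside it could be CSE'd
	 * with that value anyway. The explicit parameter mainly pays
	 * off if the function is in fact too large to get inlined.
	 */
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}
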
> ---
> include/linux/mmzone.h | 9 +++++++--
> include/linux/pageblock-flags.h | 33 +++++++++++++--------------------
> mm/page_alloc.c | 34 +++++++++++++++++++---------------
> 3 files changed, 39 insertions(+), 37 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 835aa3d..bd6f504 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabled;
> #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
> #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
>
> -static inline int get_pageblock_migratetype(struct page *page)
> +#define get_pageblock_migratetype(page) \
> + get_pfnblock_flags_mask(page, page_to_pfn(page), \
> + PB_migrate_end, MIGRATETYPE_MASK)
> +
> +static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
> {
> BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
> - return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
> + return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
> + MIGRATETYPE_MASK);
> }
>
> struct free_area {
> diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
> index c08730c..2baeee1 100644
> --- a/include/linux/pageblock-flags.h
> +++ b/include/linux/pageblock-flags.h
> @@ -65,33 +65,26 @@ extern int pageblock_order;
> /* Forward declaration */
> struct page;
>
> -unsigned long get_pageblock_flags_mask(struct page *page,
> +unsigned long get_pfnblock_flags_mask(struct page *page,
> + unsigned long pfn,
> unsigned long end_bitidx,
> unsigned long mask);
> -void set_pageblock_flags_mask(struct page *page,
> +
> +void set_pfnblock_flags_mask(struct page *page,
> unsigned long flags,
> + unsigned long pfn,
> unsigned long end_bitidx,
> unsigned long mask);
>
> /* Declarations for getting and setting flags. See mm/page_alloc.c */
> -static inline unsigned long get_pageblock_flags_group(struct page *page,
> - int start_bitidx, int end_bitidx)
> -{
> - unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
> - unsigned long mask = (1 << nr_flag_bits) - 1;
> -
> - return get_pageblock_flags_mask(page, end_bitidx, mask);
> -}
> -
> -static inline void set_pageblock_flags_group(struct page *page,
> - unsigned long flags,
> - int start_bitidx, int end_bitidx)
> -{
> - unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
> - unsigned long mask = (1 << nr_flag_bits) - 1;
> -
> - set_pageblock_flags_mask(page, flags, end_bitidx, mask);
> -}
> +#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
> + get_pfnblock_flags_mask(page, page_to_pfn(page), \
> + end_bitidx, \
> + (1 << (end_bitidx - start_bitidx + 1)) - 1)
> +#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
> + set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
> + end_bitidx, \
> + (1 << (end_bitidx - start_bitidx + 1)) - 1)
>
> #ifdef CONFIG_COMPACTION
> #define get_pageblock_skip(page) \
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index b438eb7..3948f0a 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -559,6 +559,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
> */
>
> static inline void __free_one_page(struct page *page,
> + unsigned long pfn,
> struct zone *zone, unsigned int order,
> int migratetype)
> {
> @@ -575,7 +576,7 @@ static inline void __free_one_page(struct page *page,
>
> VM_BUG_ON(migratetype == -1);
>
> - page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
> + page_idx = pfn & ((1 << MAX_ORDER) - 1);
>
> VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
> VM_BUG_ON_PAGE(bad_range(zone, page), page);
> @@ -710,7 +711,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
> list_del(&page->lru);
> mt = get_freepage_migratetype(page);
> /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
> - __free_one_page(page, zone, 0, mt);
> + __free_one_page(page, page_to_pfn(page), zone, 0, mt);
> trace_mm_page_pcpu_drain(page, 0, mt);
> if (likely(!is_migrate_isolate_page(page))) {
> __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
> @@ -722,13 +723,15 @@ static void free_pcppages_bulk(struct zone *zone, int count,
> spin_unlock(&zone->lock);
> }
>
> -static void free_one_page(struct zone *zone, struct page *page, int order,
> +static void free_one_page(struct zone *zone,
> + struct page *page, unsigned long pfn,
> + int order,
> int migratetype)
> {
> spin_lock(&zone->lock);
> zone->pages_scanned = 0;
>
> - __free_one_page(page, zone, order, migratetype);
> + __free_one_page(page, pfn, zone, order, migratetype);
> if (unlikely(!is_migrate_isolate(migratetype)))
> __mod_zone_freepage_state(zone, 1 << order, migratetype);
> spin_unlock(&zone->lock);
> @@ -765,15 +768,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
> {
> unsigned long flags;
> int migratetype;
> + unsigned long pfn = page_to_pfn(page);
>
> if (!free_pages_prepare(page, order))
> return;
>
> local_irq_save(flags);
> __count_vm_events(PGFREE, 1 << order);
> - migratetype = get_pageblock_migratetype(page);
> + migratetype = get_pfnblock_migratetype(page, pfn);
> set_freepage_migratetype(page, migratetype);
> - free_one_page(page_zone(page), page, order, migratetype);
> + free_one_page(page_zone(page), page, pfn, order, migratetype);
> local_irq_restore(flags);
> }
>
> @@ -1376,12 +1380,13 @@ void free_hot_cold_page(struct page *page, int cold)
> struct zone *zone = page_zone(page);
> struct per_cpu_pages *pcp;
> unsigned long flags;
> + unsigned long pfn = page_to_pfn(page);
> int migratetype;
>
> if (!free_pages_prepare(page, 0))
> return;
>
> - migratetype = get_pageblock_migratetype(page);
> + migratetype = get_pfnblock_migratetype(page, pfn);
> set_freepage_migratetype(page, migratetype);
> local_irq_save(flags);
> __count_vm_event(PGFREE);
> @@ -1395,7 +1400,7 @@ void free_hot_cold_page(struct page *page, int cold)
> */
> if (migratetype >= MIGRATE_PCPTYPES) {
> if (unlikely(is_migrate_isolate(migratetype))) {
> - free_one_page(zone, page, 0, migratetype);
> + free_one_page(zone, page, pfn, 0, migratetype);
> goto out;
> }
> migratetype = MIGRATE_MOVABLE;
> @@ -6032,17 +6037,16 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
> * @end_bitidx: The last bit of interest
> * returns pageblock_bits flags
> */
> -unsigned long get_pageblock_flags_mask(struct page *page,
> +unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
> unsigned long end_bitidx,
> unsigned long mask)
> {
> struct zone *zone;
> unsigned long *bitmap;
> - unsigned long pfn, bitidx, word_bitidx;
> + unsigned long bitidx, word_bitidx;
> unsigned long word;
>
> zone = page_zone(page);
> - pfn = page_to_pfn(page);
> bitmap = get_pageblock_bitmap(zone, pfn);
> bitidx = pfn_to_bitidx(zone, pfn);
> word_bitidx = bitidx / BITS_PER_LONG;
> @@ -6054,25 +6058,25 @@ unsigned long get_pageblock_flags_mask(struct page *page,
> }
>
> /**
> - * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
> + * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
> * @page: The page within the block of interest
> * @start_bitidx: The first bit of interest
> * @end_bitidx: The last bit of interest
> * @flags: The flags to set
> */
> -void set_pageblock_flags_mask(struct page *page, unsigned long flags,
> +void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
> + unsigned long pfn,
> unsigned long end_bitidx,
> unsigned long mask)
> {
> struct zone *zone;
> unsigned long *bitmap;
> - unsigned long pfn, bitidx, word_bitidx;
> + unsigned long bitidx, word_bitidx;
> unsigned long old_word, word;
>
> BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
>
> zone = page_zone(page);
> - pfn = page_to_pfn(page);
> bitmap = get_pageblock_bitmap(zone, pfn);
> bitidx = pfn_to_bitidx(zone, pfn);
> word_bitidx = bitidx / BITS_PER_LONG;
>