From: Mel Gorman <mgorman@suse.de>
To: Linux-MM <linux-mm@kvack.org>,
Linux-FSDevel <linux-fsdevel@vger.kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Vlastimil Babka <vbabka@suse.cz>, Jan Kara <jack@suse.cz>,
Michal Hocko <mhocko@suse.cz>, Hugh Dickins <hughd@google.com>,
Mel Gorman <mgorman@suse.de>,
Linux Kernel <linux-kernel@vger.kernel.org>
Subject: [PATCH 11/17] mm: page_alloc: Use unsigned int for order in more places
Date: Thu, 1 May 2014 09:44:42 +0100 [thread overview]
Message-ID: <1398933888-4940-12-git-send-email-mgorman@suse.de> (raw)
In-Reply-To: <1398933888-4940-1-git-send-email-mgorman@suse.de>

X86 prefers the use of unsigned types for iterators, and there is a
tendency to mix whether a signed or unsigned type is used for page
order. This converts a number of sites in mm/page_alloc.c to use
unsigned int for order where possible.
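
One hunk below deserves a brief note. Once current_order in
__rmqueue_fallback() is unsigned, decrementing it past zero wraps to
UINT_MAX instead of going negative, so "current_order >= order" alone
would never terminate the loop for an order-0 request; the patch
therefore adds an explicit upper bound. A minimal standalone sketch of
the hazard and the guard (the MAX_ORDER value here is illustrative and
not part of this patch):

#include <stdio.h>

#define MAX_ORDER 11	/* illustrative value for the sketch only */

int main(void)
{
	unsigned int order = 0;		/* worst case: an order-0 request */
	unsigned int current_order;

	/*
	 * After reaching 0, --current_order wraps to UINT_MAX, so the
	 * first condition stays true forever; the second condition
	 * catches the wrap and ends the loop, mirroring the new bound
	 * in the __rmqueue_fallback() hunk below.
	 */
	for (current_order = MAX_ORDER - 1;
	     current_order >= order && current_order <= MAX_ORDER - 1;
	     --current_order)
		printf("considering order %u\n", current_order);

	return 0;
}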
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
include/linux/mmzone.h | 8 ++++----
mm/page_alloc.c | 43 +++++++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 24 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2c3037a..d20403d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -818,10 +818,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int alloc_flags);
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int alloc_flags);
+bool zone_watermark_ok(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e55bc8..087c178 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -408,7 +408,8 @@ static int destroy_compound_page(struct page *page, unsigned long order)
return bad;
}
-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
+static inline void prep_zero_page(struct page *page, unsigned int order,
+ gfp_t gfp_flags)
{
int i;
@@ -452,7 +453,7 @@ static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif
-static inline void set_page_order(struct page *page, int order)
+static inline void set_page_order(struct page *page, unsigned int order)
{
set_page_private(page, order);
__SetPageBuddy(page);
@@ -503,7 +504,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
* For recording page's order, we use page_private(page).
*/
static inline int page_is_buddy(struct page *page, struct page *buddy,
- int order)
+ unsigned int order)
{
if (!pfn_valid_within(page_to_pfn(buddy)))
return 0;
@@ -725,7 +726,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
static void free_one_page(struct zone *zone,
struct page *page, unsigned long pfn,
- int order,
+ unsigned int order,
int migratetype)
{
spin_lock(&zone->lock);
@@ -896,7 +897,7 @@ static inline int check_new_page(struct page *page)
return 0;
}
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
{
int i;
@@ -1104,16 +1105,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
struct free_area *area;
- int current_order;
+ unsigned int current_order;
struct page *page;
int migratetype, new_type, i;
/* Find the largest possible block of pages in the other list */
- for (current_order = MAX_ORDER-1; current_order >= order;
- --current_order) {
+ for (current_order = MAX_ORDER-1;
+ current_order >= order && current_order <= MAX_ORDER-1;
+ --current_order) {
for (i = 0;; i++) {
migratetype = fallbacks[start_migratetype][i];
@@ -1341,7 +1343,7 @@ void mark_free_pages(struct zone *zone)
{
unsigned long pfn, max_zone_pfn;
unsigned long flags;
- int order, t;
+ unsigned int order, t;
struct list_head *curr;
if (zone_is_empty(zone))
@@ -1537,8 +1539,8 @@ int split_free_page(struct page *page)
*/
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
- struct zone *zone, int order, gfp_t gfp_flags,
- int migratetype)
+ struct zone *zone, unsigned int order,
+ gfp_t gfp_flags, int migratetype)
{
unsigned long flags;
struct page *page;
@@ -1687,8 +1689,9 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
* Return true if free pages are above 'mark'. This takes into account the order
* of the allocation.
*/
-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int alloc_flags, long free_pages)
+static bool __zone_watermark_ok(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags,
+ long free_pages)
{
/* free_pages my go negative - that's OK */
long min = mark;
@@ -1722,15 +1725,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
return true;
}
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
zone_page_state(z, NR_FREE_PAGES));
}
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int alloc_flags)
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags)
{
long free_pages = zone_page_state(z, NR_FREE_PAGES);
@@ -4123,7 +4126,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
static void __meminit zone_init_free_lists(struct zone *zone)
{
- int order, t;
+ unsigned int order, t;
for_each_migratetype_order(order, t) {
INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
zone->free_area[order].nr_free = 0;
@@ -6447,7 +6450,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *page;
struct zone *zone;
- int order, i;
+ unsigned int order, i;
unsigned long pfn;
unsigned long flags;
/* find the first valid pfn */
@@ -6499,7 +6502,7 @@ bool is_free_buddy_page(struct page *page)
struct zone *zone = page_zone(page);
unsigned long pfn = page_to_pfn(page);
unsigned long flags;
- int order;
+ unsigned int order;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
--
1.8.4.5
Thread overview: 57+ messages
2014-05-01 8:44 [PATCH 00/17] Misc page alloc, shmem, mark_page_accessed and page_waitqueue optimisations Mel Gorman
2014-05-01 8:44 ` [PATCH 01/17] mm: page_alloc: Do not update zlc unless the zlc is active Mel Gorman
2014-05-01 13:25 ` Johannes Weiner
2014-05-06 15:04 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 02/17] mm: page_alloc: Do not treat a zone that cannot be used for dirty pages as "full" Mel Gorman
2014-05-06 15:09 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 03/17] mm: page_alloc: Use jump labels to avoid checking number_of_cpusets Mel Gorman
2014-05-06 15:10 ` Rik van Riel
2014-05-06 20:23 ` Peter Zijlstra
2014-05-06 22:21 ` Mel Gorman
2014-05-07 9:04 ` Peter Zijlstra
2014-05-07 9:43 ` Mel Gorman
2014-05-01 8:44 ` [PATCH 04/17] mm: page_alloc: Calculate classzone_idx once from the zonelist ref Mel Gorman
2014-05-06 16:01 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 05/17] mm: page_alloc: Only check the zone id check if pages are buddies Mel Gorman
2014-05-06 16:48 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 06/17] mm: page_alloc: Only check the alloc flags and gfp_mask for dirty once Mel Gorman
2014-05-06 17:24 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 07/17] mm: page_alloc: Take the ALLOC_NO_WATERMARK check out of the fast path Mel Gorman
2014-05-06 17:25 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 08/17] mm: page_alloc: Use word-based accesses for get/set pageblock bitmaps Mel Gorman
2014-05-02 22:34 ` Sasha Levin
2014-05-04 13:14 ` Mel Gorman
2014-05-05 12:40 ` Vlastimil Babka
2014-05-06 9:13 ` Mel Gorman
2014-05-06 14:42 ` Vlastimil Babka
2014-05-06 15:12 ` Mel Gorman
2014-05-06 20:34 ` Peter Zijlstra
2014-05-06 22:24 ` Mel Gorman
2014-05-01 8:44 ` [PATCH 09/17] mm: page_alloc: Reduce number of times page_to_pfn is called Mel Gorman
2014-05-06 18:47 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 10/17] mm: page_alloc: Lookup pageblock migratetype with IRQs enabled during free Mel Gorman
2014-05-06 18:48 ` Rik van Riel
2014-05-01 8:44 ` Mel Gorman [this message]
2014-05-01 14:35 ` [PATCH 11/17] mm: page_alloc: Use unsigned int for order in more places Dave Hansen
2014-05-01 15:11 ` Mel Gorman
2014-05-01 15:38 ` Dave Hansen
2014-05-06 18:49 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 12/17] mm: page_alloc: Convert hot/cold parameter and immediate callers to bool Mel Gorman
2014-05-06 18:49 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 13/17] mm: shmem: Avoid atomic operation during shmem_getpage_gfp Mel Gorman
2014-05-06 18:53 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 14/17] mm: Do not use atomic operations when releasing pages Mel Gorman
2014-05-01 13:29 ` Johannes Weiner
2014-05-01 13:39 ` Mel Gorman
2014-05-01 13:47 ` Johannes Weiner
2014-05-06 18:54 ` Rik van Riel
2014-05-01 8:44 ` [PATCH 15/17] mm: Do not use unnecessary atomic operations when adding pages to the LRU Mel Gorman
2014-05-01 13:33 ` Johannes Weiner
2014-05-01 13:40 ` Mel Gorman
2014-05-06 15:30 ` Vlastimil Babka
2014-05-06 15:55 ` Mel Gorman
2014-05-01 8:44 ` [PATCH 16/17] mm: Non-atomically mark page accessed during page cache allocation where possible Mel Gorman
2014-05-01 8:44 ` [PATCH 17/17] mm: filemap: Avoid unnecessary barries and waitqueue lookup in unlock_page fastpath Mel Gorman
2014-05-05 10:50 ` Jan Kara
2014-05-07 9:03 ` Mel Gorman
2014-05-06 20:30 ` Peter Zijlstra