linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Minchan Kim <minchan@kernel.org>
To: Joonsoo Kim <js1304@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Mel Gorman <mel@csn.ul.ie>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: Re: [PATCH v2 4/5] mm, highmem: makes flush_all_zero_pkmaps() return index of first flushed entry
Date: Thu, 1 Nov 2012 14:03:47 +0900	[thread overview]
Message-ID: <20121101050347.GD24883@bbox> (raw)
In-Reply-To: <1351702597-10795-5-git-send-email-js1304@gmail.com>

On Thu, Nov 01, 2012 at 01:56:36AM +0900, Joonsoo Kim wrote:
> In current code, after flush_all_zero_pkmaps() is invoked,
> then re-iterate all pkmaps. It can be optimized if flush_all_zero_pkmaps()
> return index of first flushed entry. With this index,
> we can immediately map highmem page to virtual address represented by index.
> So change return type of flush_all_zero_pkmaps()
> and return index of first flushed entry.
> 
> Additionally, update last_pkmap_nr to this index.
> It is certain that entry which is below this index is occupied by other mapping,
> therefore updating last_pkmap_nr to this index is reasonable optimization.
> 
> Cc: Mel Gorman <mel@csn.ul.ie>
> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
> Cc: Minchan Kim <minchan@kernel.org>
> Signed-off-by: Joonsoo Kim <js1304@gmail.com>
> 
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index ef788b5..97ad208 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -32,6 +32,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
>  
>  #ifdef CONFIG_HIGHMEM
>  #include <asm/highmem.h>
> +#define PKMAP_INVALID_INDEX (LAST_PKMAP)
>  
>  /* declarations for linux/mm/highmem.c */
>  unsigned int nr_free_highpages(void);
> diff --git a/mm/highmem.c b/mm/highmem.c
> index d98b0a9..b365f7b 100644
> --- a/mm/highmem.c
> +++ b/mm/highmem.c
> @@ -106,10 +106,10 @@ struct page *kmap_to_page(void *vaddr)
>  	return virt_to_page(addr);
>  }
>  
> -static void flush_all_zero_pkmaps(void)
> +static unsigned int flush_all_zero_pkmaps(void)
>  {
>  	int i;
> -	int need_flush = 0;
> +	unsigned int index = PKMAP_INVALID_INDEX;
>  
>  	flush_cache_kmaps();
>  
> @@ -141,10 +141,13 @@ static void flush_all_zero_pkmaps(void)
>  			  &pkmap_page_table[i]);
>  
>  		set_page_address(page, NULL);
> -		need_flush = 1;
> +		if (index == PKMAP_INVALID_INDEX)
> +			index = i;
>  	}
> -	if (need_flush)
> +	if (index != PKMAP_INVALID_INDEX)
>  		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
> +
> +	return index;
>  }
>  
>  /**
> @@ -152,14 +155,19 @@ static void flush_all_zero_pkmaps(void)
>   */
>  void kmap_flush_unused(void)
>  {
> +	unsigned int index;
> +
>  	lock_kmap();
> -	flush_all_zero_pkmaps();
> +	index = flush_all_zero_pkmaps();
> +	if (index != PKMAP_INVALID_INDEX && (index < last_pkmap_nr))
> +		last_pkmap_nr = index;

I don't know whether kmap_flush_unused is really a fast path, so I am not
sure how effective my nitpick is. Anyway,
what problem would happen if we did the following?

lock()
index = flush_all_zero_pkmaps();
if (index != PKMAP_INVALID_INDEX)
        last_pkmap_nr = index;
unlock();

Normally, last_pkmap_nr is increased while searching for an empty slot in
map_new_virtual. So I expect the return value of flush_all_zero_pkmaps
in kmap_flush_unused to normally be either less than last_pkmap_nr
or equal to last_pkmap_nr + 1.

 
>  	unlock_kmap();
>  }
>  
>  static inline unsigned long map_new_virtual(struct page *page)
>  {
>  	unsigned long vaddr;
> +	unsigned int index = PKMAP_INVALID_INDEX;
>  	int count;
>  
>  start:
> @@ -168,40 +176,45 @@ start:
>  	for (;;) {
>  		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
>  		if (!last_pkmap_nr) {
> -			flush_all_zero_pkmaps();
> -			count = LAST_PKMAP;
> +			index = flush_all_zero_pkmaps();
> +			break;
>  		}
> -		if (!pkmap_count[last_pkmap_nr])
> +		if (!pkmap_count[last_pkmap_nr]) {
> +			index = last_pkmap_nr;
>  			break;	/* Found a usable entry */
> -		if (--count)
> -			continue;
> -
> -		/*
> -		 * Sleep for somebody else to unmap their entries
> -		 */
> -		{
> -			DECLARE_WAITQUEUE(wait, current);
> -
> -			__set_current_state(TASK_UNINTERRUPTIBLE);
> -			add_wait_queue(&pkmap_map_wait, &wait);
> -			unlock_kmap();
> -			schedule();
> -			remove_wait_queue(&pkmap_map_wait, &wait);
> -			lock_kmap();
> -
> -			/* Somebody else might have mapped it while we slept */
> -			if (page_address(page))
> -				return (unsigned long)page_address(page);
> -
> -			/* Re-start */
> -			goto start;
>  		}
> +		if (--count == 0)
> +			break;
>  	}
> -	vaddr = PKMAP_ADDR(last_pkmap_nr);
> +
> +	/*
> +	 * Sleep for somebody else to unmap their entries
> +	 */
> +	if (index == PKMAP_INVALID_INDEX) {
> +		DECLARE_WAITQUEUE(wait, current);
> +
> +		__set_current_state(TASK_UNINTERRUPTIBLE);
> +		add_wait_queue(&pkmap_map_wait, &wait);
> +		unlock_kmap();
> +		schedule();
> +		remove_wait_queue(&pkmap_map_wait, &wait);
> +		lock_kmap();
> +
> +		/* Somebody else might have mapped it while we slept */
> +		vaddr = (unsigned long)page_address(page);
> +		if (vaddr)
> +			return vaddr;
> +
> +		/* Re-start */
> +		goto start;
> +	}
> +
> +	vaddr = PKMAP_ADDR(index);
>  	set_pte_at(&init_mm, vaddr,
> -		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
> +		   &(pkmap_page_table[index]), mk_pte(page, kmap_prot));
>  
> -	pkmap_count[last_pkmap_nr] = 1;
> +	pkmap_count[index] = 1;
> +	last_pkmap_nr = index;
>  	set_page_address(page, (void *)vaddr);
>  
>  	return vaddr;
> -- 
> 1.7.9.5
> 
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo@kvack.org.  For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

-- 
Kind regards,
Minchan Kim

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

  reply	other threads:[~2012-11-01  4:57 UTC|newest]

Thread overview: 95+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <Yes>
2012-07-16 16:14 ` [PATCH 1/3] mm: correct return value of migrate_pages() Joonsoo Kim
2012-07-16 16:14   ` [PATCH 2/3] mm: fix possible incorrect return value of migrate_pages() syscall Joonsoo Kim
2012-07-16 17:26     ` Christoph Lameter
2012-07-16 17:40     ` Michal Nazarewicz
2012-07-16 17:59       ` JoonSoo Kim
2012-07-17 13:02         ` Michal Nazarewicz
2012-07-16 16:14   ` [PATCH 3/3] mm: fix return value in __alloc_contig_migrate_range() Joonsoo Kim
2012-07-16 17:29     ` Christoph Lameter
2012-07-16 17:40     ` Michal Nazarewicz
2012-07-16 18:40       ` JoonSoo Kim
2012-07-17 13:16         ` Michal Nazarewicz
2012-07-16 17:14   ` [PATCH 4] mm: fix possible incorrect return value of move_pages() syscall Joonsoo Kim
2012-07-16 17:30     ` Christoph Lameter
2012-07-16 17:23   ` [PATCH 1/3] mm: correct return value of migrate_pages() Christoph Lameter
2012-07-16 17:32     ` JoonSoo Kim
2012-07-16 17:37       ` Christoph Lameter
2012-07-16 17:40   ` Michal Nazarewicz
2012-07-16 17:57     ` JoonSoo Kim
2012-07-16 18:05       ` Christoph Lameter
2012-07-17 12:33 ` [PATCH 1/4 v2] mm: correct return value of migrate_pages() and migrate_huge_pages() Joonsoo Kim
2012-07-17 12:33   ` [PATCH 2/4 v2] mm: fix possible incorrect return value of migrate_pages() syscall Joonsoo Kim
2012-07-17 14:28     ` Christoph Lameter
2012-07-17 15:41       ` JoonSoo Kim
2012-07-17 12:33   ` [PATCH 3/4 v2] mm: fix return value in __alloc_contig_migrate_range() Joonsoo Kim
2012-07-17 13:25     ` Michal Nazarewicz
2012-07-17 15:45       ` JoonSoo Kim
2012-07-17 15:49         ` [PATCH 3/4 v3] " Joonsoo Kim
2012-07-17 12:33   ` [PATCH 4/4 v2] mm: fix possible incorrect return value of move_pages() syscall Joonsoo Kim
2012-07-27 17:55 ` [RESEND PATCH 1/4 v3] mm: correct return value of migrate_pages() and migrate_huge_pages() Joonsoo Kim
2012-07-27 17:55   ` [RESEND PATCH 2/4 v3] mm: fix possible incorrect return value of migrate_pages() syscall Joonsoo Kim
2012-07-27 20:57     ` Christoph Lameter
2012-07-28  6:16       ` JoonSoo Kim
2012-07-30 19:30         ` Christoph Lameter
2012-07-27 17:55   ` [RESEND PATCH 3/4 v3] mm: fix return value in __alloc_contig_migrate_range() Joonsoo Kim
2012-07-27 17:55   ` [RESEND PATCH 4/4 v3] mm: fix possible incorrect return value of move_pages() syscall Joonsoo Kim
2012-07-27 20:54     ` Christoph Lameter
2012-07-28  6:09       ` JoonSoo Kim
2012-07-30 19:29         ` Christoph Lameter
2012-07-31  3:34           ` JoonSoo Kim
2012-07-31 14:04             ` Christoph Lameter
2012-08-01  5:15           ` Michael Kerrisk
2012-08-01 18:00             ` Christoph Lameter
2012-08-02  5:52               ` Michael Kerrisk
2012-08-24 16:05 ` [PATCH 1/2] slub: rename cpu_partial to max_cpu_object Joonsoo Kim
2012-08-24 16:05   ` [PATCH 2/2] slub: correct the calculation of the number of cpu objects in get_partial_node Joonsoo Kim
2012-08-24 16:15     ` Christoph Lameter
2012-08-24 16:28       ` JoonSoo Kim
2012-08-24 16:31         ` Christoph Lameter
2012-08-24 16:40           ` JoonSoo Kim
2012-08-24 16:12   ` [PATCH 1/2] slub: rename cpu_partial to max_cpu_object Christoph Lameter
2012-08-25 14:11 ` [PATCH 1/2] slab: do ClearSlabPfmemalloc() for all pages of slab Joonsoo Kim
2012-08-25 14:11   ` [PATCH 2/2] slab: fix starting index for finding another object Joonsoo Kim
2012-09-03 10:08   ` [PATCH 1/2] slab: do ClearSlabPfmemalloc() for all pages of slab Mel Gorman
2012-10-20 15:48 ` [PATCH for-v3.7 1/2] slub: optimize poorly inlined kmalloc* functions Joonsoo Kim
2012-10-20 15:48   ` [PATCH for-v3.7 2/2] slub: optimize kmalloc* inlining for GFP_DMA Joonsoo Kim
2012-10-22 14:31     ` Christoph Lameter
2012-10-23  2:29       ` JoonSoo Kim
2012-10-23  6:16         ` Eric Dumazet
2012-10-23 16:12           ` JoonSoo Kim
2012-10-24  8:05   ` [PATCH for-v3.7 1/2] slub: optimize poorly inlined kmalloc* functions Pekka Enberg
2012-10-24 13:36     ` Christoph Lameter
2012-10-28 19:12 ` [PATCH 0/5] minor clean-up and optimize highmem related code Joonsoo Kim
2012-10-28 19:12   ` [PATCH 1/5] mm, highmem: use PKMAP_NR() to calculate an index of pkmap Joonsoo Kim
2012-10-29  1:48     ` Minchan Kim
2012-10-28 19:12   ` [PATCH 2/5] mm, highmem: remove useless pool_lock Joonsoo Kim
2012-10-29  1:52     ` Minchan Kim
2012-10-30 21:31     ` Andrew Morton
2012-10-31  5:14       ` Minchan Kim
2012-10-31 15:01       ` JoonSoo Kim
2012-10-28 19:12   ` [PATCH 3/5] mm, highmem: remove page_address_pool list Joonsoo Kim
2012-10-29  1:57     ` Minchan Kim
2012-10-28 19:12   ` [PATCH 4/5] mm, highmem: makes flush_all_zero_pkmaps() return index of last flushed entry Joonsoo Kim
2012-10-29  2:06     ` Minchan Kim
2012-10-29 13:12       ` JoonSoo Kim
2012-10-28 19:12   ` [PATCH 5/5] mm, highmem: get virtual address of the page using PKMAP_ADDR() Joonsoo Kim
2012-10-29  2:09     ` Minchan Kim
2012-10-29  2:12   ` [PATCH 0/5] minor clean-up and optimize highmem related code Minchan Kim
2012-10-29 13:15     ` JoonSoo Kim
2012-10-31 17:11       ` JoonSoo Kim
2012-10-31 16:56 ` [PATCH v2 " Joonsoo Kim
2012-10-31 16:56   ` [PATCH v2 1/5] mm, highmem: use PKMAP_NR() to calculate an index of pkmap Joonsoo Kim
2012-10-31 16:56   ` [PATCH v2 2/5] mm, highmem: remove useless pool_lock Joonsoo Kim
2012-10-31 16:56   ` [PATCH v2 3/5] mm, highmem: remove page_address_pool list Joonsoo Kim
2012-10-31 16:56   ` [PATCH v2 4/5] mm, highmem: makes flush_all_zero_pkmaps() return index of first flushed entry Joonsoo Kim
2012-11-01  5:03     ` Minchan Kim [this message]
2012-11-02 19:07       ` JoonSoo Kim
2012-11-02 22:42         ` Minchan Kim
2012-11-13  0:30           ` JoonSoo Kim
2012-11-13 12:49             ` Minchan Kim
2012-11-13 14:12               ` JoonSoo Kim
2012-11-13 15:01                 ` Minchan Kim
2012-11-14 17:09                   ` JoonSoo Kim
2012-11-19 23:46                     ` Minchan Kim
2012-11-27 15:01                       ` JoonSoo Kim
2012-10-31 16:56   ` [PATCH v2 5/5] mm, highmem: get virtual address of the page using PKMAP_ADDR() Joonsoo Kim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20121101050347.GD24883@bbox \
    --to=minchan@kernel.org \
    --cc=a.p.zijlstra@chello.nl \
    --cc=akpm@linux-foundation.org \
    --cc=js1304@gmail.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mel@csn.ul.ie \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).