From: Mike Rapoport <rppt@linux.ibm.com>
To: Wei Li <liwei213@huawei.com>
Cc: song.bao.hua@hisilicon.com, linux-arm-kernel@lists.infradead.org,
	steve.capper@arm.com, catalin.marinas@arm.com,
	sujunfei2@hisilicon.com, linux-kernel@vger.kernel.org,
	fengbaopeng2@hisilicon.com, saberlily.xia@hisilicon.com,
	puck.chen@hisilicon.com, will@kernel.org, nsaenzjulienne@suse.de,
	butao@hisilicon.com
Subject: Re: [PATCH] arm64: mm: free unused memmap for sparse memory model that define VMEMMAP
Date: Wed, 22 Jul 2020 09:07:05 +0300
Message-ID: <20200722060705.GK802087@linux.ibm.com>
In-Reply-To: <20200721073203.107862-1-liwei213@huawei.com>

Hi,

On Tue, Jul 21, 2020 at 03:32:03PM +0800, Wei Li wrote:
> For memory holes, the sparse memory model with SPARSEMEM_VMEMMAP enabled does
> not free the reserved memory for the page map (memmap); this patch does that.

Are there numbers showing how much memory is actually freed?

The freeing of empty memmap would become rather complex with these
changes; do the memory savings justify it?
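
For a rough sense of scale (a back-of-the-envelope estimate on my side, not a
number taken from the patch): assuming 4K pages, a 64-byte struct page and 1GB
SPARSEMEM sections, the vmemmap for one section costs 16MB, so the ceiling is
on the order of 16MB per section-sized hole. A minimal sketch of that
arithmetic, where all three constants are assumptions:

#include <stdio.h>

/*
 * Back-of-the-envelope only: the page size, section size and
 * sizeof(struct page) below are assumed values, not taken from
 * the patch or from any particular kernel configuration.
 */
int main(void)
{
	unsigned long page_size        = 4096;        /* assumed 4K pages */
	unsigned long section_size     = 1UL << 30;   /* assumed 1GB SPARSEMEM section */
	unsigned long struct_page_size = 64;          /* assumed sizeof(struct page) */

	unsigned long pages_per_section  = section_size / page_size;   /* 262144 */
	unsigned long memmap_per_section = pages_per_section * struct_page_size;

	/* 262144 pages * 64 bytes = 16 MB of vmemmap per 1GB section */
	printf("vmemmap per section: %lu MB\n", memmap_per_section >> 20);
	return 0;
}

The real savings would be smaller: they depend on how much of each hole is
covered by whole, PMD-mapped vmemmap pages, since the code below only frees
vmemmap in PMD_SIZE chunks.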

> Signed-off-by: Wei Li <liwei213@huawei.com>
> Signed-off-by: Chen Feng <puck.chen@hisilicon.com>
> Signed-off-by: Xia Qing <saberlily.xia@hisilicon.com>
> ---
>  arch/arm64/mm/init.c | 81 +++++++++++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 71 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 1e93cfc7c47a..d1b56b47d5ba 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -441,7 +441,48 @@ void __init bootmem_init(void)
>  	memblock_dump_all();
>  }
> 
> -#ifndef CONFIG_SPARSEMEM_VMEMMAP
> +#ifdef CONFIG_SPARSEMEM_VMEMMAP
> +#define VMEMMAP_PAGE_INUSE 0xFD
> +static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
> +{
> +	unsigned long addr, end;
> +	unsigned long next;
> +	pmd_t *pmd;
> +	void *page_addr;
> +	phys_addr_t phys_addr;
> +
> +	addr = (unsigned long)pfn_to_page(start_pfn);
> +	end = (unsigned long)pfn_to_page(end_pfn);
> +
> +	pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
> +	for (; addr < end; addr = next, pmd++) {
> +		next = pmd_addr_end(addr, end);
> +
> +		if (!pmd_present(*pmd))
> +			continue;
> +
> +		if (IS_ALIGNED(addr, PMD_SIZE) &&
> +			IS_ALIGNED(next, PMD_SIZE)) {
> +			phys_addr = __pfn_to_phys(pmd_pfn(*pmd));
> +			free_bootmem(phys_addr, PMD_SIZE);
> +			pmd_clear(pmd);
> +		} else {
> +			/* If here, we are freeing vmemmap pages. */
> +			memset((void *)addr, VMEMMAP_PAGE_INUSE, next - addr);
> +			page_addr = page_address(pmd_page(*pmd));
> +
> +			if (!memchr_inv(page_addr, VMEMMAP_PAGE_INUSE,
> +				PMD_SIZE)) {
> +				phys_addr = __pfn_to_phys(pmd_pfn(*pmd));
> +				free_bootmem(phys_addr, PMD_SIZE);
> +				pmd_clear(pmd);
> +			}
> +		}
> +	}
> +
> +	flush_tlb_all();
> +}
> +#else
>  static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
>  {
>  	struct page *start_pg, *end_pg;
> @@ -468,31 +509,53 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
>  		memblock_free(pg, pgend - pg);
>  }
> 
> +#endif
> +
>  /*
>   * The mem_map array can get very big. Free the unused area of the memory map.
>   */
>  static void __init free_unused_memmap(void)
>  {
> -	unsigned long start, prev_end = 0;
> +	unsigned long start, cur_start, prev_end = 0;
>  	struct memblock_region *reg;
> 
>  	for_each_memblock(memory, reg) {
> -		start = __phys_to_pfn(reg->base);
> +		cur_start = __phys_to_pfn(reg->base);
> 
>  #ifdef CONFIG_SPARSEMEM
>  		/*
>  		 * Take care not to free memmap entries that don't exist due
>  		 * to SPARSEMEM sections which aren't present.
>  		 */
> -		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
> -#endif
> +		start = min(cur_start, ALIGN(prev_end, PAGES_PER_SECTION));
> +
>  		/*
> -		 * If we had a previous bank, and there is a space between the
> -		 * current bank and the previous, free it.
> +		 * Free memory in the case of:
> +		 * 1. if cur_start - prev_end <= PAGES_PER_SECTION,
> +		 * free prev_end ~ cur_start.
> +		 * 2. if cur_start - prev_end > PAGES_PER_SECTION,
> +		 * free prev_end ~ ALIGN(prev_end, PAGES_PER_SECTION).
>  		 */
>  		if (prev_end && prev_end < start)
>  			free_memmap(prev_end, start);
> 
> +		/*
> +		 * Free memory in the case of:
> +		 * if cur_start - prev_end > PAGES_PER_SECTION,
> +		 * free ALIGN_DOWN(cur_start, PAGES_PER_SECTION) ~ cur_start.
> +		 */
> +		if (cur_start > start &&
> +		    !IS_ALIGNED(cur_start, PAGES_PER_SECTION))
> +			free_memmap(ALIGN_DOWN(cur_start, PAGES_PER_SECTION),
> +				    cur_start);
> +#else
> +		/*
> +		 * If we had a previous bank, and there is a space between the
> +		 * current bank and the previous, free it.
> +		 */
> +		if (prev_end && prev_end < cur_start)
> +			free_memmap(prev_end, cur_start);
> +#endif
>  		/*
>  		 * Align up here since the VM subsystem insists that the
>  		 * memmap entries are valid from the bank end aligned to
> @@ -507,7 +570,6 @@ static void __init free_unused_memmap(void)
>  		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
>  #endif
>  }
> -#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */
> 
>  /*
>   * mem_init() marks the free areas in the mem_map and tells us how much memory
> @@ -524,9 +586,8 @@ void __init mem_init(void)
> 
>  	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
> 
> -#ifndef CONFIG_SPARSEMEM_VMEMMAP
>  	free_unused_memmap();
> -#endif
> +
>  	/* this will put all unused low memory onto the freelists */
>  	memblock_free_all();
> 
> --
> 2.15.0
> 

-- 
Sincerely yours,
Mike.

Thread overview: 12+ messages
2020-07-21  7:32 [PATCH] arm64: mm: free unused memmap for sparse memory model that define VMEMMAP Wei Li
2020-07-22  6:07 ` Mike Rapoport [this message]
2020-07-22  8:41   ` Re: " liwei (CM)
2020-07-22 12:49     ` Catalin Marinas
2020-07-22 13:40       ` Re: " liwei (CM)
2020-07-23 11:29         ` Catalin Marinas
2020-07-23 13:18           ` Mike Rapoport
2020-07-24  3:40             ` Re: " liwei (CM)
2020-07-23  2:33 ` Anshuman Khandual
2020-07-23  3:28   ` Re: " liwei (CM)
     [not found] <20200708015555.14946-1-liwei213@huawei.com>
2020-07-08  7:18 ` Song Bao Hua (Barry Song)
2020-07-08  7:51   ` Re: " liwei (CM)
2020-07-09 12:27     ` Song Bao Hua (Barry Song)
