From: Lance Yang <lance.yang@linux.dev>
To: Dev Jain <dev.jain@arm.com>, akpm@linux-foundation.org
Cc: ryan.roberts@arm.com, david@redhat.com, willy@infradead.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	catalin.marinas@arm.com, will@kernel.org,
	Liam.Howlett@oracle.com, lorenzo.stoakes@oracle.com,
	vbabka@suse.cz, jannh@google.com, anshuman.khandual@arm.com,
	peterx@redhat.com, joey.gouly@arm.com, ioworker0@gmail.com,
	baohua@kernel.org, kevin.brodsky@arm.com,
	quic_zhenhuah@quicinc.com, christophe.leroy@csgroup.eu,
	yangyicong@hisilicon.com, linux-arm-kernel@lists.infradead.org,
	namit@vmware.com, hughd@google.com, yang@os.amperecomputing.com,
	ziy@nvidia.com
Subject: Re: [PATCH 6/7] mm: Batch around can_change_pte_writable()
Date: Mon, 28 Apr 2025 23:54:34 +0800
Message-ID: <6a0b4d81-a1b8-4533-8b4e-de270e39c5aa@linux.dev>
In-Reply-To: <29c70c06-42c2-4bc0-a56e-443a1200fde0@linux.dev>



On 2025/4/28 21:16, Lance Yang wrote:
> 
> 
> On 2025/4/28 20:50, Lance Yang wrote:
>> Hey Dev,
>>
>> On 2025/4/28 20:04, Dev Jain wrote:
>>> In preparation for patch 7, we need to properly batch around
>>> can_change_pte_writable(). We batch around pte_needs_soft_dirty_wp()
>>> via the corresponding fpb flag, and we batch around the page-anon
>>> exclusive check using folio_maybe_mapped_shared().
>>> modify_prot_start_ptes() collects the dirty and access bits across
>>> the batch, so we also batch across pte_dirty(): this is correct since
>>> the dirty bit on the PTE is really just an indication that the folio
>>> got written to, so even if the PTE is not actually dirty (but one of
>>> the PTEs in the batch is), the wp-fault optimization can be made.
>>>
>>> Signed-off-by: Dev Jain <dev.jain@arm.com>
>>> ---
>>>   include/linux/mm.h | 4 ++--
>>>   mm/gup.c           | 2 +-
>>>   mm/huge_memory.c   | 4 ++--
>>>   mm/memory.c        | 6 +++---
>>>   mm/mprotect.c      | 9 ++++++---
>>>   5 files changed, 14 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>>> index 5eb0d77c4438..ffa02e15863f 100644
>>> --- a/include/linux/mm.h
>>> +++ b/include/linux/mm.h
>>> @@ -2710,8 +2710,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
>>>   #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
>>>                           MM_CP_UFFD_WP_RESOLVE)
>>> -bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
>>> -                 pte_t pte);
>>> +bool can_change_ptes_writable(struct vm_area_struct *vma, unsigned long addr,
>>> +                 pte_t pte, struct folio *folio, unsigned int nr);
>>>   extern long change_protection(struct mmu_gather *tlb,
>>>                     struct vm_area_struct *vma, unsigned long start,
>>>                     unsigned long end, unsigned long cp_flags);
>>> diff --git a/mm/gup.c b/mm/gup.c
>>> index 84461d384ae2..6a605fc5f2cb 100644
>>> --- a/mm/gup.c
>>> +++ b/mm/gup.c
>>> @@ -614,7 +614,7 @@ static inline bool can_follow_write_common(struct page *page,
>>>           return false;
>>>       /*
>>> -     * See can_change_pte_writable(): we broke COW and could map the page
>>> +     * See can_change_ptes_writable(): we broke COW and could map the page
>>>        * writable if we have an exclusive anonymous page ...
>>>        */
>>>       return page && PageAnon(page) && PageAnonExclusive(page);
>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>> index 28c87e0e036f..e5496c0d9e7e 100644
>>> --- a/mm/huge_memory.c
>>> +++ b/mm/huge_memory.c
>>> @@ -2032,12 +2032,12 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
>>>           return false;
>>>       if (!(vma->vm_flags & VM_SHARED)) {
>>> -        /* See can_change_pte_writable(). */
>>> +        /* See can_change_ptes_writable(). */
>>>           page = vm_normal_page_pmd(vma, addr, pmd);
>>>           return page && PageAnon(page) && PageAnonExclusive(page);
>>>       }
>>> -    /* See can_change_pte_writable(). */
>>> +    /* See can_change_ptes_writable(). */
>>>       return pmd_dirty(pmd);
>>>   }
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index b9e8443aaa86..b1fda3de8d27 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -750,7 +750,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
>>>           pte = pte_mkuffd_wp(pte);
>>>       if ((vma->vm_flags & VM_WRITE) &&
>>> -        can_change_pte_writable(vma, address, pte)) {
>>> +        can_change_ptes_writable(vma, address, pte, NULL, 1)) {
>>>           if (folio_test_dirty(folio))
>>>               pte = pte_mkdirty(pte);
>>>           pte = pte_mkwrite(pte, vma);
>>> @@ -5767,7 +5767,7 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
>>>               ptent = pte_modify(ptent, vma->vm_page_prot);
>>>               writable = pte_write(ptent);
>>>               if (!writable && pte_write_upgrade &&
>>> -                can_change_pte_writable(vma, addr, ptent))
>>> +                can_change_ptes_writable(vma, addr, ptent, NULL, 1))
>>>                   writable = true;
>>>           }
>>> @@ -5808,7 +5808,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>        */
>>>       writable = pte_write(pte);
>>>       if (!writable && pte_write_upgrade &&
>>> -        can_change_pte_writable(vma, vmf->address, pte))
>>> +        can_change_ptes_writable(vma, vmf->address, pte, NULL, 1))
>>>           writable = true;
>>>       folio = vm_normal_folio(vma, vmf->address, pte);
>>> diff --git a/mm/mprotect.c b/mm/mprotect.c
>>> index 33eabc995584..362fd7e5457d 100644
>>> --- a/mm/mprotect.c
>>> +++ b/mm/mprotect.c
>>> @@ -40,8 +40,8 @@
>>>   #include "internal.h"
>>> -bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
>>> -                 pte_t pte)
>>> +bool can_change_ptes_writable(struct vm_area_struct *vma, unsigned long addr,
>>> +                  pte_t pte, struct folio *folio, unsigned int nr)
>>>   {
>>>       struct page *page;
>>> @@ -67,6 +67,9 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
>>>            * write-fault handler similarly would map them writable without
>>>            * any additional checks while holding the PT lock.
>>>            */
>>> +        if (unlikely(nr != 1))
>>> +            return !folio_maybe_mapped_shared(folio);
>>> +
>>>           page = vm_normal_page(vma, addr, pte);
>>>           return page && PageAnon(page) && PageAnonExclusive(page);
>>>       }
>>
>> IIUC, as mentioned in the comment above, we should apply the same
>> anonymous check to large folios. And folio_maybe_mapped_shared()
>> already handles both order-0 and large folios nicely, so we could
>> simplify the logic as follows:
> 
> Forgot to add:
> 
> Note that the exclusive flag is set only for non-large folios or the head
> page of large folios during mapping, so PageAnonExclusive() will always
> return false for tail pages of large folios, IIUC.

Correction: the exclusive flag would be set for all subpages of large
folios during mapping, so the note above about tail pages does not hold.
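
To make the two checks concrete, here is a rough sketch in kernel style
(illustrative only -- not the patch itself; the helper names are made
up for this example):

	/*
	 * Per-page check, as in the current code: relies on
	 * PageAnonExclusive(), which (per the correction above) is set on
	 * every subpage of a large anon folio at mapping time.
	 */
	static bool can_writably_map_page(struct vm_area_struct *vma,
					  unsigned long addr, pte_t pte)
	{
		struct page *page = vm_normal_page(vma, addr, pte);

		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/*
	 * Per-folio check, as in the suggestion quoted below: handles
	 * order-0 and large folios alike. folio_maybe_mapped_shared() may
	 * err toward "shared", so this stays conservative about granting
	 * write access.
	 */
	static bool can_writably_map_folio(struct folio *folio)
	{
		return folio_test_anon(folio) &&
		       !folio_maybe_mapped_shared(folio);
	}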

Thanks,
Lance

> 
> Thanks,
> Lance
> 
>>
>> diff --git a/mm/mprotect.c b/mm/mprotect.c
>> index 1605e89349d2..df56a30bb241 100644
>> --- a/mm/mprotect.c
>> +++ b/mm/mprotect.c
>> @@ -43,8 +43,6 @@
>>   bool can_change_ptes_writable(struct vm_area_struct *vma, unsigned long addr,
>>                                pte_t pte, struct folio *folio, unsigned int nr)
>>   {
>> -       struct page *page;
>> -
>>          if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
>>                  return false;
>>
>> @@ -67,11 +65,7 @@ bool can_change_ptes_writable(struct vm_area_struct *vma, unsigned long addr,
>>                   * write-fault handler similarly would map them writable without
>>                   * any additional checks while holding the PT lock.
>>                   */
>> -               if (unlikely(nr != 1))
>> -                       return !folio_maybe_mapped_shared(folio);
>> -
>> -               page = vm_normal_page(vma, addr, pte);
>> -               return page && PageAnon(page) && PageAnonExclusive(page);
>> +               return folio_test_anon(folio) && !folio_maybe_mapped_shared(folio);
>>          }
>>
>>          VM_WARN_ON_ONCE(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte));
>> -- 
>>
>> Thanks,
>> Lance
>>
>>> @@ -222,7 +225,7 @@ static long change_pte_range(struct mmu_gather *tlb,
>>>                */
>>>               if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
>>>                   !pte_write(ptent) &&
>>> -                can_change_pte_writable(vma, addr, ptent))
>>> +                can_change_ptes_writable(vma, addr, ptent, folio, 1))
>>>                   ptent = pte_mkwrite(ptent, vma);
>>>               ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
>>
> 




Thread overview: 19+ messages
2025-04-28 12:04 [PATCH 0/7] Optimize mprotect for large folios Dev Jain
2025-04-28 12:04 ` [PATCH 1/7] mm: Refactor code in mprotect Dev Jain
2025-04-28 12:04 ` [PATCH 2/7] mm: Optimize mprotect() by batch-skipping PTEs Dev Jain
2025-04-28 12:04 ` [PATCH 3/7] mm: Add batched versions of ptep_modify_prot_start/commit Dev Jain
2025-04-28 12:04 ` [PATCH 4/7] arm64: Add batched version of ptep_modify_prot_start Dev Jain
2025-04-28 18:06   ` Zi Yan
2025-04-29  4:44     ` Dev Jain
2025-04-28 12:04 ` [PATCH 5/7] arm64: Add batched version of ptep_modify_prot_commit Dev Jain
2025-04-28 12:04 ` [PATCH 6/7] mm: Batch around can_change_pte_writable() Dev Jain
2025-04-28 12:50   ` Lance Yang
2025-04-28 12:59     ` Dev Jain
2025-04-28 13:23       ` Lance Yang
2025-04-29  4:59         ` Dev Jain
2025-04-28 13:16     ` Lance Yang
2025-04-28 15:54       ` Lance Yang [this message]
2025-04-28 12:04 ` [PATCH 7/7] mm: Optimize mprotect() through PTE-batching Dev Jain
2025-04-28 12:52 ` [PATCH 0/7] Optimize mprotect for large folios Dev Jain
2025-04-28 13:31 ` Lance Yang
2025-04-29  4:40   ` Dev Jain
