[RFC] Simplify users of vma_address_end()

From: Matthew Wilcox
Date: 2022-01-19 17:50 UTC
To: Hugh Dickins
Cc: linux-mm, Dave Hansen
Hi Hugh,
What do you think to this simplification? Dave dislikes the ?: usage
in this context, and I thought we could usefully put the condition
inside the (inline) function.
I could also go for:
	if (!PageCompound(page))
		return address + PAGE_SIZE;
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
in case anyone starts to create compound KSM pages.
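Spelled out, that alternative would open the helper like this (just a
sketch; the rest of the function continues as in the patch below):

	static inline unsigned long vma_address_end(struct page *page,
			unsigned long address, struct vm_area_struct *vma)
	{
		pgoff_t pgoff;

		/* Order-0 pages, KSM included, map exactly one PTE */
		if (!PageCompound(page))
			return address + PAGE_SIZE;
		VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
		pgoff = page_to_pgoff(page) + compound_nr(page);
		...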
diff --git a/mm/internal.h b/mm/internal.h
index c774075b3893..7cd33ee4df32 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -462,13 +462,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  * Assumes that vma_address() already returned a good starting address.
  * If page is a compound head, the entire compound page is considered.
  */
-static inline unsigned long
-vma_address_end(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long vma_address_end(struct page *page,
+		unsigned long address, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff;
-	unsigned long address;
 
-	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
+	if (PageKsm(page) || !PageCompound(page))
+		return address + PAGE_SIZE;
 	pgoff = page_to_pgoff(page) + compound_nr(page);
 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	/* Check for address beyond vma (or wrapped through 0?) */
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index f7b331081791..fcd7b9ccfb1e 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -181,15 +181,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		return true;
 	}
 
-	/*
-	 * Seek to next pte only makes sense for THP.
-	 * But more important than that optimization, is to filter out
-	 * any PageKsm page: whose page->index misleads vma_address()
-	 * and vma_address_end() to disaster.
-	 */
-	end = PageTransCompound(page) ?
-		vma_address_end(page, pvmw->vma) :
-		pvmw->address + PAGE_SIZE;
+	end = vma_address_end(page, pvmw->address, pvmw->vma);
 	if (pvmw->pte)
 		goto next_pte;
 restart:
diff --git a/mm/rmap.c b/mm/rmap.c
index a531b64d53fa..5d5dc2a60a26 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -946,7 +946,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				vma_address_end(page, vma));
+				vma_address_end(page, address, vma));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1453,8 +1453,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
-	range.end = PageKsm(page) ?
-			address + PAGE_SIZE : vma_address_end(page, vma);
+	range.end = vma_address_end(page, address, vma);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address, range.end);
 	if (PageHuge(page)) {
@@ -1757,8 +1756,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
-	range.end = PageKsm(page) ?
-			address + PAGE_SIZE : vma_address_end(page, vma);
+	range.end = vma_address_end(page, address, vma);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address, range.end);
 	if (PageHuge(page)) {
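For reference, the whole helper with this patch applied would read
roughly as below; the final clamp to vma->vm_end is unchanged context
beyond the hunk above, reconstructed here on that assumption:

	static inline unsigned long vma_address_end(struct page *page,
			unsigned long address, struct vm_area_struct *vma)
	{
		pgoff_t pgoff;

		/* KSM and other order-0 pages map exactly one PTE */
		if (PageKsm(page) || !PageCompound(page))
			return address + PAGE_SIZE;
		pgoff = page_to_pgoff(page) + compound_nr(page);
		address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address > vma->vm_end)
			address = vma->vm_end;
		return address;
	}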
Re: [RFC] Simplify users of vma_address_end()

From: Hugh Dickins
Date: 2022-01-24 4:40 UTC
To: Matthew Wilcox
Cc: Hugh Dickins, linux-mm, Dave Hansen
On Wed, 19 Jan 2022, Matthew Wilcox wrote:
> Hi Hugh,
>
> What do you think to this simplification? Dave dislikes the ?: usage
> in this context, and I thought we could usefully put the condition
> inside the (inline) function.
Yes, that's an improvement. I think when I created vma_address_end()
originally (long before posting), there was only one caller who needed
to worry about PageKsm, so I avoided inflicting that awkwardness on the
others; but now it looks like most (of its few) callers do have to worry.
But I'll suggest one nit, which you can ignore if you feel strongly
otherwise: I'd prefer vma_address_end(page, vma, address) as a better
match to vma_address(page, vma).
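That is, keeping vma in the same position across the pair, so the
prototypes line up (a sketch of the suggested argument order only):

	unsigned long vma_address(struct page *page, struct vm_area_struct *vma);
	unsigned long vma_address_end(struct page *page, struct vm_area_struct *vma,
				      unsigned long address);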
> 
> I could also go for:
> 
> 	if (!PageCompound(page))
> 		return address + PAGE_SIZE;
> 	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
> 
> in case anyone starts to create compound KSM pages.

Another way to do it, yes: I don't mind either way.

Thanks,
Hugh