* [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
@ 2019-03-30 5:41 Aneesh Kumar K.V
2019-04-01 8:14 ` Jan Kara
0 siblings, 1 reply; 2+ messages in thread
From: Aneesh Kumar K.V @ 2019-03-30 5:41 UTC (permalink / raw)
To: dan.j.williams, akpm, Jan Kara
Cc: linux-mm, linuxppc-dev, Aneesh Kumar K.V, stable, linux-nvdimm
With some architectures like ppc64, set_pmd_at() cannot cope with
a situation where there is already some (different) valid entry present.
Use pmdp_set_access_flags() instead to modify the pfn; it is built to
deal with modifying existing PMD entries.
This is similar to
commit cae85cb8add3 ("mm/memory.c: fix modifying of page protection by insert_pfn()")
We also do a similar update w.r.t. insert_pfn_pud() even though ppc64 doesn't
support pud pfn entries now.
CC: stable@vger.kernel.org
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
mm/huge_memory.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 404acdcd0455..f7dca413c4b2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -755,6 +755,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
+ if (!pmd_none(*pmd)) {
+ if (write) {
+ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+ goto out_unlock;
+ }
+ entry = pmd_mkyoung(*pmd);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+ update_mmu_cache_pmd(vma, addr, pmd);
+ }
+ goto out_unlock;
+ }
+
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
@@ -770,6 +784,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
+out_unlock:
spin_unlock(ptl);
}
@@ -821,6 +836,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pud_lock(mm, pud);
+ if (!pud_none(*pud)) {
+ if (write) {
+ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+ goto out_unlock;
+ }
+ entry = pud_mkyoung(*pud);
+ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+ update_mmu_cache_pud(vma, addr, pud);
+ }
+ goto out_unlock;
+ }
+
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
@@ -830,6 +859,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
spin_unlock(ptl);
}
--
2.20.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd()
2019-03-30 5:41 [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd() Aneesh Kumar K.V
@ 2019-04-01 8:14 ` Jan Kara
0 siblings, 0 replies; 2+ messages in thread
From: Jan Kara @ 2019-04-01 8:14 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: Jan Kara, linux-nvdimm, stable, linux-mm, dan.j.williams,
linuxppc-dev, akpm
On Sat 30-03-19 11:11:21, Aneesh Kumar K.V wrote:
> With some architectures like ppc64, set_pmd_at() cannot cope with
> a situation where there is already some (different) valid entry present.
>
> Use pmdp_set_access_flags() instead to modify the pfn; it is built to
> deal with modifying existing PMD entries.
>
> This is similar to
> commit cae85cb8add3 ("mm/memory.c: fix modifying of page protection by insert_pfn()")
>
> We also do a similar update w.r.t. insert_pfn_pud() even though ppc64 doesn't
> support pud pfn entries now.
>
> CC: stable@vger.kernel.org
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Thanks for fixing this! The patch looks good to me. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
> ---
> mm/huge_memory.c | 31 +++++++++++++++++++++++++++++++
> 1 file changed, 31 insertions(+)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 404acdcd0455..f7dca413c4b2 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -755,6 +755,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> spinlock_t *ptl;
>
> ptl = pmd_lock(mm, pmd);
> + if (!pmd_none(*pmd)) {
> + if (write) {
> + if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
> + WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
> + goto out_unlock;
> + }
> + entry = pmd_mkyoung(*pmd);
> + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
> + if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
> + update_mmu_cache_pmd(vma, addr, pmd);
> + }
> + goto out_unlock;
> + }
> +
> entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
> if (pfn_t_devmap(pfn))
> entry = pmd_mkdevmap(entry);
> @@ -770,6 +784,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
>
> set_pmd_at(mm, addr, pmd, entry);
> update_mmu_cache_pmd(vma, addr, pmd);
> +out_unlock:
> spin_unlock(ptl);
> }
>
> @@ -821,6 +836,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
> spinlock_t *ptl;
>
> ptl = pud_lock(mm, pud);
> + if (!pud_none(*pud)) {
> + if (write) {
> + if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
> + WARN_ON_ONCE(!is_huge_zero_pud(*pud));
> + goto out_unlock;
> + }
> + entry = pud_mkyoung(*pud);
> + entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
> + if (pudp_set_access_flags(vma, addr, pud, entry, 1))
> + update_mmu_cache_pud(vma, addr, pud);
> + }
> + goto out_unlock;
> + }
> +
> entry = pud_mkhuge(pfn_t_pud(pfn, prot));
> if (pfn_t_devmap(pfn))
> entry = pud_mkdevmap(entry);
> @@ -830,6 +859,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
> }
> set_pud_at(mm, addr, pud, entry);
> update_mmu_cache_pud(vma, addr, pud);
> +
> +out_unlock:
> spin_unlock(ptl);
> }
>
> --
> 2.20.1
>
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2019-04-01 8:16 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-03-30 5:41 [PATCH] mm: Fix modifying of page protection by insert_pfn_pmd() Aneesh Kumar K.V
2019-04-01 8:14 ` Jan Kara
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).