From: Chih-En Lin <shiyn.lin@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Qi Zheng <zhengqi.arch@bytedance.com>,
David Hildenbrand <david@redhat.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
John Hubbard <jhubbard@nvidia.com>, Nadav Amit <namit@vmware.com>,
Barry Song <baohua@kernel.org>,
Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Steven Rostedt <rostedt@goodmis.org>,
Masami Hiramatsu <mhiramat@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Yu Zhao <yuzhao@google.com>, Steven Barrett <steven@liquorix.net>,
Juergen Gross <jgross@suse.com>, Peter Xu <peterx@redhat.com>,
Kefeng Wang <wangkefeng.wang@huawei.com>,
Tong Tiangen <tongtiangen@huawei.com>,
Christoph Hellwig <hch@infradead.org>,
"Liam R. Howlett" <Liam.Howlett@Oracle.com>,
Yang Shi <shy828301@gmail.com>, Vlastimil Babka <vbabka@suse.cz>,
Alex Sierra <alex.sierra@amd.com>,
Vincent Whitchurch <vincent.whitchurch@axis.com>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Li kunyu <kunyu@nfschina.com>, Liu Shixin <liushixin2@huawei.com>,
Hugh Dickins <hughd@google.com>, Minchan Kim <minchan@kernel.org>,
Joey Gouly <joey.gouly@arm.com>,
Chih-En Lin <shiyn.lin@gmail.com>, Michal Hocko <mhocko@suse.com>,
Suren Baghdasaryan <surenb@google.com>,
"Zach O'Keefe" <zokeefe@google.com>,
Gautam Menghani <gautammenghani201@gmail.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Mark Brown <broonie@kernel.org>,
"Eric W. Biederman" <ebiederm@xmission.com>,
Andrei Vagin <avagin@gmail.com>,
Shakeel Butt <shakeelb@google.com>,
Daniel Bristot de Oliveira <bristot@kernel.org>,
"Jason A. Donenfeld" <Jason@zx2c4.com>,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Alexey Gladkov <legion@kernel.org>,
x86@kernel.org, linux-kernel@vger.kernel.org,
linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
linux-trace-kernel@vger.kernel.org,
linux-perf-users@vger.kernel.org,
Dinglan Peng <peng301@purdue.edu>,
Pedro Fonseca <pfonseca@purdue.edu>,
Jim Huang <jserv@ccns.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@gmail.com>
Subject: [PATCH v5 06/17] mm/rmap: Break COW PTE in rmap walking
Date: Fri, 14 Apr 2023 22:23:30 +0800
Message-ID: <20230414142341.354556-7-shiyn.lin@gmail.com>
In-Reply-To: <20230414142341.354556-1-shiyn.lin@gmail.com>
Some rmap users (unmap, migrate, device exclusive, mkclean, etc.) may
modify the PTE entry during the rmap walk. Add a new page vma mapped
walk flag, PVMW_BREAK_COW_PTE, to tell the rmap walk to break the
COW-ed PTE table before mapping the entry, so these walkers never
write through a PTE table that is still shared after fork.
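To illustrate the intended use, here is a minimal sketch (not part of
this patch; the callback name is hypothetical, and break_cow_pte() is
the helper introduced earlier in this series) of a walker that writes
PTEs and therefore opts in:

    /*
     * Illustrative only: an rmap_one-style callback that intends to
     * modify PTEs passes PVMW_BREAK_COW_PTE, so page_vma_mapped_walk()
     * calls break_cow_pte() on a COW-ed PTE table before mapping it.
     */
    static bool example_rmap_one(struct folio *folio,
                                 struct vm_area_struct *vma,
                                 unsigned long address, void *arg)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
                                  PVMW_BREAK_COW_PTE);

            while (page_vma_mapped_walk(&pvmw)) {
                    /*
                     * The PTE table mapped here is now exclusively
                     * owned by this mm, so writing through pvmw.pte
                     * cannot leak into other processes that shared
                     * the table via COW.
                     */
            }
            return true;
    }

Walkers that do not need to write leave the flag unset and keep
walking the shared PTE table as before.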
Signed-off-by: Chih-En Lin <shiyn.lin@gmail.com>
---
 include/linux/rmap.h | 2 ++
 mm/migrate.c         | 3 ++-
 mm/page_vma_mapped.c | 4 ++++
 mm/rmap.c            | 9 +++++----
 mm/vmscan.c          | 3 ++-
 5 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..57e9b72dc63a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -377,6 +377,8 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
#define PVMW_SYNC (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
+/* Break COW-ed PTE during walking */
+#define PVMW_BREAK_COW_PTE (1 << 2)
struct page_vma_mapped_walk {
unsigned long pfn;
diff --git a/mm/migrate.c b/mm/migrate.c
index db3f154446af..38933993af14 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -184,7 +184,8 @@ void putback_movable_pages(struct list_head *l)
static bool remove_migration_pte(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr, void *old)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+ DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr,
+ PVMW_SYNC | PVMW_MIGRATION | PVMW_BREAK_COW_PTE);
while (page_vma_mapped_walk(&pvmw)) {
rmap_t rmap_flags = RMAP_NONE;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 4e448cfbc6ef..1750b3460828 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -254,6 +254,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
step_forward(pvmw, PMD_SIZE);
continue;
}
+ if (pvmw->flags & PVMW_BREAK_COW_PTE) {
+ if (break_cow_pte(vma, pvmw->pmd, pvmw->address))
+ return not_found(pvmw);
+ }
if (!map_pte(pvmw))
goto next_pte;
this_pte:
diff --git a/mm/rmap.c b/mm/rmap.c
index 8632e02661ac..5582da6d72fc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1006,7 +1006,8 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
+ PVMW_SYNC | PVMW_BREAK_COW_PTE);
int *cleaned = arg;
*cleaned += page_vma_mkclean_one(&pvmw);
@@ -1450,7 +1451,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
pte_t pteval;
struct page *subpage;
bool anon_exclusive, ret = true;
@@ -1810,7 +1811,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
pte_t pteval;
struct page *subpage;
bool anon_exclusive, ret = true;
@@ -2177,7 +2178,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *priv)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
struct make_exclusive_args *args = priv;
pte_t pteval;
struct page *subpage;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9c1c5e8b24b8..4abbd036f927 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1892,7 +1892,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
/*
* The folio is mapped into the page tables of one or more
- * processes. Try to unmap it here.
+ * processes. Try to unmap it here. Also, since unmapping will write
+ * to the page tables, break the COW-ed PTE table first.
*/
if (folio_mapped(folio)) {
enum ttu_flags flags = TTU_BATCH_FLUSH;
--
2.34.1
Thread overview: 18+ messages
2023-04-14 14:23 [PATCH v5 00/17] Introduce Copy-On-Write to Page Table Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 01/17] mm: Split out the present cases from zap_pte_range() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 02/17] mm: Allow user to control COW PTE via prctl Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 03/17] mm: Add Copy-On-Write PTE to fork() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 04/17] mm: Add break COW PTE fault and helper functions Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 05/17] mm: Handle COW-ed PTE during zapping Chih-En Lin
2023-04-14 14:23 ` Chih-En Lin [this message]
2023-04-14 14:23 ` [PATCH v5 07/17] mm/khugepaged: Break COW PTE before scanning pte Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 08/17] mm/ksm: Break COW PTE before modify shared PTE Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 09/17] mm/madvise: Handle COW-ed PTE with madvise() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 10/17] mm/gup: Trigger break COW PTE before calling follow_pfn_pte() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 11/17] mm/mprotect: Break COW PTE before changing protection Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 12/17] mm/userfaultfd: Support COW PTE Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 13/17] mm/migrate_device: " Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 14/17] fs/proc: Support COW PTE with clear_refs_write Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 15/17] events/uprobes: Break COW PTE before replacing page Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 16/17] mm: fork: Enable COW PTE to fork system call Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 17/17] mm: Check the unexpected modification of COW-ed PTE Chih-En Lin