From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: maple-tree@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>
Subject: [PATCH v2 17/36] mm: Update validate_mm() to use vma iterator
Date: Fri,  5 May 2023 13:41:45 -0400
Message-ID: <20230505174204.2665599-18-Liam.Howlett@oracle.com>
In-Reply-To: <20230505174204.2665599-1-Liam.Howlett@oracle.com>

Use the vma iterator in the validation code and fold the maple tree
check into the main validate_mm() function.
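
As a result, the per-VMA check that validate_mm_mt() used to do is now
performed inline while walking the tree with the vma iterator.  A
simplified sketch of the resulting loop (names taken from the diff
below; the error reporting is trimmed for brevity):

	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		unsigned long vmi_start = vma_iter_addr(&vmi);
		unsigned long vmi_end = vma_iter_end(&vmi);

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm) ||
		    VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			vma_iter_dump_tree(&vmi);
	}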

Introduce a new function vma_iter_dump_tree() to dump the maple tree in
hex format.
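
The new helper's body is empty unless CONFIG_DEBUG_VM_MAPLE_TREE is
enabled, so debug code that already holds a vma iterator can call it in
place of an open-coded mt_dump(), e.g. (illustrative sketch modelled on
the vma_iter_store() check in the diff below):

	if (WARN_ON(vmi->mas.node != MAS_START &&
		    vmi->mas.index > vma->vm_start))
		vma_iter_dump_tree(vmi);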

Replace all calls to validate_mm_mt() with validate_mm().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 include/linux/mmdebug.h |  14 ++++++
 mm/debug.c              |   9 ++++
 mm/internal.h           |   3 +-
 mm/mmap.c               | 101 ++++++++++++++++------------------------
 4 files changed, 66 insertions(+), 61 deletions(-)

diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index b8728d11c9490..7c3e7b0b0e8fd 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -8,10 +8,12 @@
 struct page;
 struct vm_area_struct;
 struct mm_struct;
+struct vma_iterator;
 
 void dump_page(struct page *page, const char *reason);
 void dump_vma(const struct vm_area_struct *vma);
 void dump_mm(const struct mm_struct *mm);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
 
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
@@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm);
 	}								\
 	unlikely(__ret_warn_once);					\
 })
+#define VM_WARN_ON_ONCE_MM(cond, mm)		({			\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(__ret_warn_once && !__warned)) {			\
+		dump_mm(mm);						\
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
 
 #define VM_WARN_ON(cond) (void)WARN_ON(cond)
 #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm);
 #define VM_WARN_ON_ONCE_PAGE(cond, page)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #endif
diff --git a/mm/debug.c b/mm/debug.c
index c7b228097bd98..ee533a5ceb79d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -268,4 +268,13 @@ void page_init_poison(struct page *page, size_t size)
 	if (page_init_poisoning)
 		memset(page, PAGE_POISON_PATTERN, size);
 }
+
+void vma_iter_dump_tree(const struct vma_iterator *vmi)
+{
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+	mas_dump(&vmi->mas);
+	mt_dump(vmi->mas.tree, mt_dump_hex);
+#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
+}
+
 #endif		/* CONFIG_DEBUG_VM */
diff --git a/mm/internal.h b/mm/internal.h
index 4c195920f5656..8d1a8bd001247 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1051,13 +1051,14 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
 		printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
 		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
 		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
-		mt_dump(vmi->mas.tree, mt_dump_hex);
+		vma_iter_dump_tree(vmi);
 	}
 	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
 		printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
 		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
 		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
 		mt_dump(vmi->mas.tree, mt_dump_hex);
+		vma_iter_dump_tree(vmi);
 	}
 #endif
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 76eaf12f13903..bcebfd9266324 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -299,62 +299,44 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	return origbrk;
 }
 
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
-extern void mt_validate(struct maple_tree *mt);
-extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt);
-
-/* Validate the maple tree */
-static void validate_mm_mt(struct mm_struct *mm)
-{
-	struct maple_tree *mt = &mm->mm_mt;
-	struct vm_area_struct *vma_mt;
-
-	MA_STATE(mas, mt, 0, 0);
-
-	mt_validate(&mm->mm_mt);
-	mas_for_each(&mas, vma_mt, ULONG_MAX) {
-		if ((vma_mt->vm_start != mas.index) ||
-		    (vma_mt->vm_end - 1 != mas.last)) {
-			pr_emerg("issue in %s\n", current->comm);
-			dump_stack();
-			dump_vma(vma_mt);
-			pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
-				 mas.index, mas.last);
-			pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
-				 vma_mt->vm_start, vma_mt->vm_end);
-
-			mt_dump(mas.tree, mt_dump_hex);
-			if (vma_mt->vm_end != mas.last + 1) {
-				pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
-						mm, vma_mt->vm_start, vma_mt->vm_end,
-						mas.index, mas.last);
-				mt_dump(mas.tree, mt_dump_hex);
-			}
-			VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
-			if (vma_mt->vm_start != mas.index) {
-				pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
-						mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
-				mt_dump(mas.tree, mt_dump_hex);
-			}
-			VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
-		}
-	}
-}
-
+#if defined(CONFIG_DEBUG_VM)
 static void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
 	struct vm_area_struct *vma;
-	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, mm, 0);
 
-	validate_mm_mt(mm);
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+	mt_validate(&mm->mm_mt);
+#endif
 
-	mas_for_each(&mas, vma, ULONG_MAX) {
+	for_each_vma(vmi, vma) {
 #ifdef CONFIG_DEBUG_VM_RB
 		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
+#endif
+		unsigned long vmi_start, vmi_end;
+		bool warn = 0;
+
+		vmi_start = vma_iter_addr(&vmi);
+		vmi_end = vma_iter_end(&vmi);
+		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
+			warn = 1;
+
+		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
+			warn = 1;
+
+		if (warn) {
+			pr_emerg("issue in %s\n", current->comm);
+			dump_stack();
+			dump_vma(vma);
+			pr_emerg("tree range: %px start %lx end %lx\n", vma,
+				 vmi_start, vmi_end - 1);
+			vma_iter_dump_tree(&vmi);
+		}
 
+#ifdef CONFIG_DEBUG_VM_RB
 		if (anon_vma) {
 			anon_vma_lock_read(anon_vma);
 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
@@ -365,16 +347,15 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	VM_BUG_ON_MM(bug, mm);
 }
 
-#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
-#define validate_mm_mt(root) do { } while (0)
+#else /* !CONFIG_DEBUG_VM */
 #define validate_mm(mm) do { } while (0)
-#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+#endif /* CONFIG_DEBUG_VM */
 
 /*
  * vma has some anon_vma assigned, and is already inserted on that
@@ -2261,7 +2242,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct vm_area_struct *new;
 	int err;
 
-	validate_mm_mt(vma->vm_mm);
+	validate_mm(vma->vm_mm);
 
 	WARN_ON(vma->vm_start >= addr);
 	WARN_ON(vma->vm_end <= addr);
@@ -2319,7 +2300,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	/* Success. */
 	if (new_below)
 		vma_next(vmi);
-	validate_mm_mt(vma->vm_mm);
+	validate_mm(vma->vm_mm);
 	return 0;
 
 out_free_mpol:
@@ -2328,7 +2309,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	vma_iter_free(vmi);
 out_free_vma:
 	vm_area_free(new);
-	validate_mm_mt(vma->vm_mm);
+	validate_mm(vma->vm_mm);
 	return err;
 }
 
@@ -2963,7 +2944,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
 	arch_unmap(mm, start, end);
 	ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return ret;
 }
 
@@ -2985,7 +2966,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct mm_struct *mm = current->mm;
 	struct vma_prepare vp;
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	/*
 	 * Check against address space limits by the changed size
 	 * Note: This happens *after* clearing old mappings in some code paths.
@@ -3226,7 +3207,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	bool faulted_in_anon_vma = true;
 	VMA_ITERATOR(vmi, mm, addr);
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
@@ -3285,7 +3266,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			goto out_vma_link;
 		*need_rmap_locks = false;
 	}
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return new_vma;
 
 out_vma_link:
@@ -3301,7 +3282,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_vma:
 	vm_area_free(new_vma);
 out:
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return NULL;
 }
 
@@ -3438,7 +3419,7 @@ static struct vm_area_struct *__install_special_mapping(
 	int ret;
 	struct vm_area_struct *vma;
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	vma = vm_area_alloc(mm);
 	if (unlikely(vma == NULL))
 		return ERR_PTR(-ENOMEM);
@@ -3461,12 +3442,12 @@ static struct vm_area_struct *__install_special_mapping(
 
 	perf_event_mmap(vma);
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return vma;
 
 out:
 	vm_area_free(vma);
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return ERR_PTR(ret);
 }
 
-- 
2.39.2


Thread overview: 46+ messages
2023-05-05 17:41 [PATCH v2 00/36] Maple tree mas_{next,prev}_range() and cleanup Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 01/36] maple_tree: Fix static analyser cppcheck issue Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 02/36] maple_tree: Clean up mas_parent_enum() and rename to mas_parent_type() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 03/36] maple_tree: Avoid unnecessary ascending Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 04/36] maple_tree: Clean up mas_dfs_postorder() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 05/36] maple_tree: Add format option to mt_dump() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 06/36] maple_tree: Add debug BUG_ON and WARN_ON variants Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 07/36] maple_tree: Convert BUG_ON() to MT_BUG_ON() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 08/36] maple_tree: Change RCU checks to WARN_ON() instead of BUG_ON() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 09/36] maple_tree: Convert debug code to use MT_WARN_ON() and MAS_WARN_ON() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 10/36] maple_tree: Use MAS_BUG_ON() when setting a leaf node as a parent Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 11/36] maple_tree: Use MAS_BUG_ON() in mas_set_height() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 12/36] maple_tree: Use MAS_BUG_ON() from mas_topiary_range() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 13/36] maple_tree: Use MAS_WR_BUG_ON() in mas_store_prealloc() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 14/36] maple_tree: Use MAS_BUG_ON() prior to calling mas_meta_gap() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 15/36] maple_tree: Return error on mte_pivots() out of range Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 16/36] maple_tree: Make test code work without debug enabled Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 17/36] mm: Update validate_mm() to use vma iterator Liam R. Howlett [this message]
2023-05-05 17:41 ` [PATCH v2 18/36] mm: Update vma_iter_store() to use MAS_WARN_ON() Liam R. Howlett
2023-05-06  2:42   ` Sergey Senozhatsky
2023-05-06  2:47   ` Sergey Senozhatsky
2023-05-05 17:41 ` [PATCH v2 19/36] maple_tree: Add __init and __exit to test module Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 20/36] maple_tree: Remove unnecessary check from mas_destroy() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 21/36] maple_tree: mas_start() reset depth on dead node Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 22/36] mm/mmap: Change do_vmi_align_munmap() for maple tree iterator changes Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 23/36] maple_tree: Try harder to keep active node after mas_next() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 24/36] maple_tree: Try harder to keep active node with mas_prev() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 25/36] maple_tree: Revise limit checks in mas_empty_area{_rev}() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 26/36] maple_tree: Fix testing mas_empty_area() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 27/36] maple_tree: Introduce mas_next_slot() interface Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 28/36] maple_tree: Add mas_next_range() and mas_find_range() interfaces Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 29/36] maple_tree: Relocate mas_rewalk() and mas_rewalk_if_dead() Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 30/36] maple_tree: Introduce mas_prev_slot() interface Liam R. Howlett
2023-05-05 17:41 ` [PATCH v2 31/36] maple_tree: Add mas_prev_range() and mas_find_range_rev interface Liam R. Howlett
2023-05-08 13:26   ` Vernon Yang
2023-05-08 16:11     ` Liam R. Howlett
2023-05-05 17:42 ` [PATCH v2 32/36] maple_tree: Clear up index and last setting in single entry tree Liam R. Howlett
2023-05-09 12:39   ` Peng Zhang
2023-05-12 15:54     ` Liam R. Howlett
2023-05-12 17:29       ` Liam R. Howlett
2023-05-05 17:42 ` [PATCH v2 33/36] maple_tree: Update testing code for mas_{next,prev,walk} Liam R. Howlett
2023-05-05 17:42 ` [PATCH v2 34/36] mm: Add vma_iter_{next,prev}_range() to vma iterator Liam R. Howlett
2023-05-05 17:42 ` [PATCH v2 35/36] mm: Avoid rewalk in mmap_region Liam R. Howlett
2023-05-05 17:42 ` [PATCH v2 36/36] maple_tree: Add gap to check_alloc_rev_range() testcase Liam R. Howlett
2023-05-05 19:29   ` Liam R. Howlett
2023-05-05 19:57     ` Andrew Morton
