linux-mm.kvack.org archive mirror
* [PATCH 0/2] smaps bugfixes, new fields for locked memory
@ 2018-02-22  5:26 Daniel Colascione
  2018-02-22  5:26 ` [PATCH 1/2] Bug fixes for smaps_rollup Daniel Colascione
  2018-02-22  5:26 ` [PATCH 2/2] Add LockedRss/LockedPrivate to smaps and smaps_rollup Daniel Colascione
  0 siblings, 2 replies; 4+ messages in thread
From: Daniel Colascione @ 2018-02-22  5:26 UTC (permalink / raw)
  To: linux-mm; +Cc: Daniel Colascione

This small patch series first fixes a few bugs in smaps_rollup, then
adds two new fields to smaps and smaps_rollup that report locked
memory sizes.
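
Purely as an illustration (the numbers below are invented), with the
series applied the end of /proc/<pid>/smaps_rollup would look like:

    Locked:             1024 kB
    LockedRss:          1024 kB
    LockedPrivate:       512 kB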

Daniel Colascione (2):
  Bug fixes for smaps_rollup
  Add LockedRss/LockedPrivate to smaps and smaps_rollup

 Documentation/filesystems/proc.txt |   7 +-
 fs/proc/task_mmu.c                 | 118 +++++++++++++++++++----------
 2 files changed, 83 insertions(+), 42 deletions(-)

-- 
2.16.1.291.g4437f3f132-goog


* [PATCH 1/2] Bug fixes for smaps_rollup
  2018-02-22  5:26 [PATCH 0/2] smaps bugfixes, new fields for locked memory Daniel Colascione
@ 2018-02-22  5:26 ` Daniel Colascione
  2018-02-22 22:31   ` Andrew Morton
  2018-02-22  5:26 ` [PATCH 2/2] Add LockedRss/LockedPrivate to smaps and smaps_rollup Daniel Colascione
  1 sibling, 1 reply; 4+ messages in thread
From: Daniel Colascione @ 2018-02-22  5:26 UTC (permalink / raw)
  To: linux-mm; +Cc: Daniel Colascione

Properly account and display pss_locked; behave properly when seq_file
starts and stops multiple times on a single open file description,
when it issues multiple show calls, and when seq_file seeks to a
non-zero position.
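
As a rough reproducer sketch (not part of this patch, and the 128-byte
offset is arbitrary), the affected paths can be exercised by reading
smaps_rollup in small chunks and again after seeking to a non-zero
offset, both of which make seq_file stop and restart on the same open
file description:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd = open("/proc/self/smaps_rollup", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Many small reads: seq_file starts and stops repeatedly. */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);

            /* Read again from a non-zero position on the same fd. */
            if (lseek(fd, 128, SEEK_SET) < 0)
                    perror("lseek");
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);

            close(fd);
            return 0;
    }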

Signed-off-by: Daniel Colascione <dancol@google.com>
---
 fs/proc/task_mmu.c | 102 +++++++++++++++++++++++++++------------------
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ec6d2983a5cb..5e95f7eaf145 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -188,8 +188,14 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 
 	m->version = 0;
 	if (pos < mm->map_count) {
+		bool rollup_mode = !!priv->rollup;
 		for (vma = mm->mmap; pos; pos--) {
 			m->version = vma->vm_start;
+			if (rollup_mode) {
+				/* Accumulate into rollup structure */
+				int show_result = m->op->show(m, vma);
+				VM_BUG_ON(show_result);
+			}
 			vma = vma->vm_next;
 		}
 		return vma;
@@ -438,7 +444,7 @@ const struct file_operations proc_tid_maps_operations = {
 
 #ifdef CONFIG_PROC_PAGE_MONITOR
 struct mem_size_stats {
-	bool first;
+	struct vm_area_struct *previous_vma;
 	unsigned long resident;
 	unsigned long shared_clean;
 	unsigned long shared_dirty;
@@ -459,11 +465,13 @@ struct mem_size_stats {
 	bool check_shmem_swap;
 };
 
-static void smaps_account(struct mem_size_stats *mss, struct page *page,
+static void smaps_account(struct mem_size_stats *mss,
+		struct vm_area_struct *vma, struct page *page,
 		bool compound, bool young, bool dirty)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
+	u64 pss_add = 0;
 
 	if (PageAnon(page)) {
 		mss->anonymous += size;
@@ -486,8 +494,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 			mss->private_dirty += size;
 		else
 			mss->private_clean += size;
-		mss->pss += (u64)size << PSS_SHIFT;
-		return;
+		pss_add += (u64)size << PSS_SHIFT;
+		goto done;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
@@ -498,15 +506,20 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			pss_add += (PAGE_SIZE << PSS_SHIFT) / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			pss_add += PAGE_SIZE << PSS_SHIFT;
 		}
 	}
+
+done:
+	mss->pss += pss_add;
+	if (vma->vm_flags & VM_LOCKED)
+		mss->pss_locked += pss_add;
 }
 
 #ifdef CONFIG_SHMEM
@@ -569,7 +582,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, vma, page, false, pte_young(*pte), pte_dirty(*pte));
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -592,7 +605,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, vma, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -736,6 +749,37 @@ void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
 {
 }
 
+static void show_smap_accumulate(struct mm_walk *smaps_walk,
+		struct vm_area_struct *vma, struct mem_size_stats *mss)
+{
+#ifdef CONFIG_SHMEM
+	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
+		/*
+		 * For shared or readonly shmem mappings we know that all
+		 * swapped out pages belong to the shmem object, and we can
+		 * obtain the swap value much more efficiently. For private
+		 * writable mappings, we might have COW pages that are
+		 * not affected by the parent swapped out pages of the shmem
+		 * object, so we have to distinguish them during the page walk.
+		 * Unless we know that the shmem object (or the part mapped by
+		 * our VMA) has no swapped out pages at all.
+		 */
+		unsigned long shmem_swapped = shmem_swap_usage(vma);
+
+		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
+					!(vma->vm_flags & VM_WRITE)) {
+			mss->swap += shmem_swapped;
+		} else {
+			mss->check_shmem_swap = true;
+			smaps_walk->pte_hole = smaps_pte_hole;
+		}
+	}
+#endif
+
+	/* mmap_sem is held in m_start */
+	walk_page_vma(vma, smaps_walk);
+}
+
 static int show_smap(struct seq_file *m, void *v, int is_pid)
 {
 	struct proc_maps_private *priv = m->private;
@@ -756,9 +800,9 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 	if (priv->rollup) {
 		rollup_mode = true;
 		mss = priv->rollup;
-		if (mss->first) {
+		if (vma == priv->mm->mmap) { /* First */
+			memset(mss, 0, sizeof(*mss));
 			mss->first_vma_start = vma->vm_start;
-			mss->first = false;
 		}
 		last_vma = !m_next_vma(priv, vma);
 	} else {
@@ -769,34 +813,13 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 
 	smaps_walk.private = mss;
 
-#ifdef CONFIG_SHMEM
-	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
-		/*
-		 * For shared or readonly shmem mappings we know that all
-		 * swapped out pages belong to the shmem object, and we can
-		 * obtain the swap value much more efficiently. For private
-		 * writable mappings, we might have COW pages that are
-		 * not affected by the parent swapped out pages of the shmem
-		 * object, so we have to distinguish them during the page walk.
-		 * Unless we know that the shmem object (or the part mapped by
-		 * our VMA) has no swapped out pages at all.
-		 */
-		unsigned long shmem_swapped = shmem_swap_usage(vma);
-
-		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
-					!(vma->vm_flags & VM_WRITE)) {
-			mss->swap = shmem_swapped;
-		} else {
-			mss->check_shmem_swap = true;
-			smaps_walk.pte_hole = smaps_pte_hole;
-		}
+	/* seq_file is allowed to ask us to show many times for the
+	 * same iterator value, and we don't want to accumulate each
+	 * VMA more than once. */
+	if (mss->previous_vma != vma) {
+		mss->previous_vma = vma;
+		show_smap_accumulate(&smaps_walk, vma, mss);
 	}
-#endif
-
-	/* mmap_sem is held in m_start */
-	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 
 	if (!rollup_mode) {
 		show_map_vma(m, vma, is_pid);
@@ -852,7 +875,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 			   mss->private_hugetlb >> 10,
 			   mss->swap >> 10,
 			   (unsigned long)(mss->swap_pss >> (10 + PSS_SHIFT)),
-			   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)));
+			   (unsigned long)(mss->pss_locked >> (10 + PSS_SHIFT)));
 
 	if (!rollup_mode) {
 		arch_show_smap(m, vma);
@@ -901,12 +924,11 @@ static int pid_smaps_rollup_open(struct inode *inode, struct file *file)
 		return ret;
 	seq = file->private_data;
 	priv = seq->private;
-	priv->rollup = kzalloc(sizeof(*priv->rollup), GFP_KERNEL);
+	priv->rollup = kmalloc(sizeof(*priv->rollup), GFP_KERNEL);
 	if (!priv->rollup) {
 		proc_map_release(inode, file);
 		return -ENOMEM;
 	}
-	priv->rollup->first = true;
 	return 0;
 }
 
-- 
2.16.1.291.g4437f3f132-goog


* [PATCH 2/2] Add LockedRss/LockedPrivate to smaps and smaps_rollup
  2018-02-22  5:26 [PATCH 0/2] smaps bugfixes, new fields for locked memory Daniel Colascione
  2018-02-22  5:26 ` [PATCH 1/2] Bug fixes for smaps_rollup Daniel Colascione
@ 2018-02-22  5:26 ` Daniel Colascione
  1 sibling, 0 replies; 4+ messages in thread
From: Daniel Colascione @ 2018-02-22  5:26 UTC (permalink / raw)
  To: linux-mm; +Cc: Daniel Colascione

These additional smaps fields make it easy to analyze a process's
contribution to locked memory without having to manually filter and
sum per-VMA entries from smaps. VmLck from /proc/pid/status isn't
quite right for this purpose, because it reflects the number of
potentially locked pages in lockable VMAs, not the number of pages
actually resident and locked.
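
As an illustrative sketch only (it assumes a kernel with this series
applied, plus mlock2()/MLOCK_ONFAULT support, so that the locked VMA
is mostly non-resident), the difference can be seen like this:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT 0x01
    #endif

    int main(void)
    {
            size_t len = 16 << 20;  /* 16 MiB, locked on fault */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED || mlock2(p, len, MLOCK_ONFAULT)) {
                    perror("mmap/mlock2");
                    return 1;
            }
            p[0] = 1;  /* fault in a single page */

            /* VmLck counts the whole 16 MiB VMA... */
            system("grep VmLck /proc/self/status");
            /* ...while LockedRss/LockedPrivate count resident pages. */
            system("grep '^Locked' /proc/self/smaps_rollup");
            return 0;
    }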

Signed-off-by: Daniel Colascione <dancol@google.com>
---
 Documentation/filesystems/proc.txt |  7 ++++++-
 fs/proc/task_mmu.c                 | 20 +++++++++++++++++---
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 2a84bb334894..e87350400cd9 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -425,6 +425,7 @@ SwapPss:               0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
 Locked:                0 kB
+LockedRss:             0 kB
 VmFlags: rd ex mr mw me dw
 
 the first of these lines shows the same information as is displayed for the
@@ -461,7 +462,11 @@ For shmem mappings, "Swap" includes also the size of the mapped (and not
 replaced by copy-on-write) part of the underlying shmem object out on swap.
 "SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this
 does not take into account swapped out page of underlying shmem objects.
-"Locked" indicates whether the mapping is locked in memory or not.
+"Locked" contains the PSS for locked mappings; "LockedRss" contains the
+amount resident and locked memory in the given mapping. That is, "Locked"
+depends on other processes also potentially mapping the given memory, while
+"LockedRss" is invariant. "LockedPrivate" is like "LockedRss", but counts only
+the pages unique to the process.
 
 "VmFlags" field deserves a separate description. This member represents the kernel
 flags associated with the particular virtual memory area in two letter encoded
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5e95f7eaf145..598a7f855ad1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -459,6 +459,8 @@ struct mem_size_stats {
 	unsigned long shared_hugetlb;
 	unsigned long private_hugetlb;
 	unsigned long first_vma_start;
+	unsigned long resident_locked;
+	unsigned long private_locked;
 	u64 pss;
 	u64 pss_locked;
 	u64 swap_pss;
@@ -472,6 +474,7 @@ static void smaps_account(struct mem_size_stats *mss,
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
 	u64 pss_add = 0;
+	bool locked = vma->vm_flags & VM_LOCKED;
 
 	if (PageAnon(page)) {
 		mss->anonymous += size;
@@ -480,6 +483,9 @@ static void smaps_account(struct mem_size_stats *mss,
 	}
 
 	mss->resident += size;
+	if (locked)
+		mss->resident_locked += size;
+
 	/* Accumulate the size in pages that have been accessed. */
 	if (young || page_is_young(page) || PageReferenced(page))
 		mss->referenced += size;
@@ -495,6 +501,8 @@ static void smaps_account(struct mem_size_stats *mss,
 		else
 			mss->private_clean += size;
 		pss_add += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->private_locked += size;
 		goto done;
 	}
 
@@ -513,12 +521,14 @@ static void smaps_account(struct mem_size_stats *mss,
 			else
 				mss->private_clean += PAGE_SIZE;
 			pss_add += PAGE_SIZE << PSS_SHIFT;
+			if (locked)
+				mss->private_locked += PAGE_SIZE;
 		}
 	}
 
 done:
 	mss->pss += pss_add;
-	if (vma->vm_flags & VM_LOCKED)
+	if (locked)
 		mss->pss_locked += pss_add;
 }
 
@@ -859,7 +869,9 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 			   "Private_Hugetlb: %7lu kB\n"
 			   "Swap:           %8lu kB\n"
 			   "SwapPss:        %8lu kB\n"
-			   "Locked:         %8lu kB\n",
+			   "Locked:         %8lu kB\n"
+			   "LockedRss:      %8lu kB\n"
+			   "LockedPrivate:  %8lu kB\n",
 			   mss->resident >> 10,
 			   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)),
 			   mss->shared_clean  >> 10,
@@ -875,7 +887,9 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 			   mss->private_hugetlb >> 10,
 			   mss->swap >> 10,
 			   (unsigned long)(mss->swap_pss >> (10 + PSS_SHIFT)),
-			   (unsigned long)(mss->pss_locked >> (10 + PSS_SHIFT)));
+			   (unsigned long)(mss->pss_locked >> (10 + PSS_SHIFT)),
+			   mss->resident_locked >> 10,
+			   mss->private_locked >> 10);
 
 	if (!rollup_mode) {
 		arch_show_smap(m, vma);
-- 
2.16.1.291.g4437f3f132-goog


* Re: [PATCH 1/2] Bug fixes for smaps_rollup
  2018-02-22  5:26 ` [PATCH 1/2] Bug fixes for smaps_rollup Daniel Colascione
@ 2018-02-22 22:31   ` Andrew Morton
  0 siblings, 0 replies; 4+ messages in thread
From: Andrew Morton @ 2018-02-22 22:31 UTC (permalink / raw)
  To: Daniel Colascione; +Cc: linux-mm

On Wed, 21 Feb 2018 21:26:58 -0800 Daniel Colascione <dancol@google.com> wrote:

> Properly account and display pss_locked; behave properly when seq_file
> starts and stops multiple times on a single open file description,
> when it issues multiple show calls, and when seq_file seeks to a
> non-zero position.

For each of these bugs can we please see a detailed description of the
misbehavior?  A good way of presenting that info is to show the
example commands, the resulting output and an explanation of why it was
wrong.  "behave properly" doesn't cut it ;)

There might be requests for one-fix-per-patch, too.  And it is the best
way, although I overlook that at times.

Please also attempt to cc the relevant developers.  `git blame' is a
good way of finding them.  Kirill, Oleg, adobriyan...

