From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
To: linux-mm@kvack.org, akpm@linux-foundation.org
Cc: Dave Hansen <dave.hansen@linux.intel.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Matthew Wilcox <willy@infradead.org>,
Mel Gorman <mgorman@suse.de>, Yu Zhao <yuzhao@google.com>,
Wei Xu <weixugc@google.com>, Guru Anbalagane <gurua@google.com>,
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Subject: [RFC PATCH v1 1/7] mm: Move some code around so that the next patch is simpler
Date: Sun, 2 Apr 2023 16:12:34 +0530 [thread overview]
Message-ID: <20230402104240.1734931-2-aneesh.kumar@linux.ibm.com> (raw)
In-Reply-To: <20230402104240.1734931-1-aneesh.kumar@linux.ibm.com>
Move lru_gen_add_folio() from mm_inline.h to mm/vmscan.c. A later patch
will add support for an arch-specific mapping from page access counts to
generations and will use that mapping when adding a folio to an lruvec;
moving the function out of the header enables that.
No functional change in this patch.
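To make that concrete: the expectation is that lru_gen_add_folio(), once
in mm/vmscan.c, can consult an arch-provided mapping when picking the
target generation. A minimal sketch of what such a hook could look like
(the name arch_get_lru_gen_seq() and its weak default are hypothetical,
not part of this series):

/*
 * Hypothetical sketch only: a weak generic default that a later patch
 * could override per-architecture to map a page access count to a
 * target generation sequence number.
 */
unsigned long __weak arch_get_lru_gen_seq(struct lru_gen_struct *lrugen,
					  struct folio *folio, int type)
{
	/* Generic fallback: preserve the current placement policy. */
	if (folio_test_active(folio))
		return lrugen->max_seq;
	return lrugen->min_seq[type];
}

Keeping the function out of line in mm/vmscan.c would let such a hook use
vmscan-internal state without dragging it into mm_inline.h.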
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
include/linux/mm_inline.h | 47 +-------------
mm/vmscan.c | 127 ++++++++++++++++++++++++++------------
2 files changed, 88 insertions(+), 86 deletions(-)
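Note for reviewers: callers are untouched because mm_inline.h now carries
a prototype instead of the inline body. For example, lruvec_add_folio()
(condensed here from mm_inline.h for reference; it is not part of this
diff) keeps compiling against the out-of-line definition:

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	/* MGLRU path: now an out-of-line call into mm/vmscan.c */
	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

The cost is an extra function call on this path; since later patches make
the body arch-dependent anyway, inlining it in every translation unit no
longer pays.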
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index ff3f3f23f649..4dc2ab95d612 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -217,52 +217,7 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
-static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
-{
- unsigned long seq;
- unsigned long flags;
- int gen = folio_lru_gen(folio);
- int type = folio_is_file_lru(folio);
- int zone = folio_zonenum(folio);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-
- VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
-
- if (folio_test_unevictable(folio) || !lrugen->enabled)
- return false;
- /*
- * There are three common cases for this page:
- * 1. If it's hot, e.g., freshly faulted in or previously hot and
- * migrated, add it to the youngest generation.
- * 2. If it's cold but can't be evicted immediately, i.e., an anon page
- * not in swapcache or a dirty page pending writeback, add it to the
- * second oldest generation.
- * 3. Everything else (clean, cold) is added to the oldest generation.
- */
- if (folio_test_active(folio))
- seq = lrugen->max_seq;
- else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
- (folio_test_reclaim(folio) &&
- (folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->min_seq[type] + 1;
- else
- seq = lrugen->min_seq[type];
-
- gen = lru_gen_from_seq(seq);
- flags = (gen + 1UL) << LRU_GEN_PGOFF;
- /* see the comment on MIN_NR_GENS about PG_active */
- set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
-
- lru_gen_update_size(lruvec, folio, -1, gen);
- /* for folio_rotate_reclaimable() */
- if (reclaiming)
- list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
- else
- list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
-
- return true;
-}
-
+bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming);
static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
unsigned long flags;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5b7b8d4f5297..f47d80ae77ef 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3737,6 +3737,47 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
return new_gen;
}
+static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
+
+ if (!pte_present(pte) || is_zero_pfn(pfn))
+ return -1;
+
+ if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
+ return -1;
+
+ if (WARN_ON_ONCE(!pfn_valid(pfn)))
+ return -1;
+
+ return pfn;
+}
+
+static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
+ struct pglist_data *pgdat, bool can_swap)
+{
+ struct folio *folio;
+
+ /* try to avoid unnecessary memory loads */
+ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
+ return NULL;
+
+ folio = pfn_folio(pfn);
+ if (folio_nid(folio) != pgdat->node_id)
+ return NULL;
+
+ if (folio_memcg_rcu(folio) != memcg)
+ return NULL;
+
+ /* file VMAs can contain anon pages from COW */
+ if (!folio_is_file_lru(folio) && !can_swap)
+ return NULL;
+
+ return folio;
+}
+
static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
int old_gen, int new_gen)
{
@@ -3843,23 +3884,6 @@ static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk
return false;
}
-static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long pfn = pte_pfn(pte);
-
- VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
-
- if (!pte_present(pte) || is_zero_pfn(pfn))
- return -1;
-
- if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
- return -1;
-
- if (WARN_ON_ONCE(!pfn_valid(pfn)))
- return -1;
-
- return pfn;
-}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
@@ -3881,29 +3905,6 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
}
#endif
-static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
- struct pglist_data *pgdat, bool can_swap)
-{
- struct folio *folio;
-
- /* try to avoid unnecessary memory loads */
- if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
- return NULL;
-
- folio = pfn_folio(pfn);
- if (folio_nid(folio) != pgdat->node_id)
- return NULL;
-
- if (folio_memcg_rcu(folio) != memcg)
- return NULL;
-
- /* file VMAs can contain anon pages from COW */
- if (!folio_is_file_lru(folio) && !can_swap)
- return NULL;
-
- return folio;
-}
-
static bool suitable_to_scan(int total, int young)
{
int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
@@ -5252,6 +5253,52 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
blk_finish_plug(&plug);
}
+bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+ * There are three common cases for this page:
+ * 1. If it's hot, e.g., freshly faulted in or previously hot and
+ * migrated, add it to the youngest generation.
+ * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+ * not in swapcache or a dirty page pending writeback, add it to the
+ * second oldest generation.
+ * 3. Everything else (clean, cold) is added to the oldest generation.
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ seq = lrugen->min_seq[type] + 1;
+ else
+ seq = lrugen->min_seq[type];
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+
+ return true;
+}
+
/******************************************************************************
* state change
******************************************************************************/
--
2.39.2