From: Chen Ridong <chenridong@huaweicloud.com>
To: hannes@cmpxchg.org, mhocko@kernel.org, roman.gushchin@linux.dev,
shakeel.butt@linux.dev, muchun.song@linux.dev,
akpm@linux-foundation.org, axelrasmussen@google.com,
yuanchu@google.com, weixugc@google.com, david@kernel.org,
zhengqi.arch@bytedance.com, lorenzo.stoakes@oracle.com
Cc: cgroups@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, lujialin4@huawei.com,
chenridong@huaweicloud.com
Subject: [PATCH -next v2 2/2] memcg: remove mem_cgroup_size()
Date: Wed, 10 Dec 2025 07:11:42 +0000
Message-ID: <20251210071142.2043478-3-chenridong@huaweicloud.com>
In-Reply-To: <20251210071142.2043478-1-chenridong@huaweicloud.com>
From: Chen Ridong <chenridong@huawei.com>

The mem_cgroup_size() helper is used only in apply_proportional_protection()
to read the current memory usage. Its semantics are unclear, and it is
inconsistent with the other call sites, which call page_counter_read()
directly for the same purpose.
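
For context, page_counter_read() is itself only a lockless read of the
counter's usage field, so the helper added nothing beyond indirection. A
minimal sketch, paraphrased from include/linux/page_counter.h (shown here
for illustration only, not part of this patch):

	static inline unsigned long page_counter_read(struct page_counter *counter)
	{
		/* Plain atomic read of the current usage, in pages. */
		return atomic_long_read(&counter->usage);
	}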

Remove this helper and replace its single use with page_counter_read() for
clarity. Additionally, rename the local variable 'cgroup_size' to 'usage'
to better reflect its meaning.

This change is safe because, in apply_proportional_protection(),
page_counter_read() is only reached when memcg is enabled: the call now
sits inside a CONFIG_MEMCG-guarded section.
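
In other words, with the #ifdef added below, a !CONFIG_MEMCG build compiles
the function down to a plain pass-through (an illustrative sketch, not part
of the diff):

	static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
							   struct scan_control *sc,
							   unsigned long scan)
	{
		/* Everything between #ifdef CONFIG_MEMCG and #endif drops out. */
		return scan;
	}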

No functional changes intended.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
---
include/linux/memcontrol.h | 7 -------
mm/memcontrol.c | 5 -----
mm/vmscan.c | 8 +++++---
3 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6a48398a1f4e..bedeb606c691 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -919,8 +919,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -1328,11 +1326,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return 0;
}
-static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
- return 0;
-}
-
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dbe7d8f93072..659ce171b1b3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1621,11 +1621,6 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return max;
}
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
- return page_counter_read(&memcg->memory);
-}
-
void __memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event, bool allow_spinning)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..fe48d0376e7c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2451,6 +2451,7 @@ static inline void calculate_pressure_balance(struct scan_control *sc,
static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
struct scan_control *sc, unsigned long scan)
{
+#ifdef CONFIG_MEMCG
unsigned long min, low;
mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low);
@@ -2485,7 +2486,7 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
* again by how much of the total memory used is under
* hard protection.
*/
- unsigned long cgroup_size = mem_cgroup_size(memcg);
+ unsigned long usage = page_counter_read(&memcg->memory);
unsigned long protection;
/* memory.low scaling, make sure we retry before OOM */
@@ -2497,9 +2498,9 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
}
/* Avoid TOCTOU with earlier protection check */
- cgroup_size = max(cgroup_size, protection);
+ usage = max(usage, protection);
- scan -= scan * protection / (cgroup_size + 1);
+ scan -= scan * protection / (usage + 1);
/*
* Minimally target SWAP_CLUSTER_MAX pages to keep
@@ -2508,6 +2509,7 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
*/
scan = max(scan, SWAP_CLUSTER_MAX);
}
+#endif
return scan;
}
--
2.34.1