* [PATCH] mm: shmem: extend shmem_unused_huge_shrink() to all sizes
From: Hugh Dickins
Date: 2024-08-25 23:25 UTC
To: Andrew Morton
Cc: Baolin Wang, Kirill A. Shutemov, Hugh Dickins, linux-kernel,
linux-mm
Although shmem_get_folio_gfp() is correctly putting inodes on the
shrinklist according to the folio size, shmem_unused_huge_shrink()
was still dealing with that shrinklist in terms of HPAGE_PMD_SIZE.

Generalize that; and to handle the mixture of sizes more sensibly,
have shmem_alloc_and_add_folio() give it a number of pages to be freed
(approximate: no need to minimize that with an exact calculation)
instead of a number of inodes to split.
Signed-off-by: Hugh Dickins <hughd@google.com>
---
This patch would most naturally go into mm-unstable as 10/9 over
Baolin's "support large folio swap-out and swap-in for shmem" series.
mm/shmem.c | 45 ++++++++++++++++++++-------------------------
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 4dd0570962fa..4c9921c234b7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -636,15 +636,14 @@ static const char *shmem_format_huge(int huge)
 #endif
 
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
-		struct shrink_control *sc, unsigned long nr_to_split)
+		struct shrink_control *sc, unsigned long nr_to_free)
 {
 	LIST_HEAD(list), *pos, *next;
-	LIST_HEAD(to_remove);
 	struct inode *inode;
 	struct shmem_inode_info *info;
 	struct folio *folio;
 	unsigned long batch = sc ? sc->nr_to_scan : 128;
-	int split = 0;
+	unsigned long split = 0, freed = 0;
 
 	if (list_empty(&sbinfo->shrinklist))
 		return SHRINK_STOP;
@@ -662,13 +661,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 			goto next;
 		}
 
-		/* Check if there's anything to gain */
-		if (round_up(inode->i_size, PAGE_SIZE) ==
-				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-			list_move(&info->shrinklist, &to_remove);
-			goto next;
-		}
-
 		list_move(&info->shrinklist, &list);
 next:
 		sbinfo->shrinklist_len--;
@@ -677,34 +669,36 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 	}
 	spin_unlock(&sbinfo->shrinklist_lock);
 
-	list_for_each_safe(pos, next, &to_remove) {
-		info = list_entry(pos, struct shmem_inode_info, shrinklist);
-		inode = &info->vfs_inode;
-		list_del_init(&info->shrinklist);
-		iput(inode);
-	}
-
 	list_for_each_safe(pos, next, &list) {
+		pgoff_t next, end;
+		loff_t i_size;
 		int ret;
-		pgoff_t index;
 
 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 		inode = &info->vfs_inode;
 
-		if (nr_to_split && split >= nr_to_split)
+		if (nr_to_free && freed >= nr_to_free)
 			goto move_back;
 
-		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
-		folio = filemap_get_folio(inode->i_mapping, index);
-		if (IS_ERR(folio))
+		i_size = i_size_read(inode);
+		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
+		if (!folio || xa_is_value(folio))
 			goto drop;
 
-		/* No huge page at the end of the file: nothing to split */
+		/* No large page at the end of the file: nothing to split */
 		if (!folio_test_large(folio)) {
 			folio_put(folio);
 			goto drop;
 		}
 
+		/* Check if there is anything to gain from splitting */
+		next = folio_next_index(folio);
+		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
+		if (end <= folio->index || end >= next) {
+			folio_put(folio);
+			goto drop;
+		}
+
 		/*
 		 * Move the inode on the list back to shrinklist if we failed
 		 * to lock the page at this time.
@@ -725,6 +719,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		if (ret)
 			goto move_back;
 
+		freed += next - end;
 		split++;
 drop:
 		list_del_init(&info->shrinklist);
@@ -769,7 +764,7 @@ static long shmem_unused_huge_count(struct super_block *sb,
 #define shmem_huge SHMEM_HUGE_DENY
 
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
-		struct shrink_control *sc, unsigned long nr_to_split)
+		struct shrink_control *sc, unsigned long nr_to_free)
 {
 	return 0;
 }
@@ -1852,7 +1847,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 		 * Try to reclaim some space by splitting a few
 		 * large folios beyond i_size on the filesystem.
 		 */
-		shmem_unused_huge_shrink(sbinfo, NULL, 2);
+		shmem_unused_huge_shrink(sbinfo, NULL, pages);
 		/*
 		 * And do a shmem_recalc_inode() to account for freed pages:
 		 * except our folio is there in cache, so not quite balanced.
-- 
2.35.3
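
The behavioural change in the last hunk is that the caller now asks the
shrinker for a number of pages rather than for splitting two inodes. To
see how the new stop condition behaves, here is a tiny userspace
simulation, again purely illustrative with hypothetical values, not
kernel code:

#include <stdio.h>

/* Hypothetical shrinklist entries: one large folio at EOF, sizes in pages. */
struct entry {
	unsigned long index;	/* folio->index */
	unsigned long nr;	/* folio_nr_pages() */
	unsigned long end;	/* first page offset that may be freed */
};

int main(void)
{
	struct entry list[] = {
		{ .index = 0,  .nr = 16,  .end = 3 },	/* gain 13 pages */
		{ .index = 0,  .nr = 512, .end = 130 },	/* gain 382 pages */
		{ .index = 64, .nr = 64,  .end = 100 },	/* gain 28 pages */
	};
	unsigned long nr_to_free = 300, freed = 0, split = 0;

	for (unsigned long i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		struct entry *e = &list[i];
		unsigned long next = e->index + e->nr;

		if (nr_to_free && freed >= nr_to_free)
			break;			/* freed enough: stop early */
		if (e->end <= e->index || e->end >= next)
			continue;		/* nothing to gain from splitting */
		freed += next - e->end;		/* approximate, as the log says */
		split++;
	}
	printf("split %lu folios, freed ~%lu pages\n", split, freed);
	return 0;
}

This prints "split 2 folios, freed ~395 pages": the walk may overshoot
nr_to_free a little, which is fine since the count is approximate anyway.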
* Re: [PATCH] mm: shmem: extend shmem_unused_huge_shrink() to all sizes
From: David Hildenbrand
Date: 2024-08-26 6:53 UTC
To: Hugh Dickins, Andrew Morton
Cc: Baolin Wang, Kirill A. Shutemov, linux-kernel, linux-mm
On 26.08.24 01:25, Hugh Dickins wrote:
> Although shmem_get_folio_gfp() is correctly putting inodes on the
> shrinklist according to the folio size, shmem_unused_huge_shrink()
> was still dealing with that shrinklist in terms of HPAGE_PMD_SIZE.
>
> Generalize that; and to handle the mixture of sizes more sensibly,
> have shmem_alloc_and_add_folio() give it a number of pages to be freed
> (approximate: no need to minimize that with an exact calculation)
> instead of a number of inodes to split.
That might be worth a comment in the code.

[...]

> -		/* No huge page at the end of the file: nothing to split */
> +		/* No large page at the end of the file: nothing to split */

s/large page/large folio/
Or simply "Nothing to split."

[...]

> +		/* Check if there is anything to gain from splitting */
> +		next = folio_next_index(folio);
> +		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
> +		if (end <= folio->index || end >= next) {
> +			folio_put(folio);
> +			goto drop;
> +		}
> +

Looks sensible to me
Reviewed-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb