From mboxrd@z Thu Jan 1 00:00:00 1970 From: Muchun Song Date: Wed, 26 Jul 2023 15:10:21 +0800 Subject: [Cluster-devel] [PATCH v2 19/47] mm: thp: dynamically allocate the thp-related shrinkers In-Reply-To: <20230724094354.90817-20-zhengqi.arch@bytedance.com> References: <20230724094354.90817-1-zhengqi.arch@bytedance.com> <20230724094354.90817-20-zhengqi.arch@bytedance.com> Message-ID: List-Id: To: cluster-devel.redhat.com MIME-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit On 2023/7/24 17:43, Qi Zheng wrote: > Use new APIs to dynamically allocate the thp-zero and thp-deferred_split > shrinkers. > > Signed-off-by: Qi Zheng > --- > mm/huge_memory.c | 69 +++++++++++++++++++++++++++++++----------------- > 1 file changed, 45 insertions(+), 24 deletions(-) > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index 8c94b34024a2..4db5a1834d81 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -65,7 +65,11 @@ unsigned long transparent_hugepage_flags __read_mostly = > (1< (1< > -static struct shrinker deferred_split_shrinker; > +static struct shrinker *deferred_split_shrinker; > +static unsigned long deferred_split_count(struct shrinker *shrink, > + struct shrink_control *sc); > +static unsigned long deferred_split_scan(struct shrinker *shrink, > + struct shrink_control *sc); > > static atomic_t huge_zero_refcount; > struct page *huge_zero_page __read_mostly; > @@ -229,11 +233,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, > return 0; > } > > -static struct shrinker huge_zero_page_shrinker = { > - .count_objects = shrink_huge_zero_page_count, > - .scan_objects = shrink_huge_zero_page_scan, > - .seeks = DEFAULT_SEEKS, > -}; > +static struct shrinker *huge_zero_page_shrinker; Same as patch #17. 
> > #ifdef CONFIG_SYSFS > static ssize_t enabled_show(struct kobject *kobj, > @@ -454,6 +454,40 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) > } > #endif /* CONFIG_SYSFS */ > > +static int thp_shrinker_init(void) Better to declare it as __init. > +{ > + huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero"); > + if (!huge_zero_page_shrinker) > + return -ENOMEM; > + > + deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE | > + SHRINKER_MEMCG_AWARE | > + SHRINKER_NONSLAB, > + "thp-deferred_split"); > + if (!deferred_split_shrinker) { > + shrinker_free_non_registered(huge_zero_page_shrinker); > + return -ENOMEM; > + } > + > + huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count; > + huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan; > + huge_zero_page_shrinker->seeks = DEFAULT_SEEKS; > + shrinker_register(huge_zero_page_shrinker); > + > + deferred_split_shrinker->count_objects = deferred_split_count; > + deferred_split_shrinker->scan_objects = deferred_split_scan; > + deferred_split_shrinker->seeks = DEFAULT_SEEKS; > + shrinker_register(deferred_split_shrinker); > + > + return 0; > +} > + > +static void thp_shrinker_exit(void) Same here. 
> +{ > + shrinker_unregister(huge_zero_page_shrinker); > + shrinker_unregister(deferred_split_shrinker); > +} > + > static int __init hugepage_init(void) > { > int err; > @@ -482,12 +516,9 @@ static int __init hugepage_init(void) > if (err) > goto err_slab; > > - err = register_shrinker(&huge_zero_page_shrinker, "thp-zero"); > - if (err) > - goto err_hzp_shrinker; > - err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split"); > + err = thp_shrinker_init(); > if (err) > - goto err_split_shrinker; > + goto err_shrinker; > > /* > * By default disable transparent hugepages on smaller systems, > @@ -505,10 +536,8 @@ static int __init hugepage_init(void) > > return 0; > err_khugepaged: > - unregister_shrinker(&deferred_split_shrinker); > -err_split_shrinker: > - unregister_shrinker(&huge_zero_page_shrinker); > -err_hzp_shrinker: > + thp_shrinker_exit(); > +err_shrinker: > khugepaged_destroy(); > err_slab: > hugepage_exit_sysfs(hugepage_kobj); > @@ -2851,7 +2880,7 @@ void deferred_split_folio(struct folio *folio) > #ifdef CONFIG_MEMCG > if (memcg) > set_shrinker_bit(memcg, folio_nid(folio), > - deferred_split_shrinker.id); > + deferred_split_shrinker->id); > #endif > } > spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); > @@ -2925,14 +2954,6 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, > return split; > } > > -static struct shrinker deferred_split_shrinker = { > - .count_objects = deferred_split_count, > - .scan_objects = deferred_split_scan, > - .seeks = DEFAULT_SEEKS, > - .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | > - SHRINKER_NONSLAB, > -}; > - > #ifdef CONFIG_DEBUG_FS > static void split_huge_pages_all(void) > {