* [PATCH 0/3] mm: remove zpool
@ 2025-08-29 16:15 Johannes Weiner
  2025-08-29 16:15 ` [PATCH 1/3] mm: zswap: interact directly with zsmalloc Johannes Weiner
                   ` (5 more replies)
  0 siblings, 6 replies; 21+ messages in thread
From: Johannes Weiner @ 2025-08-29 16:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yosry Ahmed, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

zpool is an indirection layer for zswap to switch between multiple
allocator backends at runtime. Since 6.15, zsmalloc is the only
allocator left in-tree, so there is no point in keeping zpool around.
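
For context, the indirection is a per-driver vtable dispatch; with
zsmalloc as the only remaining backend, every zpool call is just a
wrapper around the corresponding zs_* call. Both sides are visible in
the removal diff in patch 2, e.g.:

	/* zpool frontend (mm/zpool.c) */
	void zpool_obj_write(struct zpool *zpool, unsigned long handle,
			     void *handle_mem, size_t mem_len)
	{
		zpool->driver->obj_write(zpool->pool, handle, handle_mem, mem_len);
	}

	/* zsmalloc's zpool driver hook (mm/zsmalloc.c) */
	static void zs_zpool_obj_write(void *pool, unsigned long handle,
				       void *handle_mem, size_t mem_len)
	{
		zs_obj_write(pool, handle, handle_mem, mem_len);
	}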

Based on mm-everything-2025-08-29-00-23.

 Documentation/admin-guide/mm/zswap.rst             |  33 +--
 Documentation/core-api/mm-api.rst                  |   1 -
 Documentation/driver-api/crypto/iaa/iaa-crypto.rst |   2 -
 MAINTAINERS                                        |   2 -
 arch/loongarch/configs/loongson3_defconfig         |   1 -
 include/linux/zpool.h                              |  86 ------
 mm/Kconfig                                         |  49 +--
 mm/Makefile                                        |   1 -
 mm/zpdesc.h                                        |  14 +-
 mm/zpool.c                                         | 328 ---------------------
 mm/zsmalloc.c                                      |  79 -----
 mm/zswap.c                                         | 202 ++++---------
 tools/testing/selftests/zram/README                |   1 -
 13 files changed, 84 insertions(+), 715 deletions(-)




* [PATCH 1/3] mm: zswap: interact directly with zsmalloc
  2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
@ 2025-08-29 16:15 ` Johannes Weiner
  2025-09-05 18:53   ` Yosry Ahmed
  2025-08-29 16:15 ` [PATCH 2/3] mm: remove unused zpool layer Johannes Weiner
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Johannes Weiner @ 2025-08-29 16:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yosry Ahmed, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

zswap goes through the zpool layer to enable runtime-switching of
allocator backends for compressed data. However, since zbud and z3fold
were removed in 6.15, zsmalloc has been the only option available.

As such, the zpool indirection is unnecessary. Make zswap deal with
zsmalloc directly. This is comparable to zram, which also directly
interacts with zsmalloc and has never supported a different backend.
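
At the API level, the conversion is a straight one-to-one substitution
(see the diff below); the only notable difference is that zs_malloc()
returns the handle (or an error value) rather than filling in a handle
pointer:

	zpool_create_pool(type, name, gfp)  ->  zs_create_pool(name)
	zpool_malloc(zpool, size, gfp, &handle, nid)
	                                    ->  handle = zs_malloc(zs_pool, size, gfp, nid)
	zpool_obj_write()                   ->  zs_obj_write()
	zpool_obj_read_begin()/_end()       ->  zs_obj_read_begin()/_end()
	zpool_free(zpool, handle)           ->  zs_free(zs_pool, handle)
	zpool_get_total_pages(zpool)        ->  zs_get_total_pages(zs_pool)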

Note that this does not preclude future improvements and experiments
with different allocation strategies. Should it become necessary, it's
possible to provide an alternate implementation for the zsmalloc API,
selectable at compile time. However, zsmalloc is also rather mature
and feature rich, with years of widespread production exposure; it's
encouraged to make incremental improvements rather than fork it.

In any case, the complexity of runtime pluggability seems excessive
and unjustified at this time. Switch zswap to zsmalloc to remove the
last user of the zpool API.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/zswap.c | 202 ++++++++++++++---------------------------------------
 1 file changed, 54 insertions(+), 148 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index e5e1f5687f5e..c88ad61b232c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -25,7 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/mempolicy.h>
 #include <linux/mempool.h>
-#include <linux/zpool.h>
 #include <crypto/acompress.h>
 #include <linux/zswap.h>
 #include <linux/mm_types.h>
@@ -35,6 +34,7 @@
 #include <linux/pagemap.h>
 #include <linux/workqueue.h>
 #include <linux/list_lru.h>
+#include <linux/zsmalloc.h>
 
 #include "swap.h"
 #include "internal.h"
@@ -107,16 +107,6 @@ static const struct kernel_param_ops zswap_compressor_param_ops = {
 module_param_cb(compressor, &zswap_compressor_param_ops,
 		&zswap_compressor, 0644);
 
-/* Compressed storage zpool to use */
-static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
-static int zswap_zpool_param_set(const char *, const struct kernel_param *);
-static const struct kernel_param_ops zswap_zpool_param_ops = {
-	.set =		zswap_zpool_param_set,
-	.get =		param_get_charp,
-	.free =		param_free_charp,
-};
-module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
-
 /* The maximum percentage of memory that the compressed pool can occupy */
 static unsigned int zswap_max_pool_percent = 20;
 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
@@ -161,7 +151,7 @@ struct crypto_acomp_ctx {
  * needs to be verified that it's still valid in the tree.
  */
 struct zswap_pool {
-	struct zpool *zpool;
+	struct zs_pool *zs_pool;
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
 	struct percpu_ref ref;
 	struct list_head list;
@@ -193,7 +183,7 @@ static struct shrinker *zswap_shrinker;
  *              logic if referenced is unset. See comments in the shrinker
  *              section for context.
  * pool - the zswap_pool the entry's data is in
- * handle - zpool allocation handle that stores the compressed page data
+ * handle - zsmalloc allocation handle that stores the compressed page data
  * objcg - the obj_cgroup that the compressed memory is charged to
  * lru - handle to the pool's lru used to evict pages.
  */
@@ -214,7 +204,7 @@ static unsigned int nr_zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
-/* pool counter to provide unique names to zpool */
+/* pool counter to provide unique names to zsmalloc */
 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 enum zswap_init_type {
@@ -241,32 +231,22 @@ static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
 		>> SWAP_ADDRESS_SPACE_SHIFT];
 }
 
-#define zswap_pool_debug(msg, p)				\
-	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
-		 zpool_get_type((p)->zpool))
+#define zswap_pool_debug(msg, p)			\
+	pr_debug("%s pool %s\n", msg, (p)->tfm_name)
 
 /*********************************
 * pool functions
 **********************************/
 static void __zswap_pool_empty(struct percpu_ref *ref);
 
-static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+static struct zswap_pool *zswap_pool_create(char *compressor)
 {
 	struct zswap_pool *pool;
 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
-	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 	int ret, cpu;
 
-	if (!zswap_has_pool) {
-		/* if either are unset, pool initialization failed, and we
-		 * need both params to be set correctly before trying to
-		 * create a pool.
-		 */
-		if (!strcmp(type, ZSWAP_PARAM_UNSET))
-			return NULL;
-		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
-			return NULL;
-	}
+	if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET))
+		return NULL;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
@@ -274,12 +254,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 
 	/* unique name for each pool specifically required by zsmalloc */
 	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
-	pool->zpool = zpool_create_pool(type, name, gfp);
-	if (!pool->zpool) {
-		pr_err("%s zpool not available\n", type);
+	pool->zs_pool = zs_create_pool(name);
+	if (!pool->zs_pool)
 		goto error;
-	}
-	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
 
 	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 
@@ -315,52 +292,29 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
-	if (pool->zpool)
-		zpool_destroy_pool(pool->zpool);
+	if (pool->zs_pool)
+		zs_destroy_pool(pool->zs_pool);
 	kfree(pool);
 	return NULL;
 }
 
 static struct zswap_pool *__zswap_pool_create_fallback(void)
 {
-	bool has_comp, has_zpool;
-
-	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
-	if (!has_comp && strcmp(zswap_compressor,
-				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
+	if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
+	    strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
 		pr_err("compressor %s not available, using default %s\n",
 		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
 		param_free_charp(&zswap_compressor);
 		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
-		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
-	}
-	if (!has_comp) {
-		pr_err("default compressor %s not available\n",
-		       zswap_compressor);
-		param_free_charp(&zswap_compressor);
-		zswap_compressor = ZSWAP_PARAM_UNSET;
-	}
-
-	has_zpool = zpool_has_pool(zswap_zpool_type);
-	if (!has_zpool && strcmp(zswap_zpool_type,
-				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
-		pr_err("zpool %s not available, using default %s\n",
-		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
-		param_free_charp(&zswap_zpool_type);
-		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
-		has_zpool = zpool_has_pool(zswap_zpool_type);
-	}
-	if (!has_zpool) {
-		pr_err("default zpool %s not available\n",
-		       zswap_zpool_type);
-		param_free_charp(&zswap_zpool_type);
-		zswap_zpool_type = ZSWAP_PARAM_UNSET;
+		if (!crypto_has_acomp(zswap_compressor, 0, 0)) {
+			pr_err("default compressor %s not available\n",
+			       zswap_compressor);
+			zswap_compressor = ZSWAP_PARAM_UNSET;
+			return NULL;
+		}
 	}
 
-	if (!has_comp || !has_zpool)
-		return NULL;
-
-	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
+	return zswap_pool_create(zswap_compressor);
 }
 
 static void zswap_pool_destroy(struct zswap_pool *pool)
@@ -370,7 +324,7 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 	free_percpu(pool->acomp_ctx);
 
-	zpool_destroy_pool(pool->zpool);
+	zs_destroy_pool(pool->zs_pool);
 	kfree(pool);
 }
 
@@ -462,7 +416,7 @@ static struct zswap_pool *zswap_pool_current_get(void)
 }
 
 /* type and compressor must be null-terminated */
-static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+static struct zswap_pool *zswap_pool_find_get(char *compressor)
 {
 	struct zswap_pool *pool;
 
@@ -471,8 +425,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 		if (strcmp(pool->tfm_name, compressor))
 			continue;
-		if (strcmp(zpool_get_type(pool->zpool), type))
-			continue;
 		/* if we can't get it, it's about to be destroyed */
 		if (!zswap_pool_tryget(pool))
 			continue;
@@ -499,7 +451,7 @@ unsigned long zswap_total_pages(void)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(pool, &zswap_pools, list)
-		total += zpool_get_total_pages(pool->zpool);
+		total += zs_get_total_pages(pool->zs_pool);
 	rcu_read_unlock();
 
 	return total;
@@ -524,33 +476,22 @@ static bool zswap_check_limits(void)
 * param callbacks
 **********************************/
 
-static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
-{
-	/* no change required */
-	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
-		return false;
-	return true;
-}
-
-/* val must be a null-terminated string */
-static int __zswap_param_set(const char *val, const struct kernel_param *kp,
-			     char *type, char *compressor)
+static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp)
 {
 	struct zswap_pool *pool, *put_pool = NULL;
 	char *s = strstrip((char *)val);
+	bool create_pool = false;
 	int ret = 0;
-	bool new_pool = false;
 
 	mutex_lock(&zswap_init_lock);
 	switch (zswap_init_state) {
 	case ZSWAP_UNINIT:
-		/* if this is load-time (pre-init) param setting,
-		 * don't create a pool; that's done during init.
-		 */
+		/* Handled in zswap_setup() */
 		ret = param_set_charp(s, kp);
 		break;
 	case ZSWAP_INIT_SUCCEED:
-		new_pool = zswap_pool_changed(s, kp);
+		if (!zswap_has_pool || strcmp(s, *(char **)kp->arg))
+			create_pool = true;
 		break;
 	case ZSWAP_INIT_FAILED:
 		pr_err("can't set param, initialization failed\n");
@@ -558,30 +499,17 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	}
 	mutex_unlock(&zswap_init_lock);
 
-	/* no need to create a new pool, return directly */
-	if (!new_pool)
+	if (!create_pool)
 		return ret;
 
-	if (!type) {
-		if (!zpool_has_pool(s)) {
-			pr_err("zpool %s not available\n", s);
-			return -ENOENT;
-		}
-		type = s;
-	} else if (!compressor) {
-		if (!crypto_has_acomp(s, 0, 0)) {
-			pr_err("compressor %s not available\n", s);
-			return -ENOENT;
-		}
-		compressor = s;
-	} else {
-		WARN_ON(1);
-		return -EINVAL;
+	if (!crypto_has_acomp(s, 0, 0)) {
+		pr_err("compressor %s not available\n", s);
+		return -ENOENT;
 	}
 
 	spin_lock_bh(&zswap_pools_lock);
 
-	pool = zswap_pool_find_get(type, compressor);
+	pool = zswap_pool_find_get(s);
 	if (pool) {
 		zswap_pool_debug("using existing", pool);
 		WARN_ON(pool == zswap_pool_current());
@@ -591,7 +519,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	spin_unlock_bh(&zswap_pools_lock);
 
 	if (!pool)
-		pool = zswap_pool_create(type, compressor);
+		pool = zswap_pool_create(s);
 	else {
 		/*
 		 * Restore the initial ref dropped by percpu_ref_kill()
@@ -616,7 +544,8 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		list_add_rcu(&pool->list, &zswap_pools);
 		zswap_has_pool = true;
 	} else if (pool) {
-		/* add the possibly pre-existing pool to the end of the pools
+		/*
+		 * Add the possibly pre-existing pool to the end of the pools
 		 * list; if it's new (and empty) then it'll be removed and
 		 * destroyed by the put after we drop the lock
 		 */
@@ -626,18 +555,8 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 
 	spin_unlock_bh(&zswap_pools_lock);
 
-	if (!zswap_has_pool && !pool) {
-		/* if initial pool creation failed, and this pool creation also
-		 * failed, maybe both compressor and zpool params were bad.
-		 * Allow changing this param, so pool creation will succeed
-		 * when the other param is changed. We already verified this
-		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
-		 * checks above.
-		 */
-		ret = param_set_charp(s, kp);
-	}
-
-	/* drop the ref from either the old current pool,
+	/*
+	 * Drop the ref from either the old current pool,
 	 * or the new pool we failed to add
 	 */
 	if (put_pool)
@@ -646,18 +565,6 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	return ret;
 }
 
-static int zswap_compressor_param_set(const char *val,
-				      const struct kernel_param *kp)
-{
-	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
-}
-
-static int zswap_zpool_param_set(const char *val,
-				 const struct kernel_param *kp)
-{
-	return __zswap_param_set(val, kp, NULL, zswap_compressor);
-}
-
 static int zswap_enabled_param_set(const char *val,
 				   const struct kernel_param *kp)
 {
@@ -801,13 +708,13 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
 }
 
 /*
- * Carries out the common pattern of freeing and entry's zpool allocation,
+ * Carries out the common pattern of freeing an entry's zsmalloc allocation,
  * freeing the entry itself, and decrementing the number of stored pages.
  */
 static void zswap_entry_free(struct zswap_entry *entry)
 {
 	zswap_lru_del(&zswap_list_lru, entry);
-	zpool_free(entry->pool->zpool, entry->handle);
+	zs_free(entry->pool->zs_pool, entry->handle);
 	zswap_pool_put(entry->pool);
 	if (entry->objcg) {
 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
@@ -949,7 +856,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	int comp_ret = 0, alloc_ret = 0;
 	unsigned int dlen = PAGE_SIZE;
 	unsigned long handle;
-	struct zpool *zpool;
 	gfp_t gfp;
 	u8 *dst;
 	bool mapped = false;
@@ -997,13 +903,14 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 		mapped = true;
 	}
 
-	zpool = pool->zpool;
 	gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
-	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle, page_to_nid(page));
-	if (alloc_ret)
+	handle = zs_malloc(pool->zs_pool, dlen, gfp, page_to_nid(page));
+	if (IS_ERR_VALUE(handle)) {
+		alloc_ret = PTR_ERR((void *)handle);
 		goto unlock;
+	}
 
-	zpool_obj_write(zpool, handle, dst, dlen);
+	zs_obj_write(pool->zs_pool, handle, dst, dlen);
 	entry->handle = handle;
 	entry->length = dlen;
 
@@ -1023,14 +930,14 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 
 static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 {
-	struct zpool *zpool = entry->pool->zpool;
+	struct zswap_pool *pool = entry->pool;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	int decomp_ret = 0, dlen = PAGE_SIZE;
 	u8 *src, *obj;
 
-	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
-	obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer);
+	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
+	obj = zs_obj_read_begin(pool->zs_pool, entry->handle, acomp_ctx->buffer);
 
 	/* zswap entries of length PAGE_SIZE are not compressed. */
 	if (entry->length == PAGE_SIZE) {
@@ -1039,7 +946,7 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	}
 
 	/*
-	 * zpool_obj_read_begin() might return a kmap address of highmem when
+	 * zs_obj_read_begin() might return a kmap address of highmem when
 	 * acomp_ctx->buffer is not used.  However, sg_init_one() does not
 	 * handle highmem addresses, so copy the object to acomp_ctx->buffer.
 	 */
@@ -1059,7 +966,7 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	dlen = acomp_ctx->req->dlen;
 
 read_done:
-	zpool_obj_read_end(zpool, entry->handle, obj);
+	zs_obj_read_end(pool->zs_pool, entry->handle, obj);
 	acomp_ctx_put_unlock(acomp_ctx);
 
 	if (!decomp_ret && dlen == PAGE_SIZE)
@@ -1576,7 +1483,7 @@ static bool zswap_store_page(struct page *page,
 	return true;
 
 store_failed:
-	zpool_free(pool->zpool, entry->handle);
+	zs_free(pool->zs_pool, entry->handle);
 compress_failed:
 	zswap_entry_cache_free(entry);
 	return false;
@@ -1906,8 +1813,7 @@ static int zswap_setup(void)
 
 	pool = __zswap_pool_create_fallback();
 	if (pool) {
-		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
-			zpool_get_type(pool->zpool));
+		pr_info("loaded using pool %s\n", pool->tfm_name);
 		list_add(&pool->list, &zswap_pools);
 		zswap_has_pool = true;
 		static_branch_enable(&zswap_ever_enabled);
-- 
2.51.0




* [PATCH 2/3] mm: remove unused zpool layer
  2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
  2025-08-29 16:15 ` [PATCH 1/3] mm: zswap: interact directly with zsmalloc Johannes Weiner
@ 2025-08-29 16:15 ` Johannes Weiner
  2025-08-29 19:07   ` SeongJae Park
  2025-09-05 18:58   ` Yosry Ahmed
  2025-08-29 16:15 ` [PATCH 3/3] mm: zpdesc: minor naming and comment corrections Johannes Weiner
                   ` (3 subsequent siblings)
  5 siblings, 2 replies; 21+ messages in thread
From: Johannes Weiner @ 2025-08-29 16:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yosry Ahmed, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

With zswap using zsmalloc directly, there are no more in-tree users of
this code. Remove it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 Documentation/admin-guide/mm/zswap.rst        |  33 +-
 Documentation/core-api/mm-api.rst             |   1 -
 .../driver-api/crypto/iaa/iaa-crypto.rst      |   2 -
 MAINTAINERS                                   |   2 -
 arch/loongarch/configs/loongson3_defconfig    |   1 -
 include/linux/zpool.h                         |  86 -----
 mm/Kconfig                                    |  49 +--
 mm/Makefile                                   |   1 -
 mm/zpool.c                                    | 328 ------------------
 mm/zsmalloc.c                                 |  79 -----
 tools/testing/selftests/zram/README           |   1 -
 11 files changed, 23 insertions(+), 560 deletions(-)
 delete mode 100644 include/linux/zpool.h
 delete mode 100644 mm/zpool.c

diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst
index fd3370aa43fe..283d77217c6f 100644
--- a/Documentation/admin-guide/mm/zswap.rst
+++ b/Documentation/admin-guide/mm/zswap.rst
@@ -53,26 +53,17 @@ Zswap receives pages for compression from the swap subsystem and is able to
 evict pages from its own compressed pool on an LRU basis and write them back to
 the backing swap device in the case that the compressed pool is full.
 
-Zswap makes use of zpool for the managing the compressed memory pool.  Each
-allocation in zpool is not directly accessible by address.  Rather, a handle is
+Zswap makes use of zsmalloc for managing the compressed memory pool.  Each
+allocation in zsmalloc is not directly accessible by address.  Rather, a handle is
 returned by the allocation routine and that handle must be mapped before being
 accessed.  The compressed memory pool grows on demand and shrinks as compressed
-pages are freed.  The pool is not preallocated.  By default, a zpool
-of type selected in ``CONFIG_ZSWAP_ZPOOL_DEFAULT`` Kconfig option is created,
-but it can be overridden at boot time by setting the ``zpool`` attribute,
-e.g. ``zswap.zpool=zsmalloc``. It can also be changed at runtime using the sysfs
-``zpool`` attribute, e.g.::
-
-	echo zsmalloc > /sys/module/zswap/parameters/zpool
-
-The zsmalloc type zpool has a complex compressed page storage method, and it
-can achieve great storage densities.
+pages are freed.  The pool is not preallocated.
 
 When a swap page is passed from swapout to zswap, zswap maintains a mapping
-of the swap entry, a combination of the swap type and swap offset, to the zpool
-handle that references that compressed swap page.  This mapping is achieved
-with a red-black tree per swap type.  The swap offset is the search key for the
-tree nodes.
+of the swap entry, a combination of the swap type and swap offset, to the
+zsmalloc handle that references that compressed swap page.  This mapping is
+achieved with a red-black tree per swap type.  The swap offset is the search
+key for the tree nodes.
 
 During a page fault on a PTE that is a swap entry, the swapin code calls the
 zswap load function to decompress the page into the page allocated by the page
@@ -96,11 +87,11 @@ It can also be changed at runtime using the sysfs "compressor"
 
 	echo lzo > /sys/module/zswap/parameters/compressor
 
-When the zpool and/or compressor parameter is changed at runtime, any existing
-compressed pages are not modified; they are left in their own zpool.  When a
-request is made for a page in an old zpool, it is uncompressed using its
-original compressor.  Once all pages are removed from an old zpool, the zpool
-and its compressor are freed.
+When the compressor parameter is changed at runtime, any existing compressed
+pages are not modified; they are left in their own pool.  When a request is
+made for a page in an old pool, it is uncompressed using its original
+compressor.  Once all pages are removed from an old pool, the pool and its
+compressor are freed.
 
 Some of the pages in zswap are same-value filled pages (i.e. contents of the
 page have same value or repetitive pattern). These pages include zero-filled
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 5063179cfc70..68193a4cfcf5 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -118,7 +118,6 @@ More Memory Management Functions
 .. kernel-doc:: mm/memremap.c
 .. kernel-doc:: mm/hugetlb.c
 .. kernel-doc:: mm/swap.c
-.. kernel-doc:: mm/zpool.c
 .. kernel-doc:: mm/memcontrol.c
 .. #kernel-doc:: mm/memory-tiers.c (build warnings)
 .. kernel-doc:: mm/shmem.c
diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
index 8e50b900d51c..f815d4fd8372 100644
--- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
@@ -476,7 +476,6 @@ To demonstrate that the following steps work as expected, these
   # echo 0 > /sys/module/zswap/parameters/enabled
   # echo 50 > /sys/module/zswap/parameters/max_pool_percent
   # echo deflate-iaa > /sys/module/zswap/parameters/compressor
-  # echo zsmalloc > /sys/module/zswap/parameters/zpool
   # echo 1 > /sys/module/zswap/parameters/enabled
   # echo 100 > /proc/sys/vm/swappiness
   # echo never > /sys/kernel/mm/transparent_hugepage/enabled
@@ -625,7 +624,6 @@ Now run the following zswap-specific setup commands to have zswap use
   echo 0 > /sys/module/zswap/parameters/enabled
   echo 50 > /sys/module/zswap/parameters/max_pool_percent
   echo deflate-iaa > /sys/module/zswap/parameters/compressor
-  echo zsmalloc > /sys/module/zswap/parameters/zpool
   echo 1 > /sys/module/zswap/parameters/enabled
 
   echo 100 > /proc/sys/vm/swappiness
diff --git a/MAINTAINERS b/MAINTAINERS
index e791f18b61d8..7ceda15372f9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -27877,9 +27877,7 @@ R:	Chengming Zhou <chengming.zhou@linux.dev>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/admin-guide/mm/zswap.rst
-F:	include/linux/zpool.h
 F:	include/linux/zswap.h
-F:	mm/zpool.c
 F:	mm/zswap.c
 F:	tools/testing/selftests/cgroup/test_zswap.c
 
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 34eaee0384c9..2b8df0e9e42a 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -106,7 +106,6 @@ CONFIG_CMDLINE_PARTITION=y
 CONFIG_IOSCHED_BFQ=y
 CONFIG_BFQ_GROUP_IOSCHED=y
 CONFIG_BINFMT_MISC=m
-CONFIG_ZPOOL=y
 CONFIG_ZSWAP=y
 CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
 CONFIG_ZSMALLOC=y
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
deleted file mode 100644
index 369ef068fad8..000000000000
--- a/include/linux/zpool.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * zpool memory storage api
- *
- * Copyright (C) 2014 Dan Streetman
- *
- * This is a common frontend for the zswap compressed memory storage
- * implementations.
- */
-
-#ifndef _ZPOOL_H_
-#define _ZPOOL_H_
-
-struct zpool;
-
-bool zpool_has_pool(char *type);
-
-struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
-
-const char *zpool_get_type(struct zpool *pool);
-
-void zpool_destroy_pool(struct zpool *pool);
-
-int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
-		 unsigned long *handle, const int nid);
-
-void zpool_free(struct zpool *pool, unsigned long handle);
-
-void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle,
-			   void *local_copy);
-
-void zpool_obj_read_end(struct zpool *zpool, unsigned long handle,
-			void *handle_mem);
-
-void zpool_obj_write(struct zpool *zpool, unsigned long handle,
-		     void *handle_mem, size_t mem_len);
-
-u64 zpool_get_total_pages(struct zpool *pool);
-
-
-/**
- * struct zpool_driver - driver implementation for zpool
- * @type:	name of the driver.
- * @list:	entry in the list of zpool drivers.
- * @create:	create a new pool.
- * @destroy:	destroy a pool.
- * @malloc:	allocate mem from a pool.
- * @free:	free mem from a pool.
- * @sleep_mapped: whether zpool driver can sleep during map.
- * @map:	map a handle.
- * @unmap:	unmap a handle.
- * @total_size:	get total size of a pool.
- *
- * This is created by a zpool implementation and registered
- * with zpool.
- */
-struct zpool_driver {
-	char *type;
-	struct module *owner;
-	atomic_t refcount;
-	struct list_head list;
-
-	void *(*create)(const char *name, gfp_t gfp);
-	void (*destroy)(void *pool);
-
-	int (*malloc)(void *pool, size_t size, gfp_t gfp,
-		      unsigned long *handle, const int nid);
-	void (*free)(void *pool, unsigned long handle);
-
-	void *(*obj_read_begin)(void *pool, unsigned long handle,
-				void *local_copy);
-	void (*obj_read_end)(void *pool, unsigned long handle,
-			     void *handle_mem);
-	void (*obj_write)(void *pool, unsigned long handle,
-			  void *handle_mem, size_t mem_len);
-
-	u64 (*total_pages)(void *pool);
-};
-
-void zpool_register_driver(struct zpool_driver *driver);
-
-int zpool_unregister_driver(struct zpool_driver *driver);
-
-bool zpool_can_sleep_mapped(struct zpool *pool);
-
-#endif
diff --git a/mm/Kconfig b/mm/Kconfig
index 4108bcd96784..b971d35c43c3 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -9,9 +9,6 @@ menu "Memory Management options"
 config ARCH_NO_SWAP
 	bool
 
-config ZPOOL
-	bool
-
 menuconfig SWAP
 	bool "Support for paging of anonymous memory (swap)"
 	depends on MMU && BLOCK && !ARCH_NO_SWAP
@@ -26,7 +23,7 @@ config ZSWAP
 	bool "Compressed cache for swap pages"
 	depends on SWAP
 	select CRYPTO
-	select ZPOOL
+	select ZSMALLOC
 	help
 	  A lightweight compressed cache for swap pages.  It takes
 	  pages that are in the process of being swapped out and attempts to
@@ -125,45 +122,18 @@ config ZSWAP_COMPRESSOR_DEFAULT
        default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
        default ""
 
-choice
-	prompt "Default allocator"
-	depends on ZSWAP
-	default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
-	help
-	  Selects the default allocator for the compressed cache for
-	  swap pages.
-	  The default is 'zbud' for compatibility, however please do
-	  read the description of each of the allocators below before
-	  making a right choice.
-
-	  The selection made here can be overridden by using the kernel
-	  command line 'zswap.zpool=' option.
+config ZSMALLOC
+	tristate
 
-config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
-	bool "zsmalloc"
-	select ZSMALLOC
-	help
-	  Use the zsmalloc allocator as the default allocator.
-endchoice
+if ZSMALLOC
 
-config ZSWAP_ZPOOL_DEFAULT
-       string
-       depends on ZSWAP
-       default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
-       default ""
+menu "Zsmalloc allocator options"
+	depends on ZSMALLOC
 
-config ZSMALLOC
-	tristate
-	prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM)
-	depends on MMU
-	help
-	  zsmalloc is a slab-based memory allocator designed to store
-	  pages of various compression levels efficiently. It achieves
-	  the highest storage density with the least amount of fragmentation.
+comment "Zsmalloc is a common backend allocator for zswap & zram"
 
 config ZSMALLOC_STAT
 	bool "Export zsmalloc statistics"
-	depends on ZSMALLOC
 	select DEBUG_FS
 	help
 	  This option enables code in the zsmalloc to collect various
@@ -175,7 +145,6 @@ config ZSMALLOC_CHAIN_SIZE
 	int "Maximum number of physical pages per-zspage"
 	default 8
 	range 4 16
-	depends on ZSMALLOC
 	help
 	  This option sets the upper limit on the number of physical pages
 	  that a zmalloc page (zspage) can consist of. The optimal zspage
@@ -190,6 +159,10 @@ config ZSMALLOC_CHAIN_SIZE
 
 	  For more information, see zsmalloc documentation.
 
+endmenu
+
+endif
+
 menu "Slab allocator options"
 
 config SLUB
diff --git a/mm/Makefile b/mm/Makefile
index ef54aa615d9d..21abb3353550 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -115,7 +115,6 @@ obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
 obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o
 obj-$(CONFIG_PAGE_OWNER) += page_owner.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
-obj-$(CONFIG_ZPOOL)	+= zpool.o
 obj-$(CONFIG_ZSMALLOC)	+= zsmalloc.o
 obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
 obj-$(CONFIG_CMA)	+= cma.o
diff --git a/mm/zpool.c b/mm/zpool.c
deleted file mode 100644
index 0a71d03369f1..000000000000
--- a/mm/zpool.c
+++ /dev/null
@@ -1,328 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * zpool memory storage api
- *
- * Copyright (C) 2014 Dan Streetman
- *
- * This is a common frontend for memory storage pool implementations.
- * Typically, this is used to store compressed memory.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/zpool.h>
-
-struct zpool {
-	struct zpool_driver *driver;
-	void *pool;
-};
-
-static LIST_HEAD(drivers_head);
-static DEFINE_SPINLOCK(drivers_lock);
-
-/**
- * zpool_register_driver() - register a zpool implementation.
- * @driver:	driver to register
- */
-void zpool_register_driver(struct zpool_driver *driver)
-{
-	spin_lock(&drivers_lock);
-	atomic_set(&driver->refcount, 0);
-	list_add(&driver->list, &drivers_head);
-	spin_unlock(&drivers_lock);
-}
-EXPORT_SYMBOL(zpool_register_driver);
-
-/**
- * zpool_unregister_driver() - unregister a zpool implementation.
- * @driver:	driver to unregister.
- *
- * Module usage counting is used to prevent using a driver
- * while/after unloading, so if this is called from module
- * exit function, this should never fail; if called from
- * other than the module exit function, and this returns
- * failure, the driver is in use and must remain available.
- */
-int zpool_unregister_driver(struct zpool_driver *driver)
-{
-	int ret = 0, refcount;
-
-	spin_lock(&drivers_lock);
-	refcount = atomic_read(&driver->refcount);
-	WARN_ON(refcount < 0);
-	if (refcount > 0)
-		ret = -EBUSY;
-	else
-		list_del(&driver->list);
-	spin_unlock(&drivers_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(zpool_unregister_driver);
-
-/* this assumes @type is null-terminated. */
-static struct zpool_driver *zpool_get_driver(const char *type)
-{
-	struct zpool_driver *driver;
-
-	spin_lock(&drivers_lock);
-	list_for_each_entry(driver, &drivers_head, list) {
-		if (!strcmp(driver->type, type)) {
-			bool got = try_module_get(driver->owner);
-
-			if (got)
-				atomic_inc(&driver->refcount);
-			spin_unlock(&drivers_lock);
-			return got ? driver : NULL;
-		}
-	}
-
-	spin_unlock(&drivers_lock);
-	return NULL;
-}
-
-static void zpool_put_driver(struct zpool_driver *driver)
-{
-	atomic_dec(&driver->refcount);
-	module_put(driver->owner);
-}
-
-/**
- * zpool_has_pool() - Check if the pool driver is available
- * @type:	The type of the zpool to check (e.g. zsmalloc)
- *
- * This checks if the @type pool driver is available.  This will try to load
- * the requested module, if needed, but there is no guarantee the module will
- * still be loaded and available immediately after calling.  If this returns
- * true, the caller should assume the pool is available, but must be prepared
- * to handle the @zpool_create_pool() returning failure.  However if this
- * returns false, the caller should assume the requested pool type is not
- * available; either the requested pool type module does not exist, or could
- * not be loaded, and calling @zpool_create_pool() with the pool type will
- * fail.
- *
- * The @type string must be null-terminated.
- *
- * Returns: true if @type pool is available, false if not
- */
-bool zpool_has_pool(char *type)
-{
-	struct zpool_driver *driver = zpool_get_driver(type);
-
-	if (!driver) {
-		request_module("zpool-%s", type);
-		driver = zpool_get_driver(type);
-	}
-
-	if (!driver)
-		return false;
-
-	zpool_put_driver(driver);
-	return true;
-}
-EXPORT_SYMBOL(zpool_has_pool);
-
-/**
- * zpool_create_pool() - Create a new zpool
- * @type:	The type of the zpool to create (e.g. zsmalloc)
- * @name:	The name of the zpool (e.g. zram0, zswap)
- * @gfp:	The GFP flags to use when allocating the pool.
- *
- * This creates a new zpool of the specified type.  The gfp flags will be
- * used when allocating memory, if the implementation supports it.  If the
- * ops param is NULL, then the created zpool will not be evictable.
- *
- * Implementations must guarantee this to be thread-safe.
- *
- * The @type and @name strings must be null-terminated.
- *
- * Returns: New zpool on success, NULL on failure.
- */
-struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
-{
-	struct zpool_driver *driver;
-	struct zpool *zpool;
-
-	pr_debug("creating pool type %s\n", type);
-
-	driver = zpool_get_driver(type);
-
-	if (!driver) {
-		request_module("zpool-%s", type);
-		driver = zpool_get_driver(type);
-	}
-
-	if (!driver) {
-		pr_err("no driver for type %s\n", type);
-		return NULL;
-	}
-
-	zpool = kmalloc(sizeof(*zpool), gfp);
-	if (!zpool) {
-		pr_err("couldn't create zpool - out of memory\n");
-		zpool_put_driver(driver);
-		return NULL;
-	}
-
-	zpool->driver = driver;
-	zpool->pool = driver->create(name, gfp);
-
-	if (!zpool->pool) {
-		pr_err("couldn't create %s pool\n", type);
-		zpool_put_driver(driver);
-		kfree(zpool);
-		return NULL;
-	}
-
-	pr_debug("created pool type %s\n", type);
-
-	return zpool;
-}
-
-/**
- * zpool_destroy_pool() - Destroy a zpool
- * @zpool:	The zpool to destroy.
- *
- * Implementations must guarantee this to be thread-safe,
- * however only when destroying different pools.  The same
- * pool should only be destroyed once, and should not be used
- * after it is destroyed.
- *
- * This destroys an existing zpool.  The zpool should not be in use.
- */
-void zpool_destroy_pool(struct zpool *zpool)
-{
-	pr_debug("destroying pool type %s\n", zpool->driver->type);
-
-	zpool->driver->destroy(zpool->pool);
-	zpool_put_driver(zpool->driver);
-	kfree(zpool);
-}
-
-/**
- * zpool_get_type() - Get the type of the zpool
- * @zpool:	The zpool to check
- *
- * This returns the type of the pool.
- *
- * Implementations must guarantee this to be thread-safe.
- *
- * Returns: The type of zpool.
- */
-const char *zpool_get_type(struct zpool *zpool)
-{
-	return zpool->driver->type;
-}
-
-/**
- * zpool_malloc() - Allocate memory
- * @zpool:	The zpool to allocate from.
- * @size:	The amount of memory to allocate.
- * @gfp:	The GFP flags to use when allocating memory.
- * @handle:	Pointer to the handle to set
- * @nid:	The preferred node id.
- *
- * This allocates the requested amount of memory from the pool.
- * The gfp flags will be used when allocating memory, if the
- * implementation supports it.  The provided @handle will be
- * set to the allocated object handle. The allocation will
- * prefer the NUMA node specified by @nid.
- *
- * Implementations must guarantee this to be thread-safe.
- *
- * Returns: 0 on success, negative value on error.
- */
-int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
-		 unsigned long *handle, const int nid)
-{
-	return zpool->driver->malloc(zpool->pool, size, gfp, handle, nid);
-}
-
-/**
- * zpool_free() - Free previously allocated memory
- * @zpool:	The zpool that allocated the memory.
- * @handle:	The handle to the memory to free.
- *
- * This frees previously allocated memory.  This does not guarantee
- * that the pool will actually free memory, only that the memory
- * in the pool will become available for use by the pool.
- *
- * Implementations must guarantee this to be thread-safe,
- * however only when freeing different handles.  The same
- * handle should only be freed once, and should not be used
- * after freeing.
- */
-void zpool_free(struct zpool *zpool, unsigned long handle)
-{
-	zpool->driver->free(zpool->pool, handle);
-}
-
-/**
- * zpool_obj_read_begin() - Start reading from a previously allocated handle.
- * @zpool:	The zpool that the handle was allocated from
- * @handle:	The handle to read from
- * @local_copy:	A local buffer to use if needed.
- *
- * This starts a read operation of a previously allocated handle. The passed
- * @local_copy buffer may be used if needed by copying the memory into.
- * zpool_obj_read_end() MUST be called after the read is completed to undo any
- * actions taken (e.g. release locks).
- *
- * Returns: A pointer to the handle memory to be read, if @local_copy is used,
- * the returned pointer is @local_copy.
- */
-void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle,
-			   void *local_copy)
-{
-	return zpool->driver->obj_read_begin(zpool->pool, handle, local_copy);
-}
-
-/**
- * zpool_obj_read_end() - Finish reading from a previously allocated handle.
- * @zpool:	The zpool that the handle was allocated from
- * @handle:	The handle to read from
- * @handle_mem:	The pointer returned by zpool_obj_read_begin()
- *
- * Finishes a read operation previously started by zpool_obj_read_begin().
- */
-void zpool_obj_read_end(struct zpool *zpool, unsigned long handle,
-			void *handle_mem)
-{
-	zpool->driver->obj_read_end(zpool->pool, handle, handle_mem);
-}
-
-/**
- * zpool_obj_write() - Write to a previously allocated handle.
- * @zpool:	The zpool that the handle was allocated from
- * @handle:	The handle to read from
- * @handle_mem:	The memory to copy from into the handle.
- * @mem_len:	The length of memory to be written.
- *
- */
-void zpool_obj_write(struct zpool *zpool, unsigned long handle,
-		     void *handle_mem, size_t mem_len)
-{
-	zpool->driver->obj_write(zpool->pool, handle, handle_mem, mem_len);
-}
-
-/**
- * zpool_get_total_pages() - The total size of the pool
- * @zpool:	The zpool to check
- *
- * This returns the total size in pages of the pool.
- *
- * Returns: Total size of the zpool in pages.
- */
-u64 zpool_get_total_pages(struct zpool *zpool)
-{
-	return zpool->driver->total_pages(zpool->pool);
-}
-
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("Common API for compressed memory storage");
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 153783d49d34..5bf832f9c05c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -36,7 +36,6 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 #include <linux/zsmalloc.h>
-#include <linux/zpool.h>
 #include <linux/fs.h>
 #include <linux/workqueue.h>
 #include "zpdesc.h"
@@ -433,78 +432,6 @@ static void record_obj(unsigned long handle, unsigned long obj)
 	*(unsigned long *)handle = obj;
 }
 
-/* zpool driver */
-
-#ifdef CONFIG_ZPOOL
-
-static void *zs_zpool_create(const char *name, gfp_t gfp)
-{
-	/*
-	 * Ignore global gfp flags: zs_malloc() may be invoked from
-	 * different contexts and its caller must provide a valid
-	 * gfp mask.
-	 */
-	return zs_create_pool(name);
-}
-
-static void zs_zpool_destroy(void *pool)
-{
-	zs_destroy_pool(pool);
-}
-
-static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
-			   unsigned long *handle, const int nid)
-{
-	*handle = zs_malloc(pool, size, gfp, nid);
-
-	if (IS_ERR_VALUE(*handle))
-		return PTR_ERR((void *)*handle);
-	return 0;
-}
-static void zs_zpool_free(void *pool, unsigned long handle)
-{
-	zs_free(pool, handle);
-}
-
-static void *zs_zpool_obj_read_begin(void *pool, unsigned long handle,
-				     void *local_copy)
-{
-	return zs_obj_read_begin(pool, handle, local_copy);
-}
-
-static void zs_zpool_obj_read_end(void *pool, unsigned long handle,
-				  void *handle_mem)
-{
-	zs_obj_read_end(pool, handle, handle_mem);
-}
-
-static void zs_zpool_obj_write(void *pool, unsigned long handle,
-			       void *handle_mem, size_t mem_len)
-{
-	zs_obj_write(pool, handle, handle_mem, mem_len);
-}
-
-static u64 zs_zpool_total_pages(void *pool)
-{
-	return zs_get_total_pages(pool);
-}
-
-static struct zpool_driver zs_zpool_driver = {
-	.type =			  "zsmalloc",
-	.owner =		  THIS_MODULE,
-	.create =		  zs_zpool_create,
-	.destroy =		  zs_zpool_destroy,
-	.malloc =		  zs_zpool_malloc,
-	.free =			  zs_zpool_free,
-	.obj_read_begin =	  zs_zpool_obj_read_begin,
-	.obj_read_end  =	  zs_zpool_obj_read_end,
-	.obj_write =		  zs_zpool_obj_write,
-	.total_pages =		  zs_zpool_total_pages,
-};
-
-MODULE_ALIAS("zpool-zsmalloc");
-#endif /* CONFIG_ZPOOL */
-
 static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc)
 {
 	return PagePrivate(zpdesc_page(zpdesc));
@@ -2248,9 +2175,6 @@ static int __init zs_init(void)
 {
 	int rc __maybe_unused;
 
-#ifdef CONFIG_ZPOOL
-	zpool_register_driver(&zs_zpool_driver);
-#endif
 #ifdef CONFIG_COMPACTION
 	rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
 	if (rc)
@@ -2262,9 +2186,6 @@ static int __init zs_init(void)
 
 static void __exit zs_exit(void)
 {
-#ifdef CONFIG_ZPOOL
-	zpool_unregister_driver(&zs_zpool_driver);
-#endif
 #ifdef CONFIG_COMPACTION
 	set_movable_ops(NULL, PGTY_zsmalloc);
 #endif
diff --git a/tools/testing/selftests/zram/README b/tools/testing/selftests/zram/README
index 110b34834a6f..82921c75681c 100644
--- a/tools/testing/selftests/zram/README
+++ b/tools/testing/selftests/zram/README
@@ -14,7 +14,6 @@ Statistics for individual zram devices are exported through sysfs nodes at
 Kconfig required:
 CONFIG_ZRAM=y
 CONFIG_CRYPTO_LZ4=y
-CONFIG_ZPOOL=y
 CONFIG_ZSMALLOC=y
 
 ZRAM Testcases
-- 
2.51.0




* [PATCH 3/3] mm: zpdesc: minor naming and comment corrections
  2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
  2025-08-29 16:15 ` [PATCH 1/3] mm: zswap: interact directly with zsmalloc Johannes Weiner
  2025-08-29 16:15 ` [PATCH 2/3] mm: remove unused zpool layer Johannes Weiner
@ 2025-08-29 16:15 ` Johannes Weiner
  2025-09-05 19:05   ` Yosry Ahmed
  2025-09-04  9:33 ` [PATCH 0/3] mm: remove zpool Vitaly Wool
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Johannes Weiner @ 2025-08-29 16:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yosry Ahmed, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

zpdesc is the page descriptor used by the zsmalloc backend allocator,
which in turn is used by zswap and zram. The zpool layer is gone.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/zpdesc.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 25bf5ea0beb8..b8258dc78548 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* zpdesc.h: zswap.zpool memory descriptor
+/* zpdesc.h: zsmalloc pool memory descriptor
  *
  * Written by Alex Shi <alexs@kernel.org>
  *	      Hyeonggon Yoo <42.hyeyoo@gmail.com>
@@ -11,14 +11,14 @@
 #include <linux/pagemap.h>
 
 /*
- * struct zpdesc -	Memory descriptor for zpool memory.
+ * struct zpdesc -	Memory descriptor for zsmalloc pool memory.
  * @flags:		Page flags, mostly unused by zsmalloc.
  * @lru:		Indirectly used by page migration.
  * @movable_ops:	Used by page migration.
- * @next:		Next zpdesc in a zspage in zsmalloc zpool.
- * @handle:		For huge zspage in zsmalloc zpool.
+ * @next:		Next zpdesc in a zspage in zsmalloc pool.
+ * @handle:		For huge zspage in zsmalloc pool.
  * @zspage:		Points to the zspage this zpdesc is a part of.
- * @first_obj_offset:	First object offset in zsmalloc zpool.
+ * @first_obj_offset:	First object offset in zsmalloc pool.
  * @_refcount:		The number of references to this zpdesc.
  *
  * This struct overlays struct page for now. Do not modify without a good
@@ -79,8 +79,8 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
  * zpdesc_folio - The folio allocated for a zpdesc
  * @zp: The zpdesc.
  *
- * Zpdescs are descriptors for zpool memory. The zpool memory itself is
- * allocated as folios that contain the zpool objects, and zpdesc uses specific
+ * Zpdescs are descriptors for zsmalloc memory. The memory itself is allocated
+ * as folios that contain the zsmalloc objects, and zpdesc uses specific
  * fields in the first struct page of the folio - those fields are now accessed
  * by struct zpdesc.
  *
-- 
2.51.0




* Re: [PATCH 2/3] mm: remove unused zpool layer
  2025-08-29 16:15 ` [PATCH 2/3] mm: remove unused zpool layer Johannes Weiner
@ 2025-08-29 19:07   ` SeongJae Park
  2025-09-05 18:58   ` Yosry Ahmed
  1 sibling, 0 replies; 21+ messages in thread
From: SeongJae Park @ 2025-08-29 19:07 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: SeongJae Park, Andrew Morton, Yosry Ahmed, Nhat Pham,
	Chengming Zhou, linux-mm, linux-kernel

On Fri, 29 Aug 2025 17:15:27 +0100 Johannes Weiner <hannes@cmpxchg.org> wrote:

> With zswap using zsmalloc directly, there are no more in-tree users of
> this code. Remove it.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
[...]
> --- a/Documentation/admin-guide/mm/zswap.rst
> +++ b/Documentation/admin-guide/mm/zswap.rst
[...]
>  When a swap page is passed from swapout to zswap, zswap maintains a mapping
> -of the swap entry, a combination of the swap type and swap offset, to the zpool
> -handle that references that compressed swap page.  This mapping is achieved
> -with a red-black tree per swap type.  The swap offset is the search key for the
> -tree nodes.

Nit.  s/red-black tree/xarray/ ?

Other than that,

Acked-by: SeongJae Park <sj@kernel.org>


Thanks,
SJ

[...]



* Re: [PATCH 0/3] mm: remove zpool
  2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
                   ` (2 preceding siblings ...)
  2025-08-29 16:15 ` [PATCH 3/3] mm: zpdesc: minor naming and comment corrections Johannes Weiner
@ 2025-09-04  9:33 ` Vitaly Wool
  2025-09-04 10:13   ` Vlastimil Babka
  2025-09-04 23:47   ` Andrew Morton
  2025-09-04  9:51 ` Vitaly Wool
  2025-09-05 17:52 ` Nhat Pham
  5 siblings, 2 replies; 21+ messages in thread
From: Vitaly Wool @ 2025-09-04  9:33 UTC (permalink / raw)
  To: hannes; +Cc: linux-kernel, Vlastimil Babka, linux-mm, Andrew Morton,
	Vitaly Wool


> With zswap using zsmalloc directly, there are no more in-tree users of
> this code. Remove it.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Per the previous discussions, this gets a *NACK* from my side. There is
hardly anything _technical_ preventing new in-tree users of zpool API.
zpool API is neutral and well-defined, I don’t see *any* good reason for
it to be phased out.

BTW, remarkable is that you didn't bother to CC: me to this patch.

Anyway,

Nacked-by: Vitaly Wool <vitaly.wool@konsulko.se>



* Re: [PATCH 0/3] mm: remove zpool
  2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
                   ` (3 preceding siblings ...)
  2025-09-04  9:33 ` [PATCH 0/3] mm: remove zpool Vitaly Wool
@ 2025-09-04  9:51 ` Vitaly Wool
  2025-09-05 17:52 ` Nhat Pham
  5 siblings, 0 replies; 21+ messages in thread
From: Vitaly Wool @ 2025-09-04  9:51 UTC (permalink / raw)
  To: hannes; +Cc: linux-kernel, Vlastimil Babka, linux-mm, Andrew Morton,
	Vitaly Wool

> With zswap using zsmalloc directly, there are no more in-tree users of
> this code. Remove it.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Per the previous discussions, this gets a *NACK* from my side. There is
hardly anything _technical_ preventing new in-tree users of zpool API.
zpool API is neutral and well-defined, I don’t see *any* good reason for
it to be phased out.

BTW, remarkable is that you didn't bother to CC: me to this patch.

Anyway,

Nacked-by: Vitaly Wool <vitaly.wool@konsulko.se>



* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04  9:33 ` [PATCH 0/3] mm: remove zpool Vitaly Wool
@ 2025-09-04 10:13   ` Vlastimil Babka
  2025-09-04 11:26     ` David Hildenbrand
  2025-09-04 14:11     ` Vitaly Wool
  2025-09-04 23:47   ` Andrew Morton
  1 sibling, 2 replies; 21+ messages in thread
From: Vlastimil Babka @ 2025-09-04 10:13 UTC (permalink / raw)
  To: Vitaly Wool, hannes
  Cc: linux-kernel, linux-mm, Andrew Morton, Christoph Hellwig

On 9/4/25 11:33, Vitaly Wool wrote:
>> With zswap using zsmalloc directly, there are no more in-tree users of
>> this code. Remove it.
>> 
>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> 
> Per the previous discussions, this gets a *NACK* from my side. There is
> hardly anything _technical_ preventing new in-tree users of zpool API.
> zpool API is neutral and well-defined, I don’t see *any* good reason for
> it to be phased out.

AFAIK it's a policy that unused code should be removed ASAP. And that's the
case for zpool after Patch 1, no? It could be different if another user was
about to be merged (to avoid unnecessary churn), but that doesn't seem the
case for zblock?

My concern would be if the removal breaks any existing installations relying
on zswap. Presumably not as a make oldconfig will produce a config where
nothing important is missing, and existing boot options such as
"zswap.zpool=" or attempts to write to in the init scripts to
"/sys/module/zswap/parameters/zpool" will cause some errors/noise but not
prevent booting correctly?

I mean if we were paranoid and anticipated somebody would break their
booting if writing to /sys/module/zswap/parameters/zpool failed, we could
keep the file (for a while) and just produce a warning in dmesg that it's
deprecated and does nothing?
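
For illustration, a minimal stub along those lines could look like the
below (hypothetical sketch only, not part of this series):

	/*
	 * Hypothetical: keep accepting writes to zswap.zpool so old
	 * cmdlines and init scripts don't start failing, but warn that
	 * the parameter is now ignored.
	 */
	static int zswap_zpool_param_set(const char *val,
					 const struct kernel_param *kp)
	{
		pr_warn_once("zswap: 'zpool' parameter is deprecated and ignored (zsmalloc is always used)\n");
		return 0;
	}

	static int zswap_zpool_param_get(char *buffer,
					 const struct kernel_param *kp)
	{
		return scnprintf(buffer, PAGE_SIZE, "zsmalloc\n");
	}

	static const struct kernel_param_ops zswap_zpool_param_ops = {
		.set = zswap_zpool_param_set,
		.get = zswap_zpool_param_get,
	};
	module_param_cb(zpool, &zswap_zpool_param_ops, NULL, 0644);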

From Patch 1:

> Note that this does not preclude future improvements and experiments
> with different allocation strategies. Should it become necessary, it's
> possible to provide an alternate implementation for the zsmalloc API,
> selectable at compile time. However, zsmalloc is also rather mature
> and feature rich, with years of widespread production exposure; it's
> encouraged to make incremental improvements rather than fork it.

With my history of maintaining the slab allocators I can only support this
approach.


> BTW, remarkable is that you didn't bother to CC: me to this patch.
> 
> Anyway,
> 
> Nacked-by: Vitaly Wool <vitaly.wool@konsulko.se>
> 




* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04 10:13   ` Vlastimil Babka
@ 2025-09-04 11:26     ` David Hildenbrand
  2025-09-05  5:36       ` Vitaly Wool
  2025-09-04 14:11     ` Vitaly Wool
  1 sibling, 1 reply; 21+ messages in thread
From: David Hildenbrand @ 2025-09-04 11:26 UTC (permalink / raw)
  To: Vlastimil Babka, Vitaly Wool, hannes
  Cc: linux-kernel, linux-mm, Andrew Morton, Christoph Hellwig

On 04.09.25 12:13, Vlastimil Babka wrote:
> On 9/4/25 11:33, Vitaly Wool wrote:
>>> With zswap using zsmalloc directly, there are no more in-tree users of
>>> this code. Remove it.
>>>
>>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
>>
>> Per the previous discussions, this gets a *NACK* from my side. There is
>> hardly anything _technical_ preventing new in-tree users of zpool API.
>> zpool API is neutral and well-defined, I don’t see *any* good reason for
>> it to be phased out.
> 
> AFAIK it's a policy that unused code should be removed ASAP. And that's the
> case for zpool after Patch 1, no? It could be different if another user was
> about to be merged (to avoid unnecessary churn), but that doesn't seem the
> case for zblock?

Right, and

  13 files changed, 84 insertions(+), 715 deletions(-)

speaks for itself if there is no new user anticipated.

IIRC, we did a similar approach when we removed frontswap.

-- 
Cheers

David / dhildenb




* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04 10:13   ` Vlastimil Babka
  2025-09-04 11:26     ` David Hildenbrand
@ 2025-09-04 14:11     ` Vitaly Wool
  2025-09-05  7:03       ` Vlastimil Babka
  2025-09-05 18:02       ` Nhat Pham
  1 sibling, 2 replies; 21+ messages in thread
From: Vitaly Wool @ 2025-09-04 14:11 UTC (permalink / raw)
  To: Vlastimil Babka, hannes
  Cc: linux-kernel, linux-mm, Andrew Morton, Christoph Hellwig



On 9/4/25 12:13, Vlastimil Babka wrote:
> On 9/4/25 11:33, Vitaly Wool wrote:
>>> With zswap using zsmalloc directly, there are no more in-tree users of
>>> this code. Remove it.
>>>
>>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
>>
>> Per the previous discussions, this gets a *NACK* from my side. There is
>> hardly anything _technical_ preventing new in-tree users of zpool API.
>> zpool API is neutral and well-defined, I don’t see *any* good reason for
>> it to be phased out.
> 
> AFAIK it's a policy that unused code should be removed ASAP. And that's the
> case for zpool after Patch 1, no? It could be different if another user was
> about to be merged (to avoid unnecessary churn), but that doesn't seem the
> case for zblock?

For the C implementation of zblock, no. But there is another 
implementation, in Rust, which is nearing submission.

> My concern would be if the removal breaks any existing installations relying
> on zswap. Presumably not as a make oldconfig will produce a config where
> nothing important is missing, and existing boot options such as
> "zswap.zpool=" or attempts to write to in the init scripts to
> "/sys/module/zswap/parameters/zpool" will cause some errors/noise but not
> prevent booting correctly?

I don't expect heavy breakage but misconfigurations will definitely occur.

> I mean if we were paranoid and anticipated somebody would break their
> booting if writing to /sys/module/zswap/parameters/zpool failed, we could
> keep the file (for a while) and just produce a warning in dmesg that it's
> deprecated and does nothing?
> 
>  From Patch 1:
> 
>> Note that this does not preclude future improvements and experiments
>> with different allocation strategies. Should it become necessary, it's
>> possible to provide an alternate implementation for the zsmalloc API,
>> selectable at compile time. However, zsmalloc is also rather mature
>> and feature rich, with years of widespread production exposure; it's
>> encouraged to make incremental improvements rather than fork it.
> 
> With my history of maintaining the slab allocators I can only support this
> approach.

There was a time when slab was the best option and more mature than 
slub, which is now the best and only option. Thus, the "maturity" point 
is valid, but without anything else backing it, it doesn't weigh much. 
Besides, as far as I know, zsmalloc's production exposure is limited to 
the 4K page case, and zsmalloc truly struggles when the system is 
configured for 16K pages.

Things keep changing, and some of the proven solutions may not be a good 
fit moving forward. While not suggesting that we should have a handful 
of zpool backends just for the sake of variety, I'd like to emphasize 
that there are good reasons to have zblock (especially the Rust one), 
and there are good reasons to keep zsmalloc. That leads to the 
conclusion that zpool should stay.




^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04  9:33 ` [PATCH 0/3] mm: remove zpool Vitaly Wool
  2025-09-04 10:13   ` Vlastimil Babka
@ 2025-09-04 23:47   ` Andrew Morton
  2025-09-05  5:42     ` Vitaly Wool
  1 sibling, 1 reply; 21+ messages in thread
From: Andrew Morton @ 2025-09-04 23:47 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: hannes, linux-kernel, Vlastimil Babka, linux-mm

On Thu,  4 Sep 2025 11:33:24 +0200 Vitaly Wool <vitaly.wool@konsulko.se> wrote:

> > With zswap using zsmalloc directly, there are no more in-tree users of
> > this code. Remove it.
> > 
> > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> 
> Per the previous discussions, this gets a *NACK* from my side. There is
> hardly anything _technical_ preventing new in-tree users of zpool API.
> zpool API is neutral and well-defined, I don’t see *any* good reason for
> it to be phased out.

Well, we have the zpool code and we know it works.  If a later need for
the zpool layer is demonstrated then we can unremove the code at that
time.



^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04 11:26     ` David Hildenbrand
@ 2025-09-05  5:36       ` Vitaly Wool
  0 siblings, 0 replies; 21+ messages in thread
From: Vitaly Wool @ 2025-09-05  5:36 UTC (permalink / raw)
  To: David Hildenbrand, Vlastimil Babka, hannes
  Cc: linux-kernel, linux-mm, Andrew Morton, Christoph Hellwig



On 9/4/25 13:26, David Hildenbrand wrote:
> On 04.09.25 12:13, Vlastimil Babka wrote:
>> On 9/4/25 11:33, Vitaly Wool wrote:
>>>> With zswap using zsmalloc directly, there are no more in-tree users of
>>>> this code. Remove it.
>>>>
>>>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
>>>
>>> Per the previous discussions, this gets a *NACK* from my side. There is
>>> hardly anything _technical_ preventing new in-tree users of zpool API.
>>> zpool API is neutral and well-defined, I don’t see *any* good reason for
>>> it to be phased out.
>>
>> AFAIK it's a policy that unused code should be removed ASAP. And 
>> that's the
>> case for zpool after Patch 1, no? It could be different if another 
>> user was
>> about to be merged (to avoid unnecessary churn), but that doesn't seem 
>> the
>> case for zblock?
> 
> Right, and
> 
>   13 files changed, 84 insertions(+), 715 deletions(-)
> 
> speaks for itself if there is no new user anticipated.

Well, there surely is.

> IIRC, we took a similar approach when we removed frontswap.
> 



^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04 23:47   ` Andrew Morton
@ 2025-09-05  5:42     ` Vitaly Wool
  2025-09-05 18:30       ` Andrew Morton
  0 siblings, 1 reply; 21+ messages in thread
From: Vitaly Wool @ 2025-09-05  5:42 UTC (permalink / raw)
  To: Andrew Morton; +Cc: hannes, linux-kernel, Vlastimil Babka, linux-mm



On 9/5/25 01:47, Andrew Morton wrote:
> On Thu,  4 Sep 2025 11:33:24 +0200 Vitaly Wool <vitaly.wool@konsulko.se> wrote:
> 
>>> With zswap using zsmalloc directly, there are no more in-tree users of
>>> this code. Remove it.
>>>
>>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
>>
>> Per the previous discussions, this gets a *NACK* from my side. There is
>> hardly anything _technical_ preventing new in-tree users of zpool API.
>> zpool API is neutral and well-defined, I don’t see *any* good reason for
>> it to be phased out.
> 
> Well, we have the zpool code and we know it works.  If a later need for
> the zpool layer is demonstrated then we can unremove the code at that
> time.

The whole patchset [1] depends on zpool, with the intention of using it 
on the Rust side.

[1] https://lkml.org/lkml/2025/8/23/232




^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04 14:11     ` Vitaly Wool
@ 2025-09-05  7:03       ` Vlastimil Babka
  2025-09-05 18:02       ` Nhat Pham
  1 sibling, 0 replies; 21+ messages in thread
From: Vlastimil Babka @ 2025-09-05  7:03 UTC (permalink / raw)
  To: Vitaly Wool, hannes
  Cc: linux-kernel, linux-mm, Andrew Morton, Christoph Hellwig

On 9/4/25 16:11, Vitaly Wool wrote:
> On 9/4/25 12:13, Vlastimil Babka wrote:
>> On 9/4/25 11:33, Vitaly Wool wrote:
>>>> With zswap using zsmalloc directly, there are no more in-tree users of
>>>> this code. Remove it.
>>>>
>>>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
>>>
> 
> Things keep changing, and some of the proven solutions may not be a good 
> fit moving forward. While not suggesting that we should have a handful 
> of zpool backends just for the sake of variety, I'd like to emphasize 
> that there are good reasons to have zblock (especially the Rust one), 
> and there are good reasons to keep zsmalloc. That leads to the 
> conclusion that zpool should stay.

Johannes already suggested it's possible to do that by reimplementing the
zsmalloc APIs without the runtime switch layer and choosing the
implementation at config time. It would also mean zram would be able to
switch backends.
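
As a rough illustration of that idea (all names below are hypothetical,
this is not code from the series), the compile-time switch could live
entirely in a header that resolves the zsmalloc entry points to whatever
backend was configured:

/*
 * Hypothetical sketch: pick the allocator behind the zsmalloc API at
 * build time. CONFIG_Z_ALLOC_ZBLOCK and the zblock_* symbols do not
 * exist in-tree; they only illustrate the shape of the approach.
 */
#ifdef CONFIG_Z_ALLOC_ZBLOCK
#define zs_create_pool(name)		zblock_create_pool(name)
#define zs_destroy_pool(pool)		zblock_destroy_pool(pool)
#define zs_malloc(pool, size, gfp)	zblock_malloc(pool, size, gfp)
#define zs_free(pool, handle)		zblock_free(pool, handle)
#endif	/* otherwise the regular zsmalloc definitions are used */

Call sites in zswap (and zram) would stay unchanged, and the choice is
made once in Kconfig, with no ops table or registration at runtime.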

Your argument was "zpool API is neutral and well-defined" but we don't
really care about API/KABI/etc stability inside the kernel:
Documentation/process/stable-api-nonsense.rst
So that's not a sufficient argument.


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
                   ` (4 preceding siblings ...)
  2025-09-04  9:51 ` Vitaly Wool
@ 2025-09-05 17:52 ` Nhat Pham
  2025-09-05 19:45   ` Yosry Ahmed
  5 siblings, 1 reply; 21+ messages in thread
From: Nhat Pham @ 2025-09-05 17:52 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Yosry Ahmed, Chengming Zhou, linux-mm,
	linux-kernel

On Fri, Aug 29, 2025 at 9:22 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> zpool is an indirection layer for zswap to switch between multiple
> allocator backends at runtime. Since 6.15, zsmalloc is the only
> allocator left in-tree, so there is no point in keeping zpool around.
>

Taking a step back, even if we do have needs for multiple allocators
for different setups, having the allocator runtime-selectable makes no
sense.

If you just need one particular allocator per setup, then it's better
to select it at build/boot time. And we have ways to do that without
adding 700+ lines of boilerplate.

So:

Acked-by: Nhat Pham <nphamcs@gmail.com>


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-04 14:11     ` Vitaly Wool
  2025-09-05  7:03       ` Vlastimil Babka
@ 2025-09-05 18:02       ` Nhat Pham
  1 sibling, 0 replies; 21+ messages in thread
From: Nhat Pham @ 2025-09-05 18:02 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: Vlastimil Babka, hannes, linux-kernel, linux-mm, Andrew Morton,
	Christoph Hellwig

On Thu, Sep 4, 2025 at 4:49 PM Vitaly Wool <vitaly.wool@konsulko.se> wrote:
>
>
>
> On 9/4/25 12:13, Vlastimil Babka wrote:
> > On 9/4/25 11:33, Vitaly Wool wrote:
> >>> With zswap using zsmalloc directly, there are no more in-tree users of
> >>> this code. Remove it.
> >>>
> >>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> >>
> >> Per the previous discussions, this gets a *NACK* from my side. There is
> >> hardly anything _technical_ preventing new in-tree users of zpool API.
> >> zpool API is neutral and well-defined, I don’t see *any* good reason for
> >> it to be phased out.
> >
> > AFAIK it's a policy that unused code should be removed ASAP. And that's the
> > case for zpool after Patch 1, no? It could be different if another user was
> > about to be merged (to avoid unnecessary churn), but that doesn't seem the
> > case for zblock?
>
> For the C implementation of zblock, no. But there is another
> implementation, in Rust, which is nearing submission.
>
> > My concern would be if the removal breaks any existing installations relying
> > on zswap. Presumably not as a make oldconfig will produce a config where
> > nothing important is missing, and existing boot options such as
> > "zswap.zpool=" or attempts to write to in the init scripts to
> > "/sys/module/zswap/parameters/zpool" will cause some errors/noise but not
> > prevent booting correctly?
>
> I don't expect heavy breakage but misconfigurations will definitely occur.
> > I mean if we were paranoid and anticipated somebody would break their
> > booting if writing to /sys/module/zswap/parameters/zpool failed, we could
> > keep the file (for a while) and just produce a warning in dmesg that it's
> > deprecated and does nothing?
> >
> >  From Patch 1:
> >
> >> Note that this does not preclude future improvements and experiments
> >> with different allocation strategies. Should it become necessary, it's
> >> possible to provide an alternate implementation for the zsmalloc API,
> >> selectable at compile time. However, zsmalloc is also rather mature
> >> and feature rich, with years of widespread production exposure; it's
> >> encouraged to make incremental improvements rather than fork it.
> >
> > With my history of maintaining the slab allocators I can only support this
> > approach.
>
> There was a time when slab was the best option and more mature than
> slub, which is now the best and only option. Thus, the "maturity" point
> is valid, but without anything else backing it, it doesn't weigh much.
> Besides, as far as I know, zsmalloc's production exposure is limited to
> the 4K page case, and zsmalloc truly struggles when the system is
> configured for 16K pages.

That doesn't sound unfixable, if I recall our conversation correctly.
Perhaps all of this effort is better off being spent fixing zsmalloc's
inefficiencies :)

>
> Things keep changing, and some of the proven solutions may not be a good
> fit moving forward. While not suggesting that we should have a handful
> of zpool backends just for the sake of variety, I'd like to emphasize
> that there are good reasons to have zblock (especially the Rust one),
> and there are good reasons to keep zsmalloc. That leads to the
> conclusion that zpool should stay.

IMHO, the need for multiple allocators does not necessitate the zpool API.

The zpool API is only needed if you want to switch the allocators
arbitrarily at runtime. This one is a much harder sell.

We can always add zblock, and select the backend via build options.
Over time, as zsmalloc improves to acquire zblock's advances, or zblock
implements the missing features (migratability, compaction, etc.), we
can unify/remove one of them.
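
For scale, a fixed init-time choice needn't be much code either. A sketch
(the parameter, CONFIG_ZBLOCK and the hook below are hypothetical, not
something that exists today):

/*
 * Hypothetical sketch only: pick the backend once at initialization,
 * e.g. from a boot parameter, instead of a runtime-pluggable layer.
 */
static char *zswap_allocator = "zsmalloc";
module_param_named(allocator, zswap_allocator, charp, 0444);

static int __init zswap_choose_allocator(void)
{
	if (!strcmp(zswap_allocator, "zsmalloc"))
		return 0;			/* built-in default */
#ifdef CONFIG_ZBLOCK				/* hypothetical option */
	if (!strcmp(zswap_allocator, "zblock"))
		return zswap_use_zblock();	/* hypothetical hook */
#endif
	pr_err("zswap: unknown allocator '%s'\n", zswap_allocator);
	return -EINVAL;
}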


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-05  5:42     ` Vitaly Wool
@ 2025-09-05 18:30       ` Andrew Morton
  0 siblings, 0 replies; 21+ messages in thread
From: Andrew Morton @ 2025-09-05 18:30 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: hannes, linux-kernel, Vlastimil Babka, linux-mm

On Fri, 5 Sep 2025 07:42:34 +0200 Vitaly Wool <vitaly.wool@konsulko.se> wrote:

> 
> 
> On 9/5/25 01:47, Andrew Morton wrote:
> > On Thu,  4 Sep 2025 11:33:24 +0200 Vitaly Wool <vitaly.wool@konsulko.se> wrote:
> > 
> >>> With zswap using zsmalloc directly, there are no more in-tree users of
> >>> this code. Remove it.
> >>>
> >>> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> >>
> >> Per the previous discussions, this gets a *NACK* from my side. There is
> >> hardly anything _technical_ preventing new in-tree users of zpool API.
> >> zpool API is neutral and well-defined, I don’t see *any* good reason for
> >> it to be phased out.
> > 
> > Well, we have the zpool code and we know it works.  If a later need for
> > the zpool layer is demonstrated then we can unremove the code at that
> > time.
> 
> The whole patchset [1] depends on zpool, with the whole intention to use 
> it on the Rust side.
> 
> [1] https://lkml.org/lkml/2025/8/23/232

Well, that puts a Rust wrapper around zpool.  But what user-visible
benefit does it (or shall it) enable?


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/3] mm: zswap: interact directly with zsmalloc
  2025-08-29 16:15 ` [PATCH 1/3] mm: zswap: interact directly with zsmalloc Johannes Weiner
@ 2025-09-05 18:53   ` Yosry Ahmed
  0 siblings, 0 replies; 21+ messages in thread
From: Yosry Ahmed @ 2025-09-05 18:53 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

On Fri, Aug 29, 2025 at 05:15:26PM +0100, Johannes Weiner wrote:
> zswap goes through the zpool layer to enable runtime-switching of
> allocator backends for compressed data. However, since zbud and z3fold
> were removed in 6.15, zsmalloc has been the only option available.
> 
> As such, the zpool indirection is unnecessary. Make zswap deal with
> zsmalloc directly. This is comparable to zram, which also directly
> interacts with zsmalloc and has never supported a different backend.
> 
> Note that this does not preclude future improvements and experiments
> with different allocation strategies. Should it become necessary, it's
> possible to provide an alternate implementation for the zsmalloc API,
> selectable at compile time. However, zsmalloc is also rather mature
> and feature rich, with years of widespread production exposure; it's
> encouraged to make incremental improvements rather than fork it.
> 
> In any case, the complexity of runtime pluggability seems excessive
> and unjustified at this time. Switch zswap to zsmalloc to remove the
> last user of the zpool API.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---
[..]
> @@ -315,52 +292,29 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
>  error:
>  	if (pool->acomp_ctx)
>  		free_percpu(pool->acomp_ctx);
> -	if (pool->zpool)
> -		zpool_destroy_pool(pool->zpool);
> +	if (pool->zs_pool)
> +		zs_destroy_pool(pool->zs_pool);
>  	kfree(pool);
>  	return NULL;
>  }
>  
>  static struct zswap_pool *__zswap_pool_create_fallback(void)
>  {
> -	bool has_comp, has_zpool;
> -
> -	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
> -	if (!has_comp && strcmp(zswap_compressor,
> -				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
> +	if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
> +	    strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
>  		pr_err("compressor %s not available, using default %s\n",
>  		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
>  		param_free_charp(&zswap_compressor);
>  		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
> -		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
> -	}
> -	if (!has_comp) {
> -		pr_err("default compressor %s not available\n",
> -		       zswap_compressor);
> -		param_free_charp(&zswap_compressor);
> -		zswap_compressor = ZSWAP_PARAM_UNSET;
> -	}
> -
> -	has_zpool = zpool_has_pool(zswap_zpool_type);
> -	if (!has_zpool && strcmp(zswap_zpool_type,
> -				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
> -		pr_err("zpool %s not available, using default %s\n",
> -		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
> -		param_free_charp(&zswap_zpool_type);
> -		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
> -		has_zpool = zpool_has_pool(zswap_zpool_type);
> -	}
> -	if (!has_zpool) {
> -		pr_err("default zpool %s not available\n",
> -		       zswap_zpool_type);
> -		param_free_charp(&zswap_zpool_type);
> -		zswap_zpool_type = ZSWAP_PARAM_UNSET;
> +		if (!crypto_has_acomp(zswap_compressor, 0, 0)) {
> +			pr_err("default compressor %s not available\n",
> +			       zswap_compressor);
> +			zswap_compressor = ZSWAP_PARAM_UNSET;
> +			return NULL;
> +		}

Hmm it seems like there may be a change of behavior here. If
zswap_compressor == CONFIG_ZSWAP_COMPRESSOR_DEFAULT at the beginning and
crypto_has_acomp() returns false, the old code will go into the second
if (!has_comp) block, printing an error, freeing the string, and setting
zswap_compressor to ZSWAP_PARAM_UNSET, then we eventually return NULL.

It seems like the new code will just call zswap_pool_create() anyway.

Am I missing something here?
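
For reference, one shape that would keep the old semantics, reusing only
the identifiers visible in the hunk above (untested, illustrative only):

	/* Fall back to the default if the requested compressor is missing. */
	if (!crypto_has_acomp(zswap_compressor, 0, 0)) {
		if (strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
			pr_err("compressor %s not available, using default %s\n",
			       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
			param_free_charp(&zswap_compressor);
			zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		}
		/* Re-check so an unavailable default still fails, as before. */
		if (!crypto_has_acomp(zswap_compressor, 0, 0)) {
			pr_err("default compressor %s not available\n",
			       zswap_compressor);
			zswap_compressor = ZSWAP_PARAM_UNSET;
			return NULL;
		}
	}

That is, the default's availability is re-checked whether or not a
fallback happened.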


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 2/3] mm: remove unused zpool layer
  2025-08-29 16:15 ` [PATCH 2/3] mm: remove unused zpool layer Johannes Weiner
  2025-08-29 19:07   ` SeongJae Park
@ 2025-09-05 18:58   ` Yosry Ahmed
  1 sibling, 0 replies; 21+ messages in thread
From: Yosry Ahmed @ 2025-09-05 18:58 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

On Fri, Aug 29, 2025 at 05:15:27PM +0100, Johannes Weiner wrote:
> With zswap using zsmalloc directly, there are no more in-tree users of
> this code. Remove it.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---
[..]
> @@ -125,45 +122,18 @@ config ZSWAP_COMPRESSOR_DEFAULT
>         default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
>         default ""
>  
> -choice
> -	prompt "Default allocator"
> -	depends on ZSWAP
> -	default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
> -	help
> -	  Selects the default allocator for the compressed cache for
> -	  swap pages.
> -	  The default is 'zbud' for compatibility, however please do
> -	  read the description of each of the allocators below before
> -	  making a right choice.
> -
> -	  The selection made here can be overridden by using the kernel
> -	  command line 'zswap.zpool=' option.
> +config ZSMALLOC
> +	tristate
>  
> -config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
> -	bool "zsmalloc"
> -	select ZSMALLOC
> -	help
> -	  Use the zsmalloc allocator as the default allocator.
> -endchoice
> +if ZSMALLOC
>  
> -config ZSWAP_ZPOOL_DEFAULT
> -       string
> -       depends on ZSWAP
> -       default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
> -       default ""
> +menu "Zsmalloc allocator options"
> +	depends on ZSMALLOC
>  
> -config ZSMALLOC
> -	tristate
> -	prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM)
> -	depends on MMU
> -	help
> -	  zsmalloc is a slab-based memory allocator designed to store
> -	  pages of various compression levels efficiently. It achieves
> -	  the highest storage density with the least amount of fragmentation.

Why remove the prompt and help text here?

> +comment "Zsmalloc is a common backend allocator for zswap & zram"
>  
>  config ZSMALLOC_STAT
>  	bool "Export zsmalloc statistics"
> -	depends on ZSMALLOC
>  	select DEBUG_FS
>  	help
>  	  This option enables code in the zsmalloc to collect various
[..]


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 3/3] mm: zpdesc: minor naming and comment corrections
  2025-08-29 16:15 ` [PATCH 3/3] mm: zpdesc: minor naming and comment corrections Johannes Weiner
@ 2025-09-05 19:05   ` Yosry Ahmed
  0 siblings, 0 replies; 21+ messages in thread
From: Yosry Ahmed @ 2025-09-05 19:05 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Nhat Pham, Chengming Zhou, linux-mm, linux-kernel

On Fri, Aug 29, 2025 at 05:15:28PM +0100, Johannes Weiner wrote:
> zpdesc is the page descriptor used by the zsmalloc backend allocator,
> which in turn is used by zswap and zram. The zpool layer is gone.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---

Why not rename zpdesc to zsdesc or zdesc while we're at it? It's a lot
of noise but it's just this file and zsmalloc.c IIUC. Up to you.

If it's just the comment changes I would squash it into the second
patch, but not a big deal. Either way:

Acked-by: Yosry Ahmed <yosry.ahmed@linux.dev>

>  mm/zpdesc.h | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/zpdesc.h b/mm/zpdesc.h
> index 25bf5ea0beb8..b8258dc78548 100644
> --- a/mm/zpdesc.h
> +++ b/mm/zpdesc.h
> @@ -1,5 +1,5 @@
>  /* SPDX-License-Identifier: GPL-2.0 */
> -/* zpdesc.h: zswap.zpool memory descriptor
> +/* zpdesc.h: zsmalloc pool memory descriptor
>   *
>   * Written by Alex Shi <alexs@kernel.org>
>   *	      Hyeonggon Yoo <42.hyeyoo@gmail.com>
> @@ -11,14 +11,14 @@
>  #include <linux/pagemap.h>
>  
>  /*
> - * struct zpdesc -	Memory descriptor for zpool memory.
> + * struct zpdesc -	Memory descriptor for zsmalloc pool memory.
>   * @flags:		Page flags, mostly unused by zsmalloc.
>   * @lru:		Indirectly used by page migration.
>   * @movable_ops:	Used by page migration.
> - * @next:		Next zpdesc in a zspage in zsmalloc zpool.
> - * @handle:		For huge zspage in zsmalloc zpool.
> + * @next:		Next zpdesc in a zspage in zsmalloc pool.
> + * @handle:		For huge zspage in zsmalloc pool.
>   * @zspage:		Points to the zspage this zpdesc is a part of.
> - * @first_obj_offset:	First object offset in zsmalloc zpool.
> + * @first_obj_offset:	First object offset in zsmalloc pool.
>   * @_refcount:		The number of references to this zpdesc.
>   *
>   * This struct overlays struct page for now. Do not modify without a good
> @@ -79,8 +79,8 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
>   * zpdesc_folio - The folio allocated for a zpdesc
>   * @zp: The zpdesc.
>   *
> - * Zpdescs are descriptors for zpool memory. The zpool memory itself is
> - * allocated as folios that contain the zpool objects, and zpdesc uses specific
> + * Zpdescs are descriptors for zsmalloc memory. The memory itself is allocated
> + * as folios that contain the zsmalloc objects, and zpdesc uses specific
>   * fields in the first struct page of the folio - those fields are now accessed
>   * by struct zpdesc.
>   *
> -- 
> 2.51.0
> 


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/3] mm: remove zpool
  2025-09-05 17:52 ` Nhat Pham
@ 2025-09-05 19:45   ` Yosry Ahmed
  0 siblings, 0 replies; 21+ messages in thread
From: Yosry Ahmed @ 2025-09-05 19:45 UTC (permalink / raw)
  To: Nhat Pham, Johannes Weiner
  Cc: Andrew Morton, Chengming Zhou, linux-mm, linux-kernel

On Fri, Sep 05, 2025 at 10:52:18AM -0700, Nhat Pham wrote:
> On Fri, Aug 29, 2025 at 9:22 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
> >
> > zpool is an indirection layer for zswap to switch between multiple
> > allocator backends at runtime. Since 6.15, zsmalloc is the only
> > allocator left in-tree, so there is no point in keeping zpool around.
> >
> 
> Taking a step back, even if we do have needs for multiple allocators
> for different setups, having it runtime-selectable makes no sense.

Honestly I think we should take it a step further: make the compressor
selection build/boot time only and completely get rid of support for
multiple pools. We'd create one pool at initialization and that would be
it.

I believe this will simplify things considerably, and I doubt changing
the compressor at runtime has a valid use case beyond experimentation.

WDYT?
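
Roughly, the shape could be something like this (signatures simplified
and names hypothetical, just to illustrate):

/* Hypothetical sketch: one pool, one compressor, fixed at init. */
static struct zswap_pool *zswap_pool;

static int __init zswap_init(void)
{
	zswap_pool = zswap_pool_create(CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
	if (!zswap_pool)
		return -ENODEV;
	/*
	 * No pool list, no RCU switching of the current pool, no runtime
	 * compressor parameter - lookups would use zswap_pool directly.
	 */
	return 0;
}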


^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, newest: 2025-09-05 19:45 UTC

Thread overview: 21+ messages
2025-08-29 16:15 [PATCH 0/3] mm: remove zpool Johannes Weiner
2025-08-29 16:15 ` [PATCH 1/3] mm: zswap: interact directly with zsmalloc Johannes Weiner
2025-09-05 18:53   ` Yosry Ahmed
2025-08-29 16:15 ` [PATCH 2/3] mm: remove unused zpool layer Johannes Weiner
2025-08-29 19:07   ` SeongJae Park
2025-09-05 18:58   ` Yosry Ahmed
2025-08-29 16:15 ` [PATCH 3/3] mm: zpdesc: minor naming and comment corrections Johannes Weiner
2025-09-05 19:05   ` Yosry Ahmed
2025-09-04  9:33 ` [PATCH 0/3] mm: remove zpool Vitaly Wool
2025-09-04 10:13   ` Vlastimil Babka
2025-09-04 11:26     ` David Hildenbrand
2025-09-05  5:36       ` Vitaly Wool
2025-09-04 14:11     ` Vitaly Wool
2025-09-05  7:03       ` Vlastimil Babka
2025-09-05 18:02       ` Nhat Pham
2025-09-04 23:47   ` Andrew Morton
2025-09-05  5:42     ` Vitaly Wool
2025-09-05 18:30       ` Andrew Morton
2025-09-04  9:51 ` Vitaly Wool
2025-09-05 17:52 ` Nhat Pham
2025-09-05 19:45   ` Yosry Ahmed
