* [PATCH v5 01/21] mm/zsmalloc: add zpdesc memory descriptor for zswap.zpool
2024-08-06 2:22 ` alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 02/21] mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage() alexs
` (20 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
The first patch introduces the new memory descriptor zpdesc and renames
zspage.first_page to zspage.first_zpdesc; no functional change.
PG_owner_priv_1 is not carried over, since the huge-page marker was moved
from the page to the zspage by commit a41ec880aa7b ("zsmalloc: move huge
compressed obj from page to zspage").
The memcg_data member is kept since, as Yosry pointed out:
"When the pages are freed, put_page() -> folio_put() -> __folio_put() will call
mem_cgroup_uncharge(). The latter will call folio_memcg() (which reads
folio->memcg_data) to figure out if uncharging needs to be done.
There are also other similar code paths that will check
folio->memcg_data. It is currently expected to be present for all
folios. So until we have custom code paths per-folio type for
allocation/freeing/etc, we need to keep folio->memcg_data present and
properly initialized."
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
mm/zsmalloc.c | 25 +++++++++---------
2 files changed, 83 insertions(+), 13 deletions(-)
create mode 100644 mm/zpdesc.h
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
new file mode 100644
index 000000000000..0df54a5b1670
--- /dev/null
+++ b/mm/zpdesc.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* zpdesc.h: zswap.zpool memory descriptor
+ *
+ * Written by Alex Shi <alexs@kernel.org>
+ * Hyeonggon Yoo <42.hyeyoo@gmail.com>
+ */
+#ifndef __MM_ZPDESC_H__
+#define __MM_ZPDESC_H__
+
+/*
+ * struct zpdesc - Memory descriptor for zpool memory, currently used by zsmalloc
+ * @flags: Page flags, PG_private: identifies the first component page
+ * @lru: Indirectly used by page migration
+ * @mops: Used by page migration
+ * @next: Next zpdesc in a zspage in zsmalloc zpool
+ * @handle: For huge zspage in zsmalloc zpool
+ * @zspage: Points to the zspage this zpdesc is a part of
+ * @first_obj_offset: First object offset in zsmalloc zpool
+ * @_refcount: Indirectly used by page migration
+ * @memcg_data: Memory Control Group data.
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct zpdesc {
+ unsigned long flags;
+ struct list_head lru;
+ struct movable_operations *mops;
+ union {
+ /* Next zpdescs in a zspage in zsmalloc zpool */
+ struct zpdesc *next;
+ /* For huge zspage in zsmalloc zpool */
+ unsigned long handle;
+ };
+ struct zspage *zspage;
+ unsigned int first_obj_offset;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+};
+#define ZPDESC_MATCH(pg, zp) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct zpdesc, zp))
+
+ZPDESC_MATCH(flags, flags);
+ZPDESC_MATCH(lru, lru);
+ZPDESC_MATCH(mapping, mops);
+ZPDESC_MATCH(index, next);
+ZPDESC_MATCH(index, handle);
+ZPDESC_MATCH(private, zspage);
+ZPDESC_MATCH(page_type, first_obj_offset);
+ZPDESC_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+ZPDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef ZPDESC_MATCH
+static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
+
+#define zpdesc_page(zp) (_Generic((zp), \
+ const struct zpdesc *: (const struct page *)(zp), \
+ struct zpdesc *: (struct page *)(zp)))
+
+#define zpdesc_folio(zp) (_Generic((zp), \
+ const struct zpdesc *: (const struct folio *)(zp), \
+ struct zpdesc *: (struct folio *)(zp)))
+
+#define page_zpdesc(p) (_Generic((p), \
+ const struct page *: (const struct zpdesc *)(p), \
+ struct page *: (struct zpdesc *)(p)))
+
+#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5d6581ab7c07..30f0a7abbda3 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -13,20 +13,18 @@
/*
* Following is how we use various fields and flags of underlying
- * struct page(s) to form a zspage.
+ * struct zpdesc(page) to form a zspage.
*
- * Usage of struct page fields:
- * page->private: points to zspage
- * page->index: links together all component pages of a zspage
+ * Usage of struct zpdesc fields:
+ * zpdesc->zspage: points to zspage
+ * zpdesc->next: links together all component pages of a zspage
* For the huge page, this is always 0, so we use this field
* to store handle.
- * page->page_type: PG_zsmalloc, lower 16 bit locate the first object
- * offset in a subpage of a zspage
+ * zpdesc->first_obj_offset: PG_zsmalloc, lower 16 bit locate the first
+ * object offset in a subpage of a zspage
*
- * Usage of struct page flags:
+ * Usage of struct zpdesc(page) flags:
* PG_private: identifies the first component page
- * PG_owner_priv_1: identifies the huge component page
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -64,6 +62,7 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/local_lock.h>
+#include "zpdesc.h"
#define ZSPAGE_MAGIC 0x58
@@ -253,7 +252,7 @@ struct zspage {
};
unsigned int inuse;
unsigned int freeobj;
- struct page *first_page;
+ struct zpdesc *first_zpdesc;
struct list_head list; /* fullness list */
struct zs_pool *pool;
rwlock_t lock;
@@ -448,7 +447,7 @@ static inline void mod_zspage_inuse(struct zspage *zspage, int val)
static inline struct page *get_first_page(struct zspage *zspage)
{
- struct page *first_page = zspage->first_page;
+ struct page *first_page = zpdesc_page(zspage->first_zpdesc);
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
return first_page;
@@ -948,7 +947,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
set_page_private(page, (unsigned long)zspage);
page->index = 0;
if (i == 0) {
- zspage->first_page = page;
+ zspage->first_zpdesc = page_zpdesc(page);
SetPagePrivate(page);
if (unlikely(class->objs_per_zspage == 1 &&
class->pages_per_zspage == 1))
@@ -1324,7 +1323,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
link->handle = handle | OBJ_ALLOCATED_TAG;
else
/* record handle to page->index */
- zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
+ zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, 1);
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 02/21] mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage()
2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 01/21] " alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 03/21] mm/zsmalloc: convert __zs_map_object/__zs_unmap_object to use zpdesc alexs
` (19 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
To use zpdesc in trylock_zspage()/lock_zspage(), add a couple of helpers:
zpdesc_lock()/zpdesc_unlock()/zpdesc_trylock()/zpdesc_wait_locked() and
zpdesc_get()/zpdesc_put().
These helpers are implemented on top of the folio functions for two reasons:
first, zswap.zpool only uses single pages, so going through folios saves some
compound_head() checking; second, folio_put() bypasses the devmap checking
that we don't need.
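As a rough usage sketch (the real conversion is in the diff below), walking
and try-locking every subpage of a zspage now reads:

    struct zpdesc *cursor;

    for (cursor = get_first_zpdesc(zspage); cursor != NULL;
         cursor = get_next_zpdesc(cursor)) {
            if (!zpdesc_trylock(cursor))
                    break;  /* caller unlocks what was already locked */
    }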
Thanks to Intel LKP for reporting a build warning on an earlier version of
this patch.
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 30 ++++++++++++++++++++++++
mm/zsmalloc.c | 64 ++++++++++++++++++++++++++++++++++-----------------
2 files changed, 73 insertions(+), 21 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 0df54a5b1670..421eeeef6f8f 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -68,4 +68,34 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
const struct page *: (const struct zpdesc *)(p), \
struct page *: (struct zpdesc *)(p)))
+static inline void zpdesc_lock(struct zpdesc *zpdesc)
+{
+ folio_lock(zpdesc_folio(zpdesc));
+}
+
+static inline bool zpdesc_trylock(struct zpdesc *zpdesc)
+{
+ return folio_trylock(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_unlock(struct zpdesc *zpdesc)
+{
+ folio_unlock(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_wait_locked(struct zpdesc *zpdesc)
+{
+ folio_wait_locked(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_get(struct zpdesc *zpdesc)
+{
+ folio_get(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_put(struct zpdesc *zpdesc)
+{
+ folio_put(zpdesc_folio(zpdesc));
+}
+
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 30f0a7abbda3..25c90224f21f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -433,13 +433,17 @@ static __maybe_unused int is_first_page(struct page *page)
return PagePrivate(page);
}
+static inline bool is_first_zpdesc(struct zpdesc *zpdesc)
+{
+ return PagePrivate(zpdesc_page(zpdesc));
+}
+
/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
return zspage->inuse;
}
-
static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
zspage->inuse += val;
@@ -453,6 +457,14 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
+static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
+{
+ struct zpdesc *first_zpdesc = zspage->first_zpdesc;
+
+ VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc));
+ return first_zpdesc;
+}
+
#define FIRST_OBJ_PAGE_TYPE_MASK 0xffff
static inline void reset_first_obj_offset(struct page *page)
@@ -745,6 +757,16 @@ static struct page *get_next_page(struct page *page)
return (struct page *)page->index;
}
+static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
+{
+ struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
+
+ if (unlikely(ZsHugePage(zspage)))
+ return NULL;
+
+ return zpdesc->next;
+}
+
/**
* obj_to_location - get (<page>, <obj_idx>) from encoded object value
* @obj: the encoded object value
@@ -815,11 +837,11 @@ static void reset_page(struct page *page)
static int trylock_zspage(struct zspage *zspage)
{
- struct page *cursor, *fail;
+ struct zpdesc *cursor, *fail;
- for (cursor = get_first_page(zspage); cursor != NULL; cursor =
- get_next_page(cursor)) {
- if (!trylock_page(cursor)) {
+ for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor =
+ get_next_zpdesc(cursor)) {
+ if (!zpdesc_trylock(cursor)) {
fail = cursor;
goto unlock;
}
@@ -827,9 +849,9 @@ static int trylock_zspage(struct zspage *zspage)
return 1;
unlock:
- for (cursor = get_first_page(zspage); cursor != fail; cursor =
- get_next_page(cursor))
- unlock_page(cursor);
+ for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor =
+ get_next_zpdesc(cursor))
+ zpdesc_unlock(cursor);
return 0;
}
@@ -1658,7 +1680,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
*/
static void lock_zspage(struct zspage *zspage)
{
- struct page *curr_page, *page;
+ struct zpdesc *curr_zpdesc, *zpdesc;
/*
* Pages we haven't locked yet can be migrated off the list while we're
@@ -1670,24 +1692,24 @@ static void lock_zspage(struct zspage *zspage)
*/
while (1) {
migrate_read_lock(zspage);
- page = get_first_page(zspage);
- if (trylock_page(page))
+ zpdesc = get_first_zpdesc(zspage);
+ if (zpdesc_trylock(zpdesc))
break;
- get_page(page);
+ zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
- wait_on_page_locked(page);
- put_page(page);
+ zpdesc_wait_locked(zpdesc);
+ zpdesc_put(zpdesc);
}
- curr_page = page;
- while ((page = get_next_page(curr_page))) {
- if (trylock_page(page)) {
- curr_page = page;
+ curr_zpdesc = zpdesc;
+ while ((zpdesc = get_next_zpdesc(curr_zpdesc))) {
+ if (zpdesc_trylock(zpdesc)) {
+ curr_zpdesc = zpdesc;
} else {
- get_page(page);
+ zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
- wait_on_page_locked(page);
- put_page(page);
+ zpdesc_wait_locked(zpdesc);
+ zpdesc_put(zpdesc);
migrate_read_lock(zspage);
}
}
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 03/21] mm/zsmalloc: convert __zs_map_object/__zs_unmap_object to use zpdesc
2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 01/21] " alexs
2024-08-06 2:22 ` [PATCH v5 02/21] mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage() alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 04/21] mm/zsmalloc: add and use pfn/zpdesc seeking funcs alexs
` (18 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
These two functions take a pointer to an array of struct page. Introduce
zpdesc_kmap_atomic() and make __zs_{map,unmap}_object() take a pointer
to an array of zpdesc instead of page.
Add temporary type casts when calling them; the casts will be removed later
as the callers themselves are converted.
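For clarity, the temporary call-site pattern (dropped again once the callers
are converted later in the series) looks like:

    /* zs_map_object() still carries a struct page array at this point */
    struct page *pages[2];

    ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);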
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 25c90224f21f..b9b5e2824f2c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -243,6 +243,11 @@ struct zs_pool {
atomic_t compaction_in_progress;
};
+static inline void *zpdesc_kmap_atomic(struct zpdesc *zpdesc)
+{
+ return kmap_atomic(zpdesc_page(zpdesc));
+}
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -1061,7 +1066,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
}
static void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
+ struct zpdesc *zpdescs[2], int off, int size)
{
int sizes[2];
void *addr;
@@ -1078,10 +1083,10 @@ static void *__zs_map_object(struct mapping_area *area,
sizes[1] = size - sizes[0];
/* copy object to per-cpu buffer */
- addr = kmap_atomic(pages[0]);
+ addr = zpdesc_kmap_atomic(zpdescs[0]);
memcpy(buf, addr + off, sizes[0]);
kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ addr = zpdesc_kmap_atomic(zpdescs[1]);
memcpy(buf + sizes[0], addr, sizes[1]);
kunmap_atomic(addr);
out:
@@ -1089,7 +1094,7 @@ static void *__zs_map_object(struct mapping_area *area,
}
static void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
+ struct zpdesc *zpdescs[2], int off, int size)
{
int sizes[2];
void *addr;
@@ -1108,10 +1113,10 @@ static void __zs_unmap_object(struct mapping_area *area,
sizes[1] = size - sizes[0];
/* copy per-cpu buffer to object */
- addr = kmap_atomic(pages[0]);
+ addr = zpdesc_kmap_atomic(zpdescs[0]);
memcpy(addr + off, buf, sizes[0]);
kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ addr = zpdesc_kmap_atomic(zpdescs[1]);
memcpy(addr, buf + sizes[0], sizes[1]);
kunmap_atomic(addr);
@@ -1252,7 +1257,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
pages[1] = get_next_page(page);
BUG_ON(!pages[1]);
- ret = __zs_map_object(area, pages, off, class->size);
+ ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);
out:
if (likely(!ZsHugePage(zspage)))
ret += ZS_HANDLE_SIZE;
@@ -1287,7 +1292,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
pages[1] = get_next_page(page);
BUG_ON(!pages[1]);
- __zs_unmap_object(area, pages, off, class->size);
+ __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size);
}
local_unlock(&zs_map_area.lock);
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 04/21] mm/zsmalloc: add and use pfn/zpdesc seeking funcs
2024-08-06 2:22 ` alexs
` (2 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 03/21] mm/zsmalloc: convert __zs_map_object/__zs_unmap_object to use zpdesc alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 05/21] mm/zsmalloc: convert obj_malloc() to use zpdesc alexs
` (17 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Add a pfn_zpdesc() conversion helper, convert obj_to_location() to take
zpdesc, and also convert its users to use zpdesc.
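For reference, the object encoding that obj_to_location() decodes is not
changed by this patch; a sketch of the inverse operation (the real
location_to_obj() is converted later in the series):

    /* obj packs the page frame number and the object index */
    static unsigned long location_to_obj_sketch(struct zpdesc *zpdesc,
                                                unsigned int obj_idx)
    {
            return (zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS) |
                   (obj_idx & OBJ_INDEX_MASK);
    }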
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 9 +++++++
mm/zsmalloc.c | 75 ++++++++++++++++++++++++++-------------------------
2 files changed, 47 insertions(+), 37 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 421eeeef6f8f..2101de23d16d 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -98,4 +98,13 @@ static inline void zpdesc_put(struct zpdesc *zpdesc)
folio_put(zpdesc_folio(zpdesc));
}
+static inline unsigned long zpdesc_pfn(struct zpdesc *zpdesc)
+{
+ return page_to_pfn(zpdesc_page(zpdesc));
+}
+
+static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
+{
+ return page_zpdesc(pfn_to_page(pfn));
+}
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b9b5e2824f2c..384a5ba49788 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -773,15 +773,15 @@ static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
}
/**
- * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value
* @obj: the encoded object value
- * @page: page object resides in zspage
+ * @zpdesc: zpdesc object resides in zspage
* @obj_idx: object index
*/
-static void obj_to_location(unsigned long obj, struct page **page,
+static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc,
unsigned int *obj_idx)
{
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
*obj_idx = (obj & OBJ_INDEX_MASK);
}
@@ -1208,13 +1208,13 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
struct zspage *zspage;
- struct page *page;
+ struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
struct size_class *class;
struct mapping_area *area;
- struct page *pages[2];
+ struct zpdesc *zpdescs[2];
void *ret;
/*
@@ -1227,8 +1227,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
/* It guarantees it can get zspage from handle safely */
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc_page(zpdesc));
/*
* migration cannot move any zpages in this zspage. Here, class->lock
@@ -1247,17 +1247,17 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
+ area->vm_addr = zpdesc_kmap_atomic(zpdesc);
ret = area->vm_addr + off;
goto out;
}
/* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ zpdescs[0] = zpdesc;
+ zpdescs[1] = get_next_zpdesc(zpdesc);
+ BUG_ON(!zpdescs[1]);
- ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);
+ ret = __zs_map_object(area, zpdescs, off, class->size);
out:
if (likely(!ZsHugePage(zspage)))
ret += ZS_HANDLE_SIZE;
@@ -1269,7 +1269,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
struct zspage *zspage;
- struct page *page;
+ struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
@@ -1277,8 +1277,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
struct mapping_area *area;
obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc_page(zpdesc));
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
@@ -1286,13 +1286,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
if (off + class->size <= PAGE_SIZE)
kunmap_atomic(area->vm_addr);
else {
- struct page *pages[2];
+ struct zpdesc *zpdescs[2];
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ zpdescs[0] = zpdesc;
+ zpdescs[1] = get_next_zpdesc(zpdesc);
+ BUG_ON(!zpdescs[1]);
- __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size);
+ __zs_unmap_object(area, zpdescs, off, class->size);
}
local_unlock(&zs_map_area.lock);
@@ -1434,23 +1434,24 @@ static void obj_free(int class_size, unsigned long obj)
{
struct link_free *link;
struct zspage *zspage;
- struct page *f_page;
+ struct zpdesc *f_zpdesc;
unsigned long f_offset;
unsigned int f_objidx;
void *vaddr;
- obj_to_location(obj, &f_page, &f_objidx);
+
+ obj_to_location(obj, &f_zpdesc, &f_objidx);
f_offset = offset_in_page(class_size * f_objidx);
- zspage = get_zspage(f_page);
+ zspage = get_zspage(zpdesc_page(f_zpdesc));
- vaddr = kmap_atomic(f_page);
+ vaddr = zpdesc_kmap_atomic(f_zpdesc);
link = (struct link_free *)(vaddr + f_offset);
/* Insert this object in containing zspage's freelist */
if (likely(!ZsHugePage(zspage)))
link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
else
- f_page->index = 0;
+ f_zpdesc->next = NULL;
set_freeobj(zspage, f_objidx);
kunmap_atomic(vaddr);
@@ -1495,7 +1496,7 @@ EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(struct size_class *class, unsigned long dst,
unsigned long src)
{
- struct page *s_page, *d_page;
+ struct zpdesc *s_zpdesc, *d_zpdesc;
unsigned int s_objidx, d_objidx;
unsigned long s_off, d_off;
void *s_addr, *d_addr;
@@ -1504,8 +1505,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
s_size = d_size = class->size;
- obj_to_location(src, &s_page, &s_objidx);
- obj_to_location(dst, &d_page, &d_objidx);
+ obj_to_location(src, &s_zpdesc, &s_objidx);
+ obj_to_location(dst, &d_zpdesc, &d_objidx);
s_off = offset_in_page(class->size * s_objidx);
d_off = offset_in_page(class->size * d_objidx);
@@ -1516,8 +1517,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (d_off + class->size > PAGE_SIZE)
d_size = PAGE_SIZE - d_off;
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_addr = zpdesc_kmap_atomic(s_zpdesc);
+ d_addr = zpdesc_kmap_atomic(d_zpdesc);
while (1) {
size = min(s_size, d_size);
@@ -1542,17 +1543,17 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (s_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
- s_page = get_next_page(s_page);
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_zpdesc = get_next_zpdesc(s_zpdesc);
+ s_addr = zpdesc_kmap_atomic(s_zpdesc);
+ d_addr = zpdesc_kmap_atomic(d_zpdesc);
s_size = class->size - written;
s_off = 0;
}
if (d_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
- d_page = get_next_page(d_page);
- d_addr = kmap_atomic(d_page);
+ d_zpdesc = get_next_zpdesc(d_zpdesc);
+ d_addr = zpdesc_kmap_atomic(d_zpdesc);
d_size = class->size - written;
d_off = 0;
}
@@ -1791,7 +1792,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
struct zs_pool *pool;
struct size_class *class;
struct zspage *zspage;
- struct page *dummy;
+ struct zpdesc *dummy;
void *s_addr, *d_addr, *addr;
unsigned int offset;
unsigned long handle;
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 05/21] mm/zsmalloc: convert obj_malloc() to use zpdesc
2024-08-06 2:22 ` alexs
` (3 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 04/21] mm/zsmalloc: add and use pfn/zpdesc seeking funcs alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 06/21] mm/zsmalloc: convert create_page_chain() and its users " alexs
` (16 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Use get_first_zpdesc()/get_next_zpdesc() to replace
get_first_page()/get_next_page(). No functional change.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 384a5ba49788..7421d7678880 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1322,12 +1322,12 @@ EXPORT_SYMBOL_GPL(zs_huge_class_size);
static unsigned long obj_malloc(struct zs_pool *pool,
struct zspage *zspage, unsigned long handle)
{
- int i, nr_page, offset;
+ int i, nr_zpdesc, offset;
unsigned long obj;
struct link_free *link;
struct size_class *class;
- struct page *m_page;
+ struct zpdesc *m_zpdesc;
unsigned long m_offset;
void *vaddr;
@@ -1335,14 +1335,14 @@ static unsigned long obj_malloc(struct zs_pool *pool,
obj = get_freeobj(zspage);
offset = obj * class->size;
- nr_page = offset >> PAGE_SHIFT;
+ nr_zpdesc = offset >> PAGE_SHIFT;
m_offset = offset_in_page(offset);
- m_page = get_first_page(zspage);
+ m_zpdesc = get_first_zpdesc(zspage);
- for (i = 0; i < nr_page; i++)
- m_page = get_next_page(m_page);
+ for (i = 0; i < nr_zpdesc; i++)
+ m_zpdesc = get_next_zpdesc(m_zpdesc);
- vaddr = kmap_atomic(m_page);
+ vaddr = zpdesc_kmap_atomic(m_zpdesc);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
if (likely(!ZsHugePage(zspage)))
@@ -1355,7 +1355,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, 1);
- obj = location_to_obj(m_page, obj);
+ obj = location_to_obj(zpdesc_page(m_zpdesc), obj);
record_obj(handle, obj);
return obj;
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 06/21] mm/zsmalloc: convert create_page_chain() and its users to use zpdesc
2024-08-06 2:22 ` alexs
` (4 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 05/21] mm/zsmalloc: convert obj_malloc() to use zpdesc alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 07/21] mm/zsmalloc: convert obj_allocated() and related helpers " alexs
` (15 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
Introduce a few helper functions and convert create_page_chain() to use
zpdesc, then use zpdesc in replace_sub_page() too.
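To recap the chain layout create_page_chain() builds, now expressed through
zpdesc fields (illustrative only, matching the diff below):

    /*
     * For a zspage made of three zpdescs:
     *
     *   zpdescs[0]: ->zspage = zspage, ->next = zpdescs[1], PG_private set,
     *               recorded as zspage->first_zpdesc
     *   zpdescs[1]: ->zspage = zspage, ->next = zpdescs[2]
     *   zpdescs[2]: ->zspage = zspage, ->next = NULL
     */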
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 6 ++++
mm/zsmalloc.c | 95 ++++++++++++++++++++++++++++++---------------------
2 files changed, 62 insertions(+), 39 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 2101de23d16d..6a01e09e4beb 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -107,4 +107,10 @@ static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
{
return page_zpdesc(pfn_to_page(pfn));
}
+
+static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
+ const struct movable_operations *mops)
+{
+ __SetPageMovable(zpdesc_page(zpdesc), mops);
+}
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7421d7678880..33a4f7d026e3 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -248,6 +248,21 @@ static inline void *zpdesc_kmap_atomic(struct zpdesc *zpdesc)
return kmap_atomic(zpdesc_page(zpdesc));
}
+static inline void zpdesc_set_first(struct zpdesc *zpdesc)
+{
+ SetPagePrivate(zpdesc_page(zpdesc));
+}
+
+static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
+{
+ inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
+}
+
+static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
+{
+ dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
+}
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -954,35 +969,35 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
}
static void create_page_chain(struct size_class *class, struct zspage *zspage,
- struct page *pages[])
+ struct zpdesc *zpdescs[])
{
int i;
- struct page *page;
- struct page *prev_page = NULL;
- int nr_pages = class->pages_per_zspage;
+ struct zpdesc *zpdesc;
+ struct zpdesc *prev_zpdesc = NULL;
+ int nr_zpdescs = class->pages_per_zspage;
/*
* Allocate individual pages and link them together as:
- * 1. all pages are linked together using page->index
- * 2. each sub-page point to zspage using page->private
+ * 1. all pages are linked together using zpdesc->next
+ * 2. each sub-page point to zspage using zpdesc->zspage
*
- * we set PG_private to identify the first page (i.e. no other sub-page
+ * we set PG_private to identify the first zpdesc (i.e. no other zpdesc
* has this flag set).
*/
- for (i = 0; i < nr_pages; i++) {
- page = pages[i];
- set_page_private(page, (unsigned long)zspage);
- page->index = 0;
+ for (i = 0; i < nr_zpdescs; i++) {
+ zpdesc = zpdescs[i];
+ zpdesc->zspage = zspage;
+ zpdesc->next = NULL;
if (i == 0) {
- zspage->first_zpdesc = page_zpdesc(page);
- SetPagePrivate(page);
+ zspage->first_zpdesc = zpdesc;
+ zpdesc_set_first(zpdesc);
if (unlikely(class->objs_per_zspage == 1 &&
class->pages_per_zspage == 1))
SetZsHugePage(zspage);
} else {
- prev_page->index = (unsigned long)page;
+ prev_zpdesc->next = zpdesc;
}
- prev_page = page;
+ prev_zpdesc = zpdesc;
}
}
@@ -994,7 +1009,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
gfp_t gfp)
{
int i;
- struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
+ struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
struct zspage *zspage = cache_alloc_zspage(pool, gfp);
if (!zspage)
@@ -1004,25 +1019,25 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
migrate_lock_init(zspage);
for (i = 0; i < class->pages_per_zspage; i++) {
- struct page *page;
+ struct zpdesc *zpdesc;
- page = alloc_page(gfp);
- if (!page) {
+ zpdesc = page_zpdesc(alloc_page(gfp));
+ if (!zpdesc) {
while (--i >= 0) {
- dec_zone_page_state(pages[i], NR_ZSPAGES);
- __ClearPageZsmalloc(pages[i]);
- __free_page(pages[i]);
+ zpdesc_dec_zone_page_state(zpdescs[i]);
+ __ClearPageZsmalloc(zpdesc_page(zpdescs[i]));
+ __free_page(zpdesc_page(zpdescs[i]));
}
cache_free_zspage(pool, zspage);
return NULL;
}
- __SetPageZsmalloc(page);
+ __SetPageZsmalloc(zpdesc_page(zpdesc));
- inc_zone_page_state(page, NR_ZSPAGES);
- pages[i] = page;
+ zpdesc_inc_zone_page_state(zpdesc);
+ zpdescs[i] = zpdesc;
}
- create_page_chain(class, zspage, pages);
+ create_page_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
zspage->pool = pool;
zspage->class = class->index;
@@ -1753,26 +1768,28 @@ static void migrate_write_unlock(struct zspage *zspage)
static const struct movable_operations zsmalloc_mops;
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
- struct page *newpage, struct page *oldpage)
+ struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
{
- struct page *page;
- struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+ struct zpdesc *zpdesc;
+ struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+ unsigned int first_obj_offset;
int idx = 0;
- page = get_first_page(zspage);
+ zpdesc = get_first_zpdesc(zspage);
do {
- if (page == oldpage)
- pages[idx] = newpage;
+ if (zpdesc == oldzpdesc)
+ zpdescs[idx] = newzpdesc;
else
- pages[idx] = page;
+ zpdescs[idx] = zpdesc;
idx++;
- } while ((page = get_next_page(page)) != NULL);
+ } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
- create_page_chain(class, zspage, pages);
- set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+ create_page_chain(class, zspage, zpdescs);
+ first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc));
+ set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset);
if (unlikely(ZsHugePage(zspage)))
- newpage->index = oldpage->index;
- __SetPageMovable(newpage, &zsmalloc_mops);
+ newzpdesc->handle = oldzpdesc->handle;
+ __zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
}
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
@@ -1845,7 +1862,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
}
kunmap_atomic(s_addr);
- replace_sub_page(class, zspage, newpage, page);
+ replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
/*
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 07/21] mm/zsmalloc: convert obj_allocated() and related helpers to use zpdesc
2024-08-06 2:22 ` alexs
` (5 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 06/21] mm/zsmalloc: convert create_page_chain() and its users " alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 08/21] mm/zsmalloc: convert init_zspage() " alexs
` (14 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Convert obj_allocated() and related helpers to take zpdesc. Also make its
callers cast (struct page *) to (struct zpdesc *) when calling them.
The callers will be converted gradually as there are many.
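A typical caller scans a subpage for allocated objects; with this patch that
loop (as in find_alloced_obj() below) becomes roughly:

    offset = get_first_obj_offset(zpdesc_page(zpdesc));
    while (offset < PAGE_SIZE) {
            if (obj_allocated(zpdesc, addr + offset, &handle))
                    break;
            offset += class->size;
    }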
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 33a4f7d026e3..3c1755227706 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -825,15 +825,15 @@ static unsigned long handle_to_obj(unsigned long handle)
return *(unsigned long *)handle;
}
-static inline bool obj_allocated(struct page *page, void *obj,
+static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
unsigned long *phandle)
{
unsigned long handle;
- struct zspage *zspage = get_zspage(page);
+ struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
if (unlikely(ZsHugePage(zspage))) {
- VM_BUG_ON_PAGE(!is_first_page(page), page);
- handle = page->index;
+ VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));
+ handle = zpdesc->handle;
} else
handle = *(unsigned long *)obj;
@@ -1583,18 +1583,18 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
* return handle.
*/
static unsigned long find_alloced_obj(struct size_class *class,
- struct page *page, int *obj_idx)
+ struct zpdesc *zpdesc, int *obj_idx)
{
unsigned int offset;
int index = *obj_idx;
unsigned long handle = 0;
- void *addr = kmap_atomic(page);
+ void *addr = zpdesc_kmap_atomic(zpdesc);
- offset = get_first_obj_offset(page);
+ offset = get_first_obj_offset(zpdesc_page(zpdesc));
offset += class->size * index;
while (offset < PAGE_SIZE) {
- if (obj_allocated(page, addr + offset, &handle))
+ if (obj_allocated(zpdesc, addr + offset, &handle))
break;
offset += class->size;
@@ -1618,7 +1618,7 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
struct size_class *class = pool->size_class[src_zspage->class];
while (1) {
- handle = find_alloced_obj(class, s_page, &obj_idx);
+ handle = find_alloced_obj(class, page_zpdesc(s_page), &obj_idx);
if (!handle) {
s_page = get_next_page(s_page);
if (!s_page)
@@ -1851,7 +1851,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
- if (obj_allocated(page, addr, &handle)) {
+ if (obj_allocated(page_zpdesc(page), addr, &handle)) {
old_obj = handle_to_obj(handle);
obj_to_location(old_obj, &dummy, &obj_idx);
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 08/21] mm/zsmalloc: convert init_zspage() to use zpdesc
2024-08-06 2:22 ` alexs
` (6 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 07/21] mm/zsmalloc: convert obj_allocated() and related helpers " alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 09/21] mm/zsmalloc: convert obj_to_page() and zs_free() " alexs
` (13 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Replace the get_first_page()/get_next_page() calls and kmap_atomic() with
the new zpdesc helpers. No functional change.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 3c1755227706..d7400acd0f01 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -928,16 +928,16 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
{
unsigned int freeobj = 1;
unsigned long off = 0;
- struct page *page = get_first_page(zspage);
+ struct zpdesc *zpdesc = get_first_zpdesc(zspage);
- while (page) {
- struct page *next_page;
+ while (zpdesc) {
+ struct zpdesc *next_zpdesc;
struct link_free *link;
void *vaddr;
- set_first_obj_offset(page, off);
+ set_first_obj_offset(zpdesc_page(zpdesc), off);
- vaddr = kmap_atomic(page);
+ vaddr = zpdesc_kmap_atomic(zpdesc);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
@@ -950,8 +950,8 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
* page, which must point to the first object on the next
* page (if present)
*/
- next_page = get_next_page(page);
- if (next_page) {
+ next_zpdesc = get_next_zpdesc(zpdesc);
+ if (next_zpdesc) {
link->next = freeobj++ << OBJ_TAG_BITS;
} else {
/*
@@ -961,7 +961,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
link->next = -1UL << OBJ_TAG_BITS;
}
kunmap_atomic(vaddr);
- page = next_page;
+ zpdesc = next_zpdesc;
off %= PAGE_SIZE;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 09/21] mm/zsmalloc: convert obj_to_page() and zs_free() to use zpdesc
2024-08-06 2:22 ` alexs
` (7 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 08/21] mm/zsmalloc: convert init_zspage() " alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 10/21] mm/zsmalloc: add zpdesc_is_isolated()/zpdesc_zone() helper for zs_page_migrate() alexs
` (12 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Rename obj_to_page() to obj_to_zpdesc() and also convert it and
its user zs_free() to use zpdesc.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d7400acd0f01..5fac2bb436f8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -800,9 +800,9 @@ static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc,
*obj_idx = (obj & OBJ_INDEX_MASK);
}
-static void obj_to_page(unsigned long obj, struct page **page)
+static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc)
{
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
}
/**
@@ -1476,7 +1476,7 @@ static void obj_free(int class_size, unsigned long obj)
void zs_free(struct zs_pool *pool, unsigned long handle)
{
struct zspage *zspage;
- struct page *f_page;
+ struct zpdesc *f_zpdesc;
unsigned long obj;
struct size_class *class;
int fullness;
@@ -1490,8 +1490,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
*/
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
- obj_to_page(obj, &f_page);
- zspage = get_zspage(f_page);
+ obj_to_zpdesc(obj, &f_zpdesc);
+ zspage = get_zspage(zpdesc_page(f_zpdesc));
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
read_unlock(&pool->migrate_lock);
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 10/21] mm/zsmalloc: add zpdesc_is_isolated()/zpdesc_zone() helper for zs_page_migrate()
2024-08-06 2:22 ` alexs
` (8 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 09/21] mm/zsmalloc: convert obj_to_page() and zs_free() " alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 11/21] mm/zsmalloc: rename reset_page to reset_zpdesc and use zpdesc in it alexs
` (11 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To convert page to zpdesc in zs_page_migrate(), add the
zpdesc_is_isolated()/zpdesc_zone() helpers. No functional change.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 11 +++++++++++
mm/zsmalloc.c | 30 ++++++++++++++++--------------
2 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 6a01e09e4beb..20881e7e42be 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -113,4 +113,15 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
{
__SetPageMovable(zpdesc_page(zpdesc), mops);
}
+
+static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
+{
+ return PageIsolated(zpdesc_page(zpdesc));
+}
+
+static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
+{
+ return page_zone(zpdesc_page(zpdesc));
+}
+
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5fac2bb436f8..398b602a3b4d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1810,19 +1810,21 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
struct size_class *class;
struct zspage *zspage;
struct zpdesc *dummy;
+ struct zpdesc *newzpdesc = page_zpdesc(newpage);
+ struct zpdesc *zpdesc = page_zpdesc(page);
void *s_addr, *d_addr, *addr;
unsigned int offset;
unsigned long handle;
unsigned long old_obj, new_obj;
unsigned int obj_idx;
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
/* We're committed, tell the world that this is a Zsmalloc page. */
- __SetPageZsmalloc(newpage);
+ __SetPageZsmalloc(zpdesc_page(newzpdesc));
/* The page is locked, so this pointer must remain valid */
- zspage = get_zspage(page);
+ zspage = get_zspage(zpdesc_page(zpdesc));
pool = zspage->pool;
/*
@@ -1839,30 +1841,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
/* the migrate_write_lock protects zpage access via zs_map_object */
migrate_write_lock(zspage);
- offset = get_first_obj_offset(page);
- s_addr = kmap_atomic(page);
+ offset = get_first_obj_offset(zpdesc_page(zpdesc));
+ s_addr = zpdesc_kmap_atomic(zpdesc);
/*
* Here, any user cannot access all objects in the zspage so let's move.
*/
- d_addr = kmap_atomic(newpage);
+ d_addr = zpdesc_kmap_atomic(newzpdesc);
copy_page(d_addr, s_addr);
kunmap_atomic(d_addr);
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
- if (obj_allocated(page_zpdesc(page), addr, &handle)) {
+ if (obj_allocated(zpdesc, addr, &handle)) {
old_obj = handle_to_obj(handle);
obj_to_location(old_obj, &dummy, &obj_idx);
- new_obj = (unsigned long)location_to_obj(newpage,
+ new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc),
obj_idx);
record_obj(handle, new_obj);
}
}
kunmap_atomic(s_addr);
- replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
+ replace_sub_page(class, zspage, newzpdesc, zpdesc);
/*
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
@@ -1871,14 +1873,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
spin_unlock(&class->lock);
migrate_write_unlock(zspage);
- get_page(newpage);
- if (page_zone(newpage) != page_zone(page)) {
- dec_zone_page_state(page, NR_ZSPAGES);
- inc_zone_page_state(newpage, NR_ZSPAGES);
+ zpdesc_get(newzpdesc);
+ if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
+ zpdesc_dec_zone_page_state(zpdesc);
+ zpdesc_inc_zone_page_state(newzpdesc);
}
reset_page(page);
- put_page(page);
+ zpdesc_put(zpdesc);
return MIGRATEPAGE_SUCCESS;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 11/21] mm/zsmalloc: rename reset_page to reset_zpdesc and use zpdesc in it
2024-08-06 2:22 ` alexs
` (9 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 10/21] mm/zsmalloc: add zpdesc_is_isolated()/zpdesc_zone() helper for zs_page_migrate() alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:22 ` [PATCH v5 12/21] mm/zsmalloc: convert __free_zspage() to use zpdesc alexs
` (10 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
zpdesc.zspage matches page.private, and zpdesc.next matches page.index. Both
are reset in reset_page(), which is called prior to freeing the base pages of
a zspage.
Convert the function to use zpdesc and rename it to reset_zpdesc(); a few
page helpers are still left in place since they are used too widely.
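The field correspondence this relies on is the one asserted in mm/zpdesc.h,
so clearing the zpdesc members is equivalent to the old page operations:

    /*
     *   zpdesc->zspage overlays page->private  ->  was set_page_private(page, 0)
     *   zpdesc->next   overlays page->index    ->  was page->index = 0
     */
    zpdesc->zspage = NULL;
    zpdesc->next = NULL;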
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 398b602a3b4d..f4fc79c0bef5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -845,12 +845,14 @@ static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
return true;
}
-static void reset_page(struct page *page)
+static void reset_zpdesc(struct zpdesc *zpdesc)
{
+ struct page *page = zpdesc_page(zpdesc);
+
__ClearPageMovable(page);
ClearPagePrivate(page);
- set_page_private(page, 0);
- page->index = 0;
+ zpdesc->zspage = NULL;
+ zpdesc->next = NULL;
reset_first_obj_offset(page);
__ClearPageZsmalloc(page);
}
@@ -890,7 +892,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
do {
VM_BUG_ON_PAGE(!PageLocked(page), page);
next = get_next_page(page);
- reset_page(page);
+ reset_zpdesc(page_zpdesc(page));
unlock_page(page);
dec_zone_page_state(page, NR_ZSPAGES);
put_page(page);
@@ -1879,7 +1881,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
zpdesc_inc_zone_page_state(newzpdesc);
}
- reset_page(page);
+ reset_zpdesc(zpdesc);
zpdesc_put(zpdesc);
return MIGRATEPAGE_SUCCESS;
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 12/21] mm/zsmalloc: convert __free_zspage() to use zpdesc
2024-08-06 2:22 ` alexs
` (10 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 11/21] mm/zsmalloc: rename reset_page to reset_zpdesc and use zpdesc in it alexs
@ 2024-08-06 2:22 ` alexs
2024-08-06 2:23 ` [PATCH v5 13/21] mm/zsmalloc: convert location_to_obj() to take zpdesc alexs
` (9 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:22 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Introduce zpdesc_is_locked() and convert __free_zspage() to use zpdesc.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 4 ++++
mm/zsmalloc.c | 20 ++++++++++----------
2 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 20881e7e42be..640f33b5c8bd 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -124,4 +124,8 @@ static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
return page_zone(zpdesc_page(zpdesc));
}
+static inline bool zpdesc_is_locked(struct zpdesc *zpdesc)
+{
+ return PageLocked(zpdesc_page(zpdesc));
+}
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index f4fc79c0bef5..4b948b84e7f3 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -881,23 +881,23 @@ static int trylock_zspage(struct zspage *zspage)
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
struct zspage *zspage)
{
- struct page *page, *next;
+ struct zpdesc *zpdesc, *next;
assert_spin_locked(&class->lock);
VM_BUG_ON(get_zspage_inuse(zspage));
VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
- next = page = get_first_page(zspage);
+ next = zpdesc = get_first_zpdesc(zspage);
do {
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- next = get_next_page(page);
- reset_zpdesc(page_zpdesc(page));
- unlock_page(page);
- dec_zone_page_state(page, NR_ZSPAGES);
- put_page(page);
- page = next;
- } while (page != NULL);
+ VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
+ next = get_next_zpdesc(zpdesc);
+ reset_zpdesc(zpdesc);
+ zpdesc_unlock(zpdesc);
+ zpdesc_dec_zone_page_state(zpdesc);
+ zpdesc_put(zpdesc);
+ zpdesc = next;
+ } while (zpdesc != NULL);
cache_free_zspage(pool, zspage);
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 13/21] mm/zsmalloc: convert location_to_obj() to take zpdesc
2024-08-06 2:22 ` alexs
` (11 preceding siblings ...)
2024-08-06 2:22 ` [PATCH v5 12/21] mm/zsmalloc: convert __free_zspage() to use zpdesc alexs
@ 2024-08-06 2:23 ` alexs
2024-08-06 2:23 ` [PATCH v5 14/21] mm/zsmalloc: convert migrate_zspage() to use zpdesc alexs
` (8 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
As all users of location_to_obj() now use zpdesc, convert
location_to_obj() to take zpdesc.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4b948b84e7f3..e6d1bd8969e5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -806,15 +806,15 @@ static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc)
}
/**
- * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
- * @page: page object resides in zspage
+ * location_to_obj - get obj value encoded from (<zpdesc>, <obj_idx>)
+ * @zpdesc: zpdesc object resides in zspage
* @obj_idx: object index
*/
-static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
+static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx)
{
unsigned long obj;
- obj = page_to_pfn(page) << OBJ_INDEX_BITS;
+ obj = zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS;
obj |= obj_idx & OBJ_INDEX_MASK;
return obj;
@@ -1372,7 +1372,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, 1);
- obj = location_to_obj(zpdesc_page(m_zpdesc), obj);
+ obj = location_to_obj(m_zpdesc, obj);
record_obj(handle, obj);
return obj;
@@ -1859,8 +1859,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
old_obj = handle_to_obj(handle);
obj_to_location(old_obj, &dummy, &obj_idx);
- new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc),
- obj_idx);
+ new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx);
record_obj(handle, new_obj);
}
}
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 14/21] mm/zsmalloc: convert migrate_zspage() to use zpdesc
2024-08-06 2:22 ` alexs
` (12 preceding siblings ...)
2024-08-06 2:23 ` [PATCH v5 13/21] mm/zsmalloc: convert location_to_obj() to take zpdesc alexs
@ 2024-08-06 2:23 ` alexs
2024-08-06 2:23 ` [PATCH v5 15/21] mm/zsmalloc: convert get_zspage() to take zpdesc alexs
` (7 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Use get_first_zpdesc()/get_next_zpdesc() to replace
get_first_page()/get_next_page(). No functional change.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e6d1bd8969e5..c8a533b516ea 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1616,14 +1616,14 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
unsigned long used_obj, free_obj;
unsigned long handle;
int obj_idx = 0;
- struct page *s_page = get_first_page(src_zspage);
+ struct zpdesc *s_zpdesc = get_first_zpdesc(src_zspage);
struct size_class *class = pool->size_class[src_zspage->class];
while (1) {
- handle = find_alloced_obj(class, page_zpdesc(s_page), &obj_idx);
+ handle = find_alloced_obj(class, s_zpdesc, &obj_idx);
if (!handle) {
- s_page = get_next_page(s_page);
- if (!s_page)
+ s_zpdesc = get_next_zpdesc(s_zpdesc);
+ if (!s_zpdesc)
break;
obj_idx = 0;
continue;
--
2.43.0
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v5 15/21] mm/zsmalloc: convert get_zspage() to take zpdesc
2024-08-06 2:22 ` alexs
` (13 preceding siblings ...)
2024-08-06 2:23 ` [PATCH v5 14/21] mm/zsmalloc: convert migrate_zspage() to use zpdesc alexs
@ 2024-08-06 2:23 ` alexs
2024-08-06 2:23 ` [PATCH v5 16/21] mm/zsmalloc: convert SetZsPageMovable and remove unused funcs alexs
` (6 subsequent siblings)
21 siblings, 0 replies; 33+ messages in thread
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Now that all users except get_next_page() (which will be removed in a later
patch) use zpdesc, convert get_zspage() to take zpdesc instead of page.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c8a533b516ea..7ae98d048590 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -759,9 +759,9 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
return newfg;
}
-static struct zspage *get_zspage(struct page *page)
+static struct zspage *get_zspage(struct zpdesc *zpdesc)
{
- struct zspage *zspage = (struct zspage *)page_private(page);
+ struct zspage *zspage = zpdesc->zspage;
BUG_ON(zspage->magic != ZSPAGE_MAGIC);
return zspage;
@@ -769,7 +769,7 @@ static struct zspage *get_zspage(struct page *page)
static struct page *get_next_page(struct page *page)
{
- struct zspage *zspage = get_zspage(page);
+ struct zspage *zspage = get_zspage(page_zpdesc(page));
if (unlikely(ZsHugePage(zspage)))
return NULL;
@@ -779,7 +779,7 @@ static struct page *get_next_page(struct page *page)
static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
{
- struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
+ struct zspage *zspage = get_zspage(zpdesc);
if (unlikely(ZsHugePage(zspage)))
return NULL;
@@ -829,7 +829,7 @@ static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
unsigned long *phandle)
{
unsigned long handle;
- struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
+ struct zspage *zspage = get_zspage(zpdesc);
if (unlikely(ZsHugePage(zspage))) {
VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));
@@ -1245,7 +1245,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
obj_to_location(obj, &zpdesc, &obj_idx);
- zspage = get_zspage(zpdesc_page(zpdesc));
+ zspage = get_zspage(zpdesc);
/*
* migration cannot move any zpages in this zspage. Here, class->lock
@@ -1295,7 +1295,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
obj = handle_to_obj(handle);
obj_to_location(obj, &zpdesc, &obj_idx);
- zspage = get_zspage(zpdesc_page(zpdesc));
+ zspage = get_zspage(zpdesc);
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
@@ -1459,7 +1459,7 @@ static void obj_free(int class_size, unsigned long obj)
obj_to_location(obj, &f_zpdesc, &f_objidx);
f_offset = offset_in_page(class_size * f_objidx);
- zspage = get_zspage(zpdesc_page(f_zpdesc));
+ zspage = get_zspage(f_zpdesc);
vaddr = zpdesc_kmap_atomic(f_zpdesc);
link = (struct link_free *)(vaddr + f_offset);
@@ -1493,7 +1493,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
obj_to_zpdesc(obj, &f_zpdesc);
- zspage = get_zspage(zpdesc_page(f_zpdesc));
+ zspage = get_zspage(f_zpdesc);
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
read_unlock(&pool->migrate_lock);
@@ -1826,7 +1826,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
__SetPageZsmalloc(zpdesc_page(newzpdesc));
/* The page is locked, so this pointer must remain valid */
- zspage = get_zspage(zpdesc_page(zpdesc));
+ zspage = get_zspage(zpdesc);
pool = zspage->pool;
/*
--
2.43.0
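To make the shape of this change easier to see outside the kernel tree, here is a minimal userspace sketch of the old page_private()-based accessor next to the new typed-field accessor. The structs, helpers and the ZSPAGE_MAGIC value below are simplified stand-ins, not the kernel definitions:

/*
 * Simplified model of the accessor change: the zspage pointer is read
 * from a typed descriptor field instead of being cast out of the
 * untyped private word. Userspace-only, illustrative code.
 */
#include <assert.h>
#include <stdio.h>

#define ZSPAGE_MAGIC 0x58

struct zspage { unsigned int magic; };

struct page   { unsigned long private; };   /* old: untyped storage */
struct zpdesc { struct zspage *zspage; };   /* new: typed field */

/* old style: cast the private word back into a zspage pointer */
static struct zspage *get_zspage_old(struct page *page)
{
	struct zspage *zspage = (struct zspage *)page->private;

	assert(zspage->magic == ZSPAGE_MAGIC);
	return zspage;
}

/* new style: the descriptor already carries the right type */
static struct zspage *get_zspage_new(struct zpdesc *zpdesc)
{
	struct zspage *zspage = zpdesc->zspage;

	assert(zspage->magic == ZSPAGE_MAGIC);
	return zspage;
}

int main(void)
{
	struct zspage zs = { .magic = ZSPAGE_MAGIC };
	struct page page = { .private = (unsigned long)&zs };
	struct zpdesc zpdesc = { .zspage = &zs };

	printf("old ok: %d, new ok: %d\n",
	       get_zspage_old(&page) == &zs,
	       get_zspage_new(&zpdesc) == &zs);
	return 0;
}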
* [PATCH v5 16/21] mm/zsmalloc: convert SetZsPageMovable and remove unused funcs
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
Convert SetZsPageMovable() to use zpdesc, then remove the now-unused
functions get_next_page(), get_first_page() and is_first_page().
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 33 +++++----------------------------
1 file changed, 5 insertions(+), 28 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7ae98d048590..1c53a58e6473 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -448,11 +448,6 @@ static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
.lock = INIT_LOCAL_LOCK(lock),
};
-static __maybe_unused int is_first_page(struct page *page)
-{
- return PagePrivate(page);
-}
-
static inline bool is_first_zpdesc(struct zpdesc *zpdesc)
{
return PagePrivate(zpdesc_page(zpdesc));
@@ -469,14 +464,6 @@ static inline void mod_zspage_inuse(struct zspage *zspage, int val)
zspage->inuse += val;
}
-static inline struct page *get_first_page(struct zspage *zspage)
-{
- struct page *first_page = zpdesc_page(zspage->first_zpdesc);
-
- VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- return first_page;
-}
-
static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
{
struct zpdesc *first_zpdesc = zspage->first_zpdesc;
@@ -767,16 +754,6 @@ static struct zspage *get_zspage(struct zpdesc *zpdesc)
return zspage;
}
-static struct page *get_next_page(struct page *page)
-{
- struct zspage *zspage = get_zspage(page_zpdesc(page));
-
- if (unlikely(ZsHugePage(zspage)))
- return NULL;
-
- return (struct page *)page->index;
-}
-
static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
{
struct zspage *zspage = get_zspage(zpdesc);
@@ -1950,13 +1927,13 @@ static void init_deferred_free(struct zs_pool *pool)
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
- struct page *page = get_first_page(zspage);
+ struct zpdesc *zpdesc = get_first_zpdesc(zspage);
do {
- WARN_ON(!trylock_page(page));
- __SetPageMovable(page, &zsmalloc_mops);
- unlock_page(page);
- } while ((page = get_next_page(page)) != NULL);
+ WARN_ON(!zpdesc_trylock(zpdesc));
+ __zpdesc_set_movable(zpdesc, &zsmalloc_mops);
+ zpdesc_unlock(zpdesc);
+ } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
}
#else
static inline void zs_flush_migration(struct zs_pool *pool) { }
--
2.43.0
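The converted SetZsPageMovable() keeps the same walk-the-chain structure, only expressed through zpdesc helpers. Below is a toy userspace model of that loop; zpdesc_trylock(), zpdesc_unlock() and the chain layout are simplified stand-ins (in the kernel, get_next_zpdesc() also returns NULL early for huge zspages):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct zpdesc {
	struct zpdesc *next;	/* next component in the zspage chain */
	bool locked;
	bool movable;
};

static bool zpdesc_trylock(struct zpdesc *z)
{
	if (z->locked)
		return false;
	z->locked = true;
	return true;
}

static void zpdesc_unlock(struct zpdesc *z) { z->locked = false; }
static void set_movable(struct zpdesc *z)   { z->movable = true; }

/* same shape as SetZsPageMovable() after the conversion */
static void mark_chain_movable(struct zpdesc *zpdesc)
{
	do {
		if (!zpdesc_trylock(zpdesc))
			fprintf(stderr, "unexpected lock contention\n");
		set_movable(zpdesc);
		zpdesc_unlock(zpdesc);
	} while ((zpdesc = zpdesc->next) != NULL);
}

int main(void)
{
	struct zpdesc c = { 0 }, b = { .next = &c }, a = { .next = &b };

	mark_chain_movable(&a);
	printf("movable: %d %d %d\n", a.movable, b.movable, c.movable);
	return 0;
}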
* [PATCH v5 17/21] mm/zsmalloc: convert get/set_first_obj_offset() to take zpdesc
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
Now that all users of get/set_first_obj_offset() have been converted
to use zpdesc, convert these functions to take a zpdesc.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 1c53a58e6473..e2cfee57a39a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -474,26 +474,26 @@ static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
#define FIRST_OBJ_PAGE_TYPE_MASK 0xffff
-static inline void reset_first_obj_offset(struct page *page)
+static inline void reset_first_obj_offset(struct zpdesc *zpdesc)
{
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
- page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
+ VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
+ zpdesc->first_obj_offset |= FIRST_OBJ_PAGE_TYPE_MASK;
}
-static inline unsigned int get_first_obj_offset(struct page *page)
+static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc)
{
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
- return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
+ VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
+ return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
-static inline void set_first_obj_offset(struct page *page, unsigned int offset)
+static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset)
{
/* With 16 bit available, we can support offsets into 64 KiB pages. */
BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
+ VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
- page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
- page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
+ zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK;
+ zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
static inline unsigned int get_freeobj(struct zspage *zspage)
@@ -830,7 +830,7 @@ static void reset_zpdesc(struct zpdesc *zpdesc)
ClearPagePrivate(page);
zpdesc->zspage = NULL;
zpdesc->next = NULL;
- reset_first_obj_offset(page);
+ reset_first_obj_offset(zpdesc);
__ClearPageZsmalloc(page);
}
@@ -914,7 +914,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
struct link_free *link;
void *vaddr;
- set_first_obj_offset(zpdesc_page(zpdesc), off);
+ set_first_obj_offset(zpdesc, off);
vaddr = zpdesc_kmap_atomic(zpdesc);
link = (struct link_free *)vaddr + off / sizeof(*link);
@@ -1569,7 +1569,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
unsigned long handle = 0;
void *addr = zpdesc_kmap_atomic(zpdesc);
- offset = get_first_obj_offset(zpdesc_page(zpdesc));
+ offset = get_first_obj_offset(zpdesc);
offset += class->size * index;
while (offset < PAGE_SIZE) {
@@ -1764,8 +1764,8 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
create_page_chain(class, zspage, zpdescs);
- first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc));
- set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset);
+ first_obj_offset = get_first_obj_offset(oldzpdesc);
+ set_first_obj_offset(newzpdesc, first_obj_offset);
if (unlikely(ZsHugePage(zspage)))
newzpdesc->handle = oldzpdesc->handle;
__zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
@@ -1820,7 +1820,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
/* the migrate_write_lock protects zpage access via zs_map_object */
migrate_write_lock(zspage);
- offset = get_first_obj_offset(zpdesc_page(zpdesc));
+ offset = get_first_obj_offset(zpdesc);
s_addr = zpdesc_kmap_atomic(zpdesc);
/*
--
2.43.0
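The get/set/reset helpers above pack the first-object offset into the low 16 bits of a word that also carries other state. A standalone sketch of that masking, mirroring the FIRST_OBJ_PAGE_TYPE_MASK logic with made-up values rather than kernel data, might look like this:

#include <assert.h>
#include <stdio.h>

#define FIRST_OBJ_MASK 0xffffu

/* store a 16-bit offset in the low bits, preserving the upper bits */
static unsigned int set_first_obj_offset(unsigned int word, unsigned int offset)
{
	assert(!(offset & ~FIRST_OBJ_MASK));	/* offset must fit in 16 bits */
	word &= ~FIRST_OBJ_MASK;		/* clear the old offset */
	word |= offset & FIRST_OBJ_MASK;	/* store the new one */
	return word;
}

static unsigned int get_first_obj_offset(unsigned int word)
{
	return word & FIRST_OBJ_MASK;
}

static unsigned int reset_first_obj_offset(unsigned int word)
{
	return word | FIRST_OBJ_MASK;		/* all-ones marks "no offset" */
}

int main(void)
{
	unsigned int word = 0xabcd0000u;	/* arbitrary upper-bit state */

	word = set_first_obj_offset(word, 0x123);
	printf("offset=0x%x upper=0x%x\n",
	       get_first_obj_offset(word), word >> 16);
	word = reset_first_obj_offset(word);
	printf("after reset=0x%x\n", get_first_obj_offset(word));
	return 0;
}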
* [PATCH v5 18/21] mm/zsmalloc: introduce __zpdesc_clear_movable
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
Add a helper __zpdesc_clear_movable() for __ClearPageMovable(), and use it
in callers to make the code clearer.
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 5 +++++
mm/zsmalloc.c | 2 +-
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 640f33b5c8bd..1ab47faed49b 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -114,6 +114,11 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
__SetPageMovable(zpdesc_page(zpdesc), mops);
}
+static inline void __zpdesc_clear_movable(struct zpdesc *zpdesc)
+{
+ __ClearPageMovable(zpdesc_page(zpdesc));
+}
+
static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
{
return PageIsolated(zpdesc_page(zpdesc));
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e2cfee57a39a..7c7432a00a07 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -826,7 +826,7 @@ static void reset_zpdesc(struct zpdesc *zpdesc)
{
struct page *page = zpdesc_page(zpdesc);
- __ClearPageMovable(page);
+ __zpdesc_clear_movable(zpdesc);
ClearPagePrivate(page);
zpdesc->zspage = NULL;
zpdesc->next = NULL;
--
2.43.0
* [PATCH v5 19/21] mm/zsmalloc: introduce __zpdesc_clear/set_zsmalloc()
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
Add the helpers __zpdesc_set_zsmalloc() for __SetPageZsmalloc() and
__zpdesc_clear_zsmalloc() for __ClearPageZsmalloc(), and use them in
callers.
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zpdesc.h | 10 ++++++++++
mm/zsmalloc.c | 8 ++++----
2 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 1ab47faed49b..d0a1af0ee424 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -119,6 +119,16 @@ static inline void __zpdesc_clear_movable(struct zpdesc *zpdesc)
__ClearPageMovable(zpdesc_page(zpdesc));
}
+static inline void __zpdesc_set_zsmalloc(struct zpdesc *zpdesc)
+{
+ __SetPageZsmalloc(zpdesc_page(zpdesc));
+}
+
+static inline void __zpdesc_clear_zsmalloc(struct zpdesc *zpdesc)
+{
+ __ClearPageZsmalloc(zpdesc_page(zpdesc));
+}
+
static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
{
return PageIsolated(zpdesc_page(zpdesc));
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7c7432a00a07..0b318db894a8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -831,7 +831,7 @@ static void reset_zpdesc(struct zpdesc *zpdesc)
zpdesc->zspage = NULL;
zpdesc->next = NULL;
reset_first_obj_offset(zpdesc);
- __ClearPageZsmalloc(page);
+ __zpdesc_clear_zsmalloc(zpdesc);
}
static int trylock_zspage(struct zspage *zspage)
@@ -1004,13 +1004,13 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
if (!zpdesc) {
while (--i >= 0) {
zpdesc_dec_zone_page_state(zpdescs[i]);
- __ClearPageZsmalloc(zpdesc_page(zpdescs[i]));
+ __zpdesc_clear_zsmalloc(zpdescs[i]);
__free_page(zpdesc_page(zpdescs[i]));
}
cache_free_zspage(pool, zspage);
return NULL;
}
- __SetPageZsmalloc(zpdesc_page(zpdesc));
+ __zpdesc_set_zsmalloc(zpdesc);
zpdesc_inc_zone_page_state(zpdesc);
zpdescs[i] = zpdesc;
@@ -1800,7 +1800,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
/* We're committed, tell the world that this is a Zsmalloc page. */
- __SetPageZsmalloc(zpdesc_page(newzpdesc));
+ __zpdesc_set_zsmalloc(newzpdesc);
/* The page is locked, so this pointer must remain valid */
zspage = get_zspage(zpdesc);
--
2.43.0
* [PATCH v5 20/21] mm/zsmalloc: introduce zpdesc_clear_first() helper
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
Like zpdesc_set_first(), introduce a zpdesc_clear_first() helper for
ClearPagePrivate(), and use it to remove a direct 'struct page' usage
from reset_zpdesc().
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0b318db894a8..e92451c6b69c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -253,6 +253,11 @@ static inline void zpdesc_set_first(struct zpdesc *zpdesc)
SetPagePrivate(zpdesc_page(zpdesc));
}
+static inline void zpdesc_clear_first(struct zpdesc *zpdesc)
+{
+ ClearPagePrivate(zpdesc_page(zpdesc));
+}
+
static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
{
inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
@@ -824,10 +829,8 @@ static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
static void reset_zpdesc(struct zpdesc *zpdesc)
{
- struct page *page = zpdesc_page(zpdesc);
-
__zpdesc_clear_movable(zpdesc);
- ClearPagePrivate(page);
+ zpdesc_clear_first(zpdesc);
zpdesc->zspage = NULL;
zpdesc->next = NULL;
reset_first_obj_offset(zpdesc);
--
2.43.0
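Patches 18-20 all follow the same idiom: a one-line zpdesc helper wraps the corresponding struct page call so that zsmalloc code never names struct page directly. A generic userspace sketch of that wrapper pattern follows; the types, flag bit and helper names are stand-ins for illustration, not the kernel API:

#include <stdio.h>

/* stand-ins for struct page and a page-level flag helper */
#define PG_PRIVATE 0x1UL

struct page { unsigned long flags; };

static void ClearPagePrivate(struct page *page)
{
	page->flags &= ~PG_PRIVATE;
}

/* toy overlay: the descriptor view wraps the page view */
struct zpdesc { struct page page; };

static struct page *zpdesc_page(struct zpdesc *zpdesc)
{
	return &zpdesc->page;
}

/* the wrapper: callers stay entirely in zpdesc terms */
static void zpdesc_clear_first(struct zpdesc *zpdesc)
{
	ClearPagePrivate(zpdesc_page(zpdesc));
}

int main(void)
{
	struct zpdesc zpdesc = { .page = { .flags = PG_PRIVATE } };

	zpdesc_clear_first(&zpdesc);
	printf("flags=0x%lx\n", zpdesc.page.flags);
	return 0;
}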
* [PATCH v5 21/21] mm/zsmalloc: update comments for page->zpdesc changes
From: alexs @ 2024-08-06 2:23 UTC (permalink / raw)
To: Vitaly Wool, Miaohe Lin, Andrew Morton, linux-kernel, linux-mm,
minchan, willy, senozhatsky, david, 42.hyeyoo, Yosry Ahmed,
nphamcs
Cc: Alex Shi
From: Alex Shi <alexs@kernel.org>
After the page to zpdesc conversion, a few comments and one function
name still refer to pages rather than zpdescs. Update those comments
and rename create_page_chain() to create_zpdesc_chain().
Signed-off-by: Alex Shi <alexs@kernel.org>
---
mm/zsmalloc.c | 47 ++++++++++++++++++++++++++---------------------
1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e92451c6b69c..a3d9431d29ec 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -17,14 +17,16 @@
*
* Usage of struct zpdesc fields:
* zpdesc->zspage: points to zspage
- * zpdesc->next: links together all component pages of a zspage
+ * zpdesc->next: links together all component zpdescs of a zspage
* For the huge page, this is always 0, so we use this field
* to store handle.
* zpdesc->first_obj_offset: PG_zsmalloc, lower 16 bit locate the first
* object offset in a subpage of a zspage
*
* Usage of struct zpdesc(page) flags:
- * PG_private: identifies the first component page
+ * PG_private: identifies the first component zpdesc
+ * PG_lock: lock all component zpdescs for a zspage free, serialize with
+ * migration
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -191,7 +193,10 @@ struct size_class {
*/
int size;
int objs_per_zspage;
- /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ /*
+ * Number of PAGE_SIZE sized zpdescs/pages to combine to
+ * form a 'zspage'
+ */
int pages_per_zspage;
unsigned int index;
@@ -893,7 +898,7 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
/*
* Since zs_free couldn't be sleepable, this function cannot call
- * lock_page. The page locks trylock_zspage got will be released
+ * lock_page. The zpdesc locks trylock_zspage got will be released
* by __free_zspage.
*/
if (!trylock_zspage(zspage)) {
@@ -950,7 +955,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
set_freeobj(zspage, 0);
}
-static void create_page_chain(struct size_class *class, struct zspage *zspage,
+static void create_zpdesc_chain(struct size_class *class, struct zspage *zspage,
struct zpdesc *zpdescs[])
{
int i;
@@ -959,9 +964,9 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
int nr_zpdescs = class->pages_per_zspage;
/*
- * Allocate individual pages and link them together as:
- * 1. all pages are linked together using zpdesc->next
- * 2. each sub-page point to zspage using zpdesc->zspage
+ * Allocate individual zpdescs and link them together as:
+ * 1. all zpdescs are linked together using zpdesc->next
+ * 2. each sub-zpdesc point to zspage using zpdesc->zspage
*
* we set PG_private to identify the first zpdesc (i.e. no other zpdesc
* has this flag set).
@@ -1019,7 +1024,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
zpdescs[i] = zpdesc;
}
- create_page_chain(class, zspage, zpdescs);
+ create_zpdesc_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
zspage->pool = pool;
zspage->class = class->index;
@@ -1346,7 +1351,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
/* record handle in the header of allocated chunk */
link->handle = handle | OBJ_ALLOCATED_TAG;
else
- /* record handle to page->index */
+ /* record handle to zpdesc->handle */
zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
kunmap_atomic(vaddr);
@@ -1679,19 +1684,19 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
#ifdef CONFIG_COMPACTION
/*
* To prevent zspage destroy during migration, zspage freeing should
- * hold locks of all pages in the zspage.
+ * hold locks of all component zpdesc in the zspage.
*/
static void lock_zspage(struct zspage *zspage)
{
struct zpdesc *curr_zpdesc, *zpdesc;
/*
- * Pages we haven't locked yet can be migrated off the list while we're
+ * Zpdescs we haven't locked yet can be migrated off the list while we're
* trying to lock them, so we need to be careful and only attempt to
- * lock each page under migrate_read_lock(). Otherwise, the page we lock
- * may no longer belong to the zspage. This means that we may wait for
- * the wrong page to unlock, so we must take a reference to the page
- * prior to waiting for it to unlock outside migrate_read_lock().
+ * lock each zpdesc under migrate_read_lock(). Otherwise, the zpdesc we
+ * lock may no longer belong to the zspage. This means that we may wait
+ * for the wrong zpdesc to unlock, so we must take a reference to the
+ * zpdesc prior to waiting for it to unlock outside migrate_read_lock().
*/
while (1) {
migrate_read_lock(zspage);
@@ -1766,7 +1771,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
idx++;
} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
- create_page_chain(class, zspage, zpdescs);
+ create_zpdesc_chain(class, zspage, zpdescs);
first_obj_offset = get_first_obj_offset(oldzpdesc);
set_first_obj_offset(newzpdesc, first_obj_offset);
if (unlikely(ZsHugePage(zspage)))
@@ -1777,8 +1782,8 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
/*
- * Page is locked so zspage couldn't be destroyed. For detail, look at
- * lock_zspage in free_zspage.
+ * Page/zpdesc is locked so zspage couldn't be destroyed. For detail,
+ * look at lock_zspage in free_zspage.
*/
VM_BUG_ON_PAGE(PageIsolated(page), page);
@@ -1805,7 +1810,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
/* We're committed, tell the world that this is a Zsmalloc page. */
__zpdesc_set_zsmalloc(newzpdesc);
- /* The page is locked, so this pointer must remain valid */
+ /* The zpdesc/page is locked, so this pointer must remain valid */
zspage = get_zspage(zpdesc);
pool = zspage->pool;
@@ -1878,7 +1883,7 @@ static const struct movable_operations zsmalloc_mops = {
};
/*
- * Caller should hold page_lock of all pages in the zspage
+ * Caller should hold the locks of all zpdescs in the zspage
* In here, we cannot use zspage meta data.
*/
static void async_free_zspage(struct work_struct *work)
--
2.43.0
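For readers following the renamed create_zpdesc_chain(), the linking scheme its comment describes (every zpdesc chained through ->next, every zpdesc pointing back at its zspage, and only the first one marked as first) can be modelled in a few lines. Everything below is a simplified stand-in rather than kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct zspage;

struct zpdesc {
	struct zpdesc *next;	/* links component zpdescs of a zspage */
	struct zspage *zspage;	/* back-pointer to the owning zspage */
	bool first;		/* stands in for PG_private on the first one */
};

struct zspage { struct zpdesc *first_zpdesc; };

/* link nr zpdescs into one zspage, marking only the first */
static void create_zpdesc_chain(struct zspage *zspage,
				struct zpdesc *zpdescs[], int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct zpdesc *zpdesc = zpdescs[i];

		zpdesc->zspage = zspage;
		zpdesc->next = (i + 1 < nr) ? zpdescs[i + 1] : NULL;
		if (i == 0) {
			zpdesc->first = true;
			zspage->first_zpdesc = zpdesc;
		}
	}
}

int main(void)
{
	struct zpdesc a = { 0 }, b = { 0 }, c = { 0 };
	struct zpdesc *zpdescs[] = { &a, &b, &c };
	struct zspage zspage = { 0 };
	struct zpdesc *z;

	create_zpdesc_chain(&zspage, zpdescs, 3);
	for (z = zspage.first_zpdesc; z; z = z->next)
		printf("first=%d zspage-ok=%d\n", z->first, z->zspage == &zspage);
	return 0;
}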