* [PATCH 1/2] erofs: get rid of `struct z_erofs_collector'
@ 2022-03-01 19:49 Gao Xiang
2022-03-01 19:49 ` [PATCH 2/2] erofs: clean up preload_compressed_pages() Gao Xiang
` (2 more replies)
0 siblings, 3 replies; 8+ messages in thread
From: Gao Xiang @ 2022-03-01 19:49 UTC (permalink / raw)
To: linux-erofs, Chao Yu; +Cc: LKML, Gao Xiang
Avoid `struct z_erofs_collector' since there is another context
structure called "struct z_erofs_decompress_frontend".
No logic changes.
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
---
fs/erofs/zdata.c | 163 ++++++++++++++++++++++-------------------------
1 file changed, 77 insertions(+), 86 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 423bc1a61da5..2673fc105861 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -192,7 +192,10 @@ enum z_erofs_collectmode {
COLLECT_PRIMARY_FOLLOWED,
};
-struct z_erofs_collector {
+struct z_erofs_decompress_frontend {
+ struct inode *const inode;
+ struct erofs_map_blocks map;
+
struct z_erofs_pagevec_ctor vector;
struct z_erofs_pcluster *pcl, *tailpcl;
@@ -202,13 +205,6 @@ struct z_erofs_collector {
z_erofs_next_pcluster_t owned_head;
enum z_erofs_collectmode mode;
-};
-
-struct z_erofs_decompress_frontend {
- struct inode *const inode;
-
- struct z_erofs_collector clt;
- struct erofs_map_blocks map;
bool readahead;
/* used for applying cache strategy on the fly */
@@ -216,30 +212,26 @@ struct z_erofs_decompress_frontend {
erofs_off_t headoffset;
};
-#define COLLECTOR_INIT() { \
- .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = COLLECT_PRIMARY_FOLLOWED }
-
#define DECOMPRESS_FRONTEND_INIT(__i) { \
- .inode = __i, .clt = COLLECTOR_INIT(), \
- .backmost = true, }
+ .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
+ .mode = COLLECT_PRIMARY_FOLLOWED }
static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
-static void preload_compressed_pages(struct z_erofs_collector *clt,
+static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
struct address_space *mc,
enum z_erofs_cache_alloctype type,
struct page **pagepool)
{
- struct z_erofs_pcluster *pcl = clt->pcl;
+ struct z_erofs_pcluster *pcl = fe->pcl;
bool standalone = true;
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
struct page **pages;
pgoff_t index;
- if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
+ if (fe->mode < COLLECT_PRIMARY_FOLLOWED)
return;
pages = pcl->compressed_pages;
@@ -288,7 +280,7 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
* managed cache since it can be moved to the bypass queue instead.
*/
if (standalone)
- clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+ fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
}
/* called by erofs_shrinker to get rid of all compressed_pages */
@@ -350,47 +342,47 @@ int erofs_try_to_free_cached_page(struct page *page)
}
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
struct page *page)
{
- struct z_erofs_pcluster *const pcl = clt->pcl;
+ struct z_erofs_pcluster *const pcl = fe->pcl;
- while (clt->icpage_ptr > pcl->compressed_pages)
- if (!cmpxchg(--clt->icpage_ptr, NULL, page))
+ while (fe->icpage_ptr > pcl->compressed_pages)
+ if (!cmpxchg(--fe->icpage_ptr, NULL, page))
return true;
return false;
}
/* callers must be with collection lock held */
-static int z_erofs_attach_page(struct z_erofs_collector *clt,
+static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
struct page *page, enum z_erofs_page_type type,
bool pvec_safereuse)
{
int ret;
/* give priority for inplaceio */
- if (clt->mode >= COLLECT_PRIMARY &&
+ if (fe->mode >= COLLECT_PRIMARY &&
type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
- z_erofs_try_inplace_io(clt, page))
+ z_erofs_try_inplace_io(fe, page))
return 0;
- ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
+ ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
pvec_safereuse);
- clt->cl->vcnt += (unsigned int)ret;
+ fe->cl->vcnt += (unsigned int)ret;
return ret ? 0 : -EAGAIN;
}
-static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
+static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
- struct z_erofs_pcluster *pcl = clt->pcl;
- z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
+ struct z_erofs_pcluster *pcl = f->pcl;
+ z_erofs_next_pcluster_t *owned_head = &f->owned_head;
/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
*owned_head) == Z_EROFS_PCLUSTER_NIL) {
*owned_head = &pcl->next;
/* so we can attach this pcluster to our submission chain. */
- clt->mode = COLLECT_PRIMARY_FOLLOWED;
+ f->mode = COLLECT_PRIMARY_FOLLOWED;
return;
}
@@ -401,24 +393,24 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
*owned_head) == Z_EROFS_PCLUSTER_TAIL) {
*owned_head = Z_EROFS_PCLUSTER_TAIL;
- clt->mode = COLLECT_PRIMARY_HOOKED;
- clt->tailpcl = NULL;
+ f->mode = COLLECT_PRIMARY_HOOKED;
+ f->tailpcl = NULL;
return;
}
/* type 3, it belongs to a chain, but it isn't the end of the chain */
- clt->mode = COLLECT_PRIMARY;
+ f->mode = COLLECT_PRIMARY;
}
-static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
+static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
struct inode *inode,
struct erofs_map_blocks *map)
{
- struct z_erofs_pcluster *pcl = clt->pcl;
+ struct z_erofs_pcluster *pcl = fe->pcl;
struct z_erofs_collection *cl;
unsigned int length;
/* to avoid unexpected loop formed by corrupted images */
- if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
+ if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
@@ -449,15 +441,15 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
}
mutex_lock(&cl->lock);
/* used to check tail merging loop due to corrupted images */
- if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
- clt->tailpcl = pcl;
+ if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ fe->tailpcl = pcl;
- z_erofs_try_to_claim_pcluster(clt);
- clt->cl = cl;
+ z_erofs_try_to_claim_pcluster(fe);
+ fe->cl = cl;
return 0;
}
-static int z_erofs_register_collection(struct z_erofs_collector *clt,
+static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
struct inode *inode,
struct erofs_map_blocks *map)
{
@@ -485,8 +477,8 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
/* new pclusters should be claimed as type 1, primary and followed */
- pcl->next = clt->owned_head;
- clt->mode = COLLECT_PRIMARY_FOLLOWED;
+ pcl->next = fe->owned_head;
+ fe->mode = COLLECT_PRIMARY_FOLLOWED;
cl = z_erofs_primarycollection(pcl);
cl->pageofs = map->m_la & ~PAGE_MASK;
@@ -512,18 +504,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
}
if (grp != &pcl->obj) {
- clt->pcl = container_of(grp,
+ fe->pcl = container_of(grp,
struct z_erofs_pcluster, obj);
err = -EEXIST;
goto err_out;
}
}
/* used to check tail merging loop due to corrupted images */
- if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
- clt->tailpcl = pcl;
- clt->owned_head = &pcl->next;
- clt->pcl = pcl;
- clt->cl = cl;
+ if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ fe->tailpcl = pcl;
+ fe->owned_head = &pcl->next;
+ fe->pcl = pcl;
+ fe->cl = cl;
return 0;
err_out:
@@ -532,18 +524,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
return err;
}
-static int z_erofs_collector_begin(struct z_erofs_collector *clt,
+static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
struct inode *inode,
struct erofs_map_blocks *map)
{
struct erofs_workgroup *grp;
int ret;
- DBG_BUGON(clt->cl);
+ DBG_BUGON(fe->cl);
/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
- DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
- DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
if (map->m_flags & EROFS_MAP_META) {
if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
@@ -555,28 +547,28 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
if (grp) {
- clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+ fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
} else {
tailpacking:
- ret = z_erofs_register_collection(clt, inode, map);
+ ret = z_erofs_register_collection(fe, inode, map);
if (!ret)
goto out;
if (ret != -EEXIST)
return ret;
}
- ret = z_erofs_lookup_collection(clt, inode, map);
+ ret = z_erofs_lookup_collection(fe, inode, map);
if (ret) {
- erofs_workgroup_put(&clt->pcl->obj);
+ erofs_workgroup_put(&fe->pcl->obj);
return ret;
}
out:
- z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
- clt->cl->pagevec, clt->cl->vcnt);
+ z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
+ fe->cl->pagevec, fe->cl->vcnt);
/* since file-backed online pages are traversed in reverse order */
- clt->icpage_ptr = clt->pcl->compressed_pages +
- z_erofs_pclusterpages(clt->pcl);
+ fe->icpage_ptr = fe->pcl->compressed_pages +
+ z_erofs_pclusterpages(fe->pcl);
return 0;
}
@@ -610,24 +602,24 @@ static void z_erofs_collection_put(struct z_erofs_collection *cl)
erofs_workgroup_put(&pcl->obj);
}
-static bool z_erofs_collector_end(struct z_erofs_collector *clt)
+static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
{
- struct z_erofs_collection *cl = clt->cl;
+ struct z_erofs_collection *cl = fe->cl;
if (!cl)
return false;
- z_erofs_pagevec_ctor_exit(&clt->vector, false);
+ z_erofs_pagevec_ctor_exit(&fe->vector, false);
mutex_unlock(&cl->lock);
/*
* if all pending pages are added, don't hold its reference
* any longer if the pcluster isn't hosted by ourselves.
*/
- if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
+ if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
z_erofs_collection_put(cl);
- clt->cl = NULL;
+ fe->cl = NULL;
return true;
}
@@ -651,7 +643,6 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct inode *const inode = fe->inode;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct erofs_map_blocks *const map = &fe->map;
- struct z_erofs_collector *const clt = &fe->clt;
const loff_t offset = page_offset(page);
bool tight = true;
@@ -672,7 +663,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
if (offset + cur >= map->m_la &&
offset + cur < map->m_la + map->m_llen) {
/* didn't get a valid collection previously (very rare) */
- if (!clt->cl)
+ if (!fe->cl)
goto restart_now;
goto hitted;
}
@@ -680,7 +671,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
/* go ahead the next map_blocks */
erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
- if (z_erofs_collector_end(clt))
+ if (z_erofs_collector_end(fe))
fe->backmost = false;
map->m_la = offset + cur;
@@ -693,11 +684,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
if (!(map->m_flags & EROFS_MAP_MAPPED))
goto hitted;
- err = z_erofs_collector_begin(clt, inode, map);
+ err = z_erofs_collector_begin(fe, inode, map);
if (err)
goto err_out;
- if (z_erofs_is_inline_pcluster(clt->pcl)) {
+ if (z_erofs_is_inline_pcluster(fe->pcl)) {
void *mp;
mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
@@ -709,8 +700,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
goto err_out;
}
get_page(fe->map.buf.page);
- WRITE_ONCE(clt->pcl->compressed_pages[0], fe->map.buf.page);
- clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+ WRITE_ONCE(fe->pcl->compressed_pages[0], fe->map.buf.page);
+ fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
} else {
/* preload all compressed pages (can change mode if needed) */
if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
@@ -719,7 +710,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
else
cache_strategy = DONTALLOC;
- preload_compressed_pages(clt, MNGD_MAPPING(sbi),
+ preload_compressed_pages(fe, MNGD_MAPPING(sbi),
cache_strategy, pagepool);
}
@@ -730,8 +721,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
* those chains are handled asynchronously thus the page cannot be used
* for inplace I/O or pagevec (should be processed in strict order.)
*/
- tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
- clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+ tight &= (fe->mode >= COLLECT_PRIMARY_HOOKED &&
+ fe->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
cur = end - min_t(unsigned int, offset + end - map->m_la, end);
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
@@ -746,18 +737,18 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
if (cur)
- tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
+ tight &= (fe->mode >= COLLECT_PRIMARY_FOLLOWED);
retry:
- err = z_erofs_attach_page(clt, page, page_type,
- clt->mode >= COLLECT_PRIMARY_FOLLOWED);
+ err = z_erofs_attach_page(fe, page, page_type,
+ fe->mode >= COLLECT_PRIMARY_FOLLOWED);
/* should allocate an additional short-lived page for pagevec */
if (err == -EAGAIN) {
struct page *const newpage =
alloc_page(GFP_NOFS | __GFP_NOFAIL);
set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
- err = z_erofs_attach_page(clt, newpage,
+ err = z_erofs_attach_page(fe, newpage,
Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
if (!err)
goto retry;
@@ -773,7 +764,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
/* bump up the number of spiltted parts of a page */
++spiltted;
/* also update nr_pages */
- clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
+ fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1);
next_part:
/* can be used for verification */
map->m_llen = offset + cur - map->m_la;
@@ -1309,7 +1300,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
void *bi_private;
- z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
+ z_erofs_next_pcluster_t owned_head = f->owned_head;
/* bio is NULL initially, so no need to initialize last_{index,bdev} */
pgoff_t last_index;
struct block_device *last_bdev;
@@ -1417,7 +1408,7 @@ static void z_erofs_runqueue(struct super_block *sb,
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
- if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
+ if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
return;
z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
@@ -1517,7 +1508,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
err = z_erofs_do_read_page(&f, page, &pagepool);
z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
- (void)z_erofs_collector_end(&f.clt);
+ (void)z_erofs_collector_end(&f);
/* if some compressed cluster ready, need submit them anyway */
z_erofs_runqueue(inode->i_sb, &f, &pagepool,
@@ -1567,7 +1558,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
put_page(page);
}
z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
- (void)z_erofs_collector_end(&f.clt);
+ (void)z_erofs_collector_end(&f);
z_erofs_runqueue(inode->i_sb, &f, &pagepool,
z_erofs_get_sync_decompress_policy(sbi, nr_pages));
--
2.24.4
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/2] erofs: clean up preload_compressed_pages()
2022-03-01 19:49 [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Gao Xiang
@ 2022-03-01 19:49 ` Gao Xiang
2022-03-03 4:09 ` Yue Hu
2022-03-11 8:20 ` Chao Yu
2022-03-02 4:22 ` [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Yue Hu
2022-03-11 8:07 ` Chao Yu
2 siblings, 2 replies; 8+ messages in thread
From: Gao Xiang @ 2022-03-01 19:49 UTC (permalink / raw)
To: linux-erofs, Chao Yu; +Cc: LKML, Gao Xiang
Rename preload_compressed_pages() as z_erofs_bind_cache()
since we're trying to prepare for adapting folios.
Also, add a comment for the gfp setting. No logic changes.
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
---
fs/erofs/zdata.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 2673fc105861..59aecf42e45c 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -219,13 +219,17 @@ struct z_erofs_decompress_frontend {
static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
-static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
- struct address_space *mc,
- enum z_erofs_cache_alloctype type,
- struct page **pagepool)
+static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
+ enum z_erofs_cache_alloctype type,
+ struct page **pagepool)
{
+ struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
struct z_erofs_pcluster *pcl = fe->pcl;
bool standalone = true;
+ /*
+ * optimistic allocation without direct reclaim since inplace I/O
+ * can be used if low memory otherwise.
+ */
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
struct page **pages;
@@ -703,17 +707,15 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
WRITE_ONCE(fe->pcl->compressed_pages[0], fe->map.buf.page);
fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
} else {
- /* preload all compressed pages (can change mode if needed) */
+ /* bind cache first when cached decompression is preferred */
if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
map->m_la))
cache_strategy = TRYALLOC;
else
cache_strategy = DONTALLOC;
- preload_compressed_pages(fe, MNGD_MAPPING(sbi),
- cache_strategy, pagepool);
+ z_erofs_bind_cache(fe, cache_strategy, pagepool);
}
-
hitted:
/*
* Ensure the current partial page belongs to this submit chain rather
--
2.24.4
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] erofs: get rid of `struct z_erofs_collector'
2022-03-01 19:49 [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Gao Xiang
2022-03-01 19:49 ` [PATCH 2/2] erofs: clean up preload_compressed_pages() Gao Xiang
@ 2022-03-02 4:22 ` Yue Hu
2022-03-02 5:06 ` Gao Xiang
2022-03-11 8:07 ` Chao Yu
2 siblings, 1 reply; 8+ messages in thread
From: Yue Hu @ 2022-03-02 4:22 UTC (permalink / raw)
To: Gao Xiang; +Cc: linux-erofs, Chao Yu, LKML, huyue2, zhangwen
Hi Xiang,
On Wed, 2 Mar 2022 03:49:50 +0800
Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
> Avoid `struct z_erofs_collector' since there is another context
> structure called "struct z_erofs_decompress_frontend".
>
> No logic changes.
>
> Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
> ---
> fs/erofs/zdata.c | 163 ++++++++++++++++++++++-------------------------
> 1 file changed, 77 insertions(+), 86 deletions(-)
>
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 423bc1a61da5..2673fc105861 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -192,7 +192,10 @@ enum z_erofs_collectmode {
> COLLECT_PRIMARY_FOLLOWED,
> };
>
> -struct z_erofs_collector {
> +struct z_erofs_decompress_frontend {
> + struct inode *const inode;
> + struct erofs_map_blocks map;
> +
> struct z_erofs_pagevec_ctor vector;
>
> struct z_erofs_pcluster *pcl, *tailpcl;
> @@ -202,13 +205,6 @@ struct z_erofs_collector {
> z_erofs_next_pcluster_t owned_head;
>
> enum z_erofs_collectmode mode;
> -};
> -
> -struct z_erofs_decompress_frontend {
> - struct inode *const inode;
> -
> - struct z_erofs_collector clt;
> - struct erofs_map_blocks map;
>
> bool readahead;
> /* used for applying cache strategy on the fly */
> @@ -216,30 +212,26 @@ struct z_erofs_decompress_frontend {
> erofs_off_t headoffset;
> };
>
> -#define COLLECTOR_INIT() { \
> - .owned_head = Z_EROFS_PCLUSTER_TAIL, \
> - .mode = COLLECT_PRIMARY_FOLLOWED }
> -
> #define DECOMPRESS_FRONTEND_INIT(__i) { \
> - .inode = __i, .clt = COLLECTOR_INIT(), \
> - .backmost = true, }
> + .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
> + .mode = COLLECT_PRIMARY_FOLLOWED }
>
> static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
> static DEFINE_MUTEX(z_pagemap_global_lock);
>
> -static void preload_compressed_pages(struct z_erofs_collector *clt,
> +static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
> struct address_space *mc,
> enum z_erofs_cache_alloctype type,
> struct page **pagepool)
> {
> - struct z_erofs_pcluster *pcl = clt->pcl;
> + struct z_erofs_pcluster *pcl = fe->pcl;
> bool standalone = true;
> gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
> __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
> struct page **pages;
> pgoff_t index;
>
> - if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
> + if (fe->mode < COLLECT_PRIMARY_FOLLOWED)
> return;
>
> pages = pcl->compressed_pages;
> @@ -288,7 +280,7 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
> * managed cache since it can be moved to the bypass queue instead.
> */
> if (standalone)
> - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> + fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> }
>
> /* called by erofs_shrinker to get rid of all compressed_pages */
> @@ -350,47 +342,47 @@ int erofs_try_to_free_cached_page(struct page *page)
> }
>
> /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
> -static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
> +static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
> struct page *page)
> {
> - struct z_erofs_pcluster *const pcl = clt->pcl;
> + struct z_erofs_pcluster *const pcl = fe->pcl;
>
> - while (clt->icpage_ptr > pcl->compressed_pages)
> - if (!cmpxchg(--clt->icpage_ptr, NULL, page))
> + while (fe->icpage_ptr > pcl->compressed_pages)
> + if (!cmpxchg(--fe->icpage_ptr, NULL, page))
> return true;
> return false;
> }
>
> /* callers must be with collection lock held */
> -static int z_erofs_attach_page(struct z_erofs_collector *clt,
> +static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
> struct page *page, enum z_erofs_page_type type,
> bool pvec_safereuse)
> {
> int ret;
>
> /* give priority for inplaceio */
> - if (clt->mode >= COLLECT_PRIMARY &&
> + if (fe->mode >= COLLECT_PRIMARY &&
> type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
> - z_erofs_try_inplace_io(clt, page))
> + z_erofs_try_inplace_io(fe, page))
> return 0;
>
> - ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
> + ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
> pvec_safereuse);
> - clt->cl->vcnt += (unsigned int)ret;
> + fe->cl->vcnt += (unsigned int)ret;
> return ret ? 0 : -EAGAIN;
> }
>
> -static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
> +static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
> {
> - struct z_erofs_pcluster *pcl = clt->pcl;
> - z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
> + struct z_erofs_pcluster *pcl = f->pcl;
> + z_erofs_next_pcluster_t *owned_head = &f->owned_head;
>
> /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
> if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
> *owned_head) == Z_EROFS_PCLUSTER_NIL) {
> *owned_head = &pcl->next;
> /* so we can attach this pcluster to our submission chain. */
> - clt->mode = COLLECT_PRIMARY_FOLLOWED;
> + f->mode = COLLECT_PRIMARY_FOLLOWED;
> return;
> }
>
> @@ -401,24 +393,24 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
> if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
> *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
> *owned_head = Z_EROFS_PCLUSTER_TAIL;
> - clt->mode = COLLECT_PRIMARY_HOOKED;
> - clt->tailpcl = NULL;
> + f->mode = COLLECT_PRIMARY_HOOKED;
> + f->tailpcl = NULL;
> return;
> }
> /* type 3, it belongs to a chain, but it isn't the end of the chain */
> - clt->mode = COLLECT_PRIMARY;
> + f->mode = COLLECT_PRIMARY;
> }
>
> -static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
> +static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
> struct inode *inode,
> struct erofs_map_blocks *map)
> {
> - struct z_erofs_pcluster *pcl = clt->pcl;
> + struct z_erofs_pcluster *pcl = fe->pcl;
> struct z_erofs_collection *cl;
> unsigned int length;
>
> /* to avoid unexpected loop formed by corrupted images */
> - if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
> + if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
> DBG_BUGON(1);
> return -EFSCORRUPTED;
> }
> @@ -449,15 +441,15 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
> }
> mutex_lock(&cl->lock);
> /* used to check tail merging loop due to corrupted images */
> - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
> - clt->tailpcl = pcl;
> + if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
> + fe->tailpcl = pcl;
>
> - z_erofs_try_to_claim_pcluster(clt);
> - clt->cl = cl;
> + z_erofs_try_to_claim_pcluster(fe);
> + fe->cl = cl;
> return 0;
> }
>
> -static int z_erofs_register_collection(struct z_erofs_collector *clt,
> +static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
> struct inode *inode,
> struct erofs_map_blocks *map)
> {
> @@ -485,8 +477,8 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
>
> /* new pclusters should be claimed as type 1, primary and followed */
> - pcl->next = clt->owned_head;
> - clt->mode = COLLECT_PRIMARY_FOLLOWED;
> + pcl->next = fe->owned_head;
> + fe->mode = COLLECT_PRIMARY_FOLLOWED;
>
> cl = z_erofs_primarycollection(pcl);
> cl->pageofs = map->m_la & ~PAGE_MASK;
> @@ -512,18 +504,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> }
>
> if (grp != &pcl->obj) {
> - clt->pcl = container_of(grp,
> + fe->pcl = container_of(grp,
> struct z_erofs_pcluster, obj);
> err = -EEXIST;
> goto err_out;
> }
> }
> /* used to check tail merging loop due to corrupted images */
> - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
> - clt->tailpcl = pcl;
> - clt->owned_head = &pcl->next;
> - clt->pcl = pcl;
> - clt->cl = cl;
> + if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
> + fe->tailpcl = pcl;
> + fe->owned_head = &pcl->next;
> + fe->pcl = pcl;
> + fe->cl = cl;
> return 0;
>
> err_out:
> @@ -532,18 +524,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> return err;
> }
>
> -static int z_erofs_collector_begin(struct z_erofs_collector *clt,
> +static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
> struct inode *inode,
> struct erofs_map_blocks *map)
> {
> struct erofs_workgroup *grp;
> int ret;
>
> - DBG_BUGON(clt->cl);
> + DBG_BUGON(fe->cl);
>
> /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
> - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
> - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
> + DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
> + DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
>
> if (map->m_flags & EROFS_MAP_META) {
> if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
> @@ -555,28 +547,28 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
>
> grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
> if (grp) {
> - clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
> + fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
> } else {
> tailpacking:
> - ret = z_erofs_register_collection(clt, inode, map);
> + ret = z_erofs_register_collection(fe, inode, map);
> if (!ret)
> goto out;
> if (ret != -EEXIST)
> return ret;
> }
>
> - ret = z_erofs_lookup_collection(clt, inode, map);
> + ret = z_erofs_lookup_collection(fe, inode, map);
> if (ret) {
> - erofs_workgroup_put(&clt->pcl->obj);
> + erofs_workgroup_put(&fe->pcl->obj);
> return ret;
> }
>
> out:
> - z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
> - clt->cl->pagevec, clt->cl->vcnt);
> + z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
> + fe->cl->pagevec, fe->cl->vcnt);
> /* since file-backed online pages are traversed in reverse order */
> - clt->icpage_ptr = clt->pcl->compressed_pages +
> - z_erofs_pclusterpages(clt->pcl);
> + fe->icpage_ptr = fe->pcl->compressed_pages +
> + z_erofs_pclusterpages(fe->pcl);
> return 0;
> }
>
> @@ -610,24 +602,24 @@ static void z_erofs_collection_put(struct z_erofs_collection *cl)
> erofs_workgroup_put(&pcl->obj);
> }
>
> -static bool z_erofs_collector_end(struct z_erofs_collector *clt)
> +static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
> {
> - struct z_erofs_collection *cl = clt->cl;
> + struct z_erofs_collection *cl = fe->cl;
>
> if (!cl)
> return false;
>
> - z_erofs_pagevec_ctor_exit(&clt->vector, false);
> + z_erofs_pagevec_ctor_exit(&fe->vector, false);
> mutex_unlock(&cl->lock);
>
> /*
> * if all pending pages are added, don't hold its reference
> * any longer if the pcluster isn't hosted by ourselves.
> */
> - if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
> + if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
> z_erofs_collection_put(cl);
>
> - clt->cl = NULL;
> + fe->cl = NULL;
> return true;
> }
>
> @@ -651,7 +643,6 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> struct inode *const inode = fe->inode;
> struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
> struct erofs_map_blocks *const map = &fe->map;
> - struct z_erofs_collector *const clt = &fe->clt;
> const loff_t offset = page_offset(page);
> bool tight = true;
>
> @@ -672,7 +663,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> if (offset + cur >= map->m_la &&
> offset + cur < map->m_la + map->m_llen) {
> /* didn't get a valid collection previously (very rare) */
> - if (!clt->cl)
> + if (!fe->cl)
> goto restart_now;
> goto hitted;
> }
> @@ -680,7 +671,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> /* go ahead the next map_blocks */
> erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
>
> - if (z_erofs_collector_end(clt))
> + if (z_erofs_collector_end(fe))
> fe->backmost = false;
>
> map->m_la = offset + cur;
> @@ -693,11 +684,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> if (!(map->m_flags & EROFS_MAP_MAPPED))
> goto hitted;
>
> - err = z_erofs_collector_begin(clt, inode, map);
> + err = z_erofs_collector_begin(fe, inode, map);
now we can get 'inode' and 'map' from 'fe', so should this be z_erofs_collector_begin(fe)?
if so, z_erofs_{register | lookup}_collection() would need to be changed correspondingly.
> if (err)
> goto err_out;
>
> - if (z_erofs_is_inline_pcluster(clt->pcl)) {
> + if (z_erofs_is_inline_pcluster(fe->pcl)) {
> void *mp;
>
> mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
> @@ -709,8 +700,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> goto err_out;
> }
> get_page(fe->map.buf.page);
> - WRITE_ONCE(clt->pcl->compressed_pages[0], fe->map.buf.page);
> - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> + WRITE_ONCE(fe->pcl->compressed_pages[0], fe->map.buf.page);
> + fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> } else {
> /* preload all compressed pages (can change mode if needed) */
> if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
> @@ -719,7 +710,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> else
> cache_strategy = DONTALLOC;
>
> - preload_compressed_pages(clt, MNGD_MAPPING(sbi),
> + preload_compressed_pages(fe, MNGD_MAPPING(sbi),
> cache_strategy, pagepool);
> }
>
> @@ -730,8 +721,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> * those chains are handled asynchronously thus the page cannot be used
> * for inplace I/O or pagevec (should be processed in strict order.)
> */
> - tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
> - clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
> + tight &= (fe->mode >= COLLECT_PRIMARY_HOOKED &&
> + fe->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
>
> cur = end - min_t(unsigned int, offset + end - map->m_la, end);
> if (!(map->m_flags & EROFS_MAP_MAPPED)) {
> @@ -746,18 +737,18 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
>
> if (cur)
> - tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
> + tight &= (fe->mode >= COLLECT_PRIMARY_FOLLOWED);
>
> retry:
> - err = z_erofs_attach_page(clt, page, page_type,
> - clt->mode >= COLLECT_PRIMARY_FOLLOWED);
> + err = z_erofs_attach_page(fe, page, page_type,
> + fe->mode >= COLLECT_PRIMARY_FOLLOWED);
> /* should allocate an additional short-lived page for pagevec */
> if (err == -EAGAIN) {
> struct page *const newpage =
> alloc_page(GFP_NOFS | __GFP_NOFAIL);
>
> set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
> - err = z_erofs_attach_page(clt, newpage,
> + err = z_erofs_attach_page(fe, newpage,
> Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
> if (!err)
> goto retry;
> @@ -773,7 +764,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> /* bump up the number of spiltted parts of a page */
> ++spiltted;
> /* also update nr_pages */
> - clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
> + fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1);
> next_part:
> /* can be used for verification */
> map->m_llen = offset + cur - map->m_la;
> @@ -1309,7 +1300,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
> z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
> struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
> void *bi_private;
> - z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
> + z_erofs_next_pcluster_t owned_head = f->owned_head;
> /* bio is NULL initially, so no need to initialize last_{index,bdev} */
> pgoff_t last_index;
> struct block_device *last_bdev;
> @@ -1417,7 +1408,7 @@ static void z_erofs_runqueue(struct super_block *sb,
> {
> struct z_erofs_decompressqueue io[NR_JOBQUEUES];
>
> - if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
> + if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
> return;
> z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
>
> @@ -1517,7 +1508,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
> err = z_erofs_do_read_page(&f, page, &pagepool);
> z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
>
> - (void)z_erofs_collector_end(&f.clt);
> + (void)z_erofs_collector_end(&f);
>
> /* if some compressed cluster ready, need submit them anyway */
> z_erofs_runqueue(inode->i_sb, &f, &pagepool,
> @@ -1567,7 +1558,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
> put_page(page);
> }
> z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
> - (void)z_erofs_collector_end(&f.clt);
> + (void)z_erofs_collector_end(&f);
>
> z_erofs_runqueue(inode->i_sb, &f, &pagepool,
> z_erofs_get_sync_decompress_policy(sbi, nr_pages));
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] erofs: get rid of `struct z_erofs_collector'
2022-03-02 4:22 ` [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Yue Hu
@ 2022-03-02 5:06 ` Gao Xiang
2022-03-02 5:10 ` Yue Hu
0 siblings, 1 reply; 8+ messages in thread
From: Gao Xiang @ 2022-03-02 5:06 UTC (permalink / raw)
To: Yue Hu; +Cc: linux-erofs, Chao Yu, LKML, huyue2, zhangwen
Hi Yue,
On Wed, Mar 02, 2022 at 12:22:03PM +0800, Yue Hu wrote:
> Hi Xiang,
>
> On Wed, 2 Mar 2022 03:49:50 +0800
> Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
>
> > Avoid `struct z_erofs_collector' since there is another context
> > structure called "struct z_erofs_decompress_frontend".
> >
> > No logic changes.
> >
> > Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
> > ---
> > fs/erofs/zdata.c | 163 ++++++++++++++++++++++-------------------------
> > 1 file changed, 77 insertions(+), 86 deletions(-)
> >
> > diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> > index 423bc1a61da5..2673fc105861 100644
> > --- a/fs/erofs/zdata.c
> > +++ b/fs/erofs/zdata.c
> > @@ -192,7 +192,10 @@ enum z_erofs_collectmode {
> > COLLECT_PRIMARY_FOLLOWED,
> > };
> >
> > -struct z_erofs_collector {
> > +struct z_erofs_decompress_frontend {
> > + struct inode *const inode;
> > + struct erofs_map_blocks map;
> > +
> > struct z_erofs_pagevec_ctor vector;
> >
> > struct z_erofs_pcluster *pcl, *tailpcl;
> > @@ -202,13 +205,6 @@ struct z_erofs_collector {
> > z_erofs_next_pcluster_t owned_head;
> >
> > enum z_erofs_collectmode mode;
> > -};
> > -
> > -struct z_erofs_decompress_frontend {
> > - struct inode *const inode;
> > -
> > - struct z_erofs_collector clt;
> > - struct erofs_map_blocks map;
> >
> > bool readahead;
> > /* used for applying cache strategy on the fly */
> > @@ -216,30 +212,26 @@ struct z_erofs_decompress_frontend {
> > erofs_off_t headoffset;
> > };
> >
> > -#define COLLECTOR_INIT() { \
> > - .owned_head = Z_EROFS_PCLUSTER_TAIL, \
> > - .mode = COLLECT_PRIMARY_FOLLOWED }
> > -
> > #define DECOMPRESS_FRONTEND_INIT(__i) { \
> > - .inode = __i, .clt = COLLECTOR_INIT(), \
> > - .backmost = true, }
> > + .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
> > + .mode = COLLECT_PRIMARY_FOLLOWED }
> >
> > static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
> > static DEFINE_MUTEX(z_pagemap_global_lock);
> >
> > -static void preload_compressed_pages(struct z_erofs_collector *clt,
> > +static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
> > struct address_space *mc,
> > enum z_erofs_cache_alloctype type,
> > struct page **pagepool)
> > {
> > - struct z_erofs_pcluster *pcl = clt->pcl;
> > + struct z_erofs_pcluster *pcl = fe->pcl;
> > bool standalone = true;
> > gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
> > __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
> > struct page **pages;
> > pgoff_t index;
> >
> > - if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
> > + if (fe->mode < COLLECT_PRIMARY_FOLLOWED)
> > return;
> >
> > pages = pcl->compressed_pages;
> > @@ -288,7 +280,7 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
> > * managed cache since it can be moved to the bypass queue instead.
> > */
> > if (standalone)
> > - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> > + fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> > }
> >
> > /* called by erofs_shrinker to get rid of all compressed_pages */
> > @@ -350,47 +342,47 @@ int erofs_try_to_free_cached_page(struct page *page)
> > }
> >
> > /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
> > -static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
> > +static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
> > struct page *page)
> > {
> > - struct z_erofs_pcluster *const pcl = clt->pcl;
> > + struct z_erofs_pcluster *const pcl = fe->pcl;
> >
> > - while (clt->icpage_ptr > pcl->compressed_pages)
> > - if (!cmpxchg(--clt->icpage_ptr, NULL, page))
> > + while (fe->icpage_ptr > pcl->compressed_pages)
> > + if (!cmpxchg(--fe->icpage_ptr, NULL, page))
> > return true;
> > return false;
> > }
> >
> > /* callers must be with collection lock held */
> > -static int z_erofs_attach_page(struct z_erofs_collector *clt,
> > +static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
> > struct page *page, enum z_erofs_page_type type,
> > bool pvec_safereuse)
> > {
> > int ret;
> >
> > /* give priority for inplaceio */
> > - if (clt->mode >= COLLECT_PRIMARY &&
> > + if (fe->mode >= COLLECT_PRIMARY &&
> > type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
> > - z_erofs_try_inplace_io(clt, page))
> > + z_erofs_try_inplace_io(fe, page))
> > return 0;
> >
> > - ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
> > + ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
> > pvec_safereuse);
> > - clt->cl->vcnt += (unsigned int)ret;
> > + fe->cl->vcnt += (unsigned int)ret;
> > return ret ? 0 : -EAGAIN;
> > }
> >
> > -static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
> > +static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
> > {
> > - struct z_erofs_pcluster *pcl = clt->pcl;
> > - z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
> > + struct z_erofs_pcluster *pcl = f->pcl;
> > + z_erofs_next_pcluster_t *owned_head = &f->owned_head;
> >
> > /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
> > if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
> > *owned_head) == Z_EROFS_PCLUSTER_NIL) {
> > *owned_head = &pcl->next;
> > /* so we can attach this pcluster to our submission chain. */
> > - clt->mode = COLLECT_PRIMARY_FOLLOWED;
> > + f->mode = COLLECT_PRIMARY_FOLLOWED;
> > return;
> > }
> >
> > @@ -401,24 +393,24 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
> > if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
> > *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
> > *owned_head = Z_EROFS_PCLUSTER_TAIL;
> > - clt->mode = COLLECT_PRIMARY_HOOKED;
> > - clt->tailpcl = NULL;
> > + f->mode = COLLECT_PRIMARY_HOOKED;
> > + f->tailpcl = NULL;
> > return;
> > }
> > /* type 3, it belongs to a chain, but it isn't the end of the chain */
> > - clt->mode = COLLECT_PRIMARY;
> > + f->mode = COLLECT_PRIMARY;
> > }
> >
> > -static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
> > +static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
> > struct inode *inode,
> > struct erofs_map_blocks *map)
> > {
> > - struct z_erofs_pcluster *pcl = clt->pcl;
> > + struct z_erofs_pcluster *pcl = fe->pcl;
> > struct z_erofs_collection *cl;
> > unsigned int length;
> >
> > /* to avoid unexpected loop formed by corrupted images */
> > - if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
> > + if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
> > DBG_BUGON(1);
> > return -EFSCORRUPTED;
> > }
> > @@ -449,15 +441,15 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
> > }
> > mutex_lock(&cl->lock);
> > /* used to check tail merging loop due to corrupted images */
> > - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > - clt->tailpcl = pcl;
> > + if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > + fe->tailpcl = pcl;
> >
> > - z_erofs_try_to_claim_pcluster(clt);
> > - clt->cl = cl;
> > + z_erofs_try_to_claim_pcluster(fe);
> > + fe->cl = cl;
> > return 0;
> > }
> >
> > -static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > +static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
> > struct inode *inode,
> > struct erofs_map_blocks *map)
> > {
> > @@ -485,8 +477,8 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
> >
> > /* new pclusters should be claimed as type 1, primary and followed */
> > - pcl->next = clt->owned_head;
> > - clt->mode = COLLECT_PRIMARY_FOLLOWED;
> > + pcl->next = fe->owned_head;
> > + fe->mode = COLLECT_PRIMARY_FOLLOWED;
> >
> > cl = z_erofs_primarycollection(pcl);
> > cl->pageofs = map->m_la & ~PAGE_MASK;
> > @@ -512,18 +504,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > }
> >
> > if (grp != &pcl->obj) {
> > - clt->pcl = container_of(grp,
> > + fe->pcl = container_of(grp,
> > struct z_erofs_pcluster, obj);
> > err = -EEXIST;
> > goto err_out;
> > }
> > }
> > /* used to check tail merging loop due to corrupted images */
> > - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > - clt->tailpcl = pcl;
> > - clt->owned_head = &pcl->next;
> > - clt->pcl = pcl;
> > - clt->cl = cl;
> > + if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > + fe->tailpcl = pcl;
> > + fe->owned_head = &pcl->next;
> > + fe->pcl = pcl;
> > + fe->cl = cl;
> > return 0;
> >
> > err_out:
> > @@ -532,18 +524,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > return err;
> > }
> >
> > -static int z_erofs_collector_begin(struct z_erofs_collector *clt,
> > +static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
> > struct inode *inode,
> > struct erofs_map_blocks *map)
> > {
> > struct erofs_workgroup *grp;
> > int ret;
> >
> > - DBG_BUGON(clt->cl);
> > + DBG_BUGON(fe->cl);
> >
> > /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
> > - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
> > - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
> > + DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
> > + DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
> >
> > if (map->m_flags & EROFS_MAP_META) {
> > if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
> > @@ -555,28 +547,28 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
> >
> > grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
> > if (grp) {
> > - clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
> > + fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
> > } else {
> > tailpacking:
> > - ret = z_erofs_register_collection(clt, inode, map);
> > + ret = z_erofs_register_collection(fe, inode, map);
> > if (!ret)
> > goto out;
> > if (ret != -EEXIST)
> > return ret;
> > }
> >
> > - ret = z_erofs_lookup_collection(clt, inode, map);
> > + ret = z_erofs_lookup_collection(fe, inode, map);
> > if (ret) {
> > - erofs_workgroup_put(&clt->pcl->obj);
> > + erofs_workgroup_put(&fe->pcl->obj);
> > return ret;
> > }
> >
> > out:
> > - z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
> > - clt->cl->pagevec, clt->cl->vcnt);
> > + z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
> > + fe->cl->pagevec, fe->cl->vcnt);
> > /* since file-backed online pages are traversed in reverse order */
> > - clt->icpage_ptr = clt->pcl->compressed_pages +
> > - z_erofs_pclusterpages(clt->pcl);
> > + fe->icpage_ptr = fe->pcl->compressed_pages +
> > + z_erofs_pclusterpages(fe->pcl);
> > return 0;
> > }
> >
> > @@ -610,24 +602,24 @@ static void z_erofs_collection_put(struct z_erofs_collection *cl)
> > erofs_workgroup_put(&pcl->obj);
> > }
> >
> > -static bool z_erofs_collector_end(struct z_erofs_collector *clt)
> > +static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
> > {
> > - struct z_erofs_collection *cl = clt->cl;
> > + struct z_erofs_collection *cl = fe->cl;
> >
> > if (!cl)
> > return false;
> >
> > - z_erofs_pagevec_ctor_exit(&clt->vector, false);
> > + z_erofs_pagevec_ctor_exit(&fe->vector, false);
> > mutex_unlock(&cl->lock);
> >
> > /*
> > * if all pending pages are added, don't hold its reference
> > * any longer if the pcluster isn't hosted by ourselves.
> > */
> > - if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
> > + if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
> > z_erofs_collection_put(cl);
> >
> > - clt->cl = NULL;
> > + fe->cl = NULL;
> > return true;
> > }
> >
> > @@ -651,7 +643,6 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > struct inode *const inode = fe->inode;
> > struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
> > struct erofs_map_blocks *const map = &fe->map;
> > - struct z_erofs_collector *const clt = &fe->clt;
> > const loff_t offset = page_offset(page);
> > bool tight = true;
> >
> > @@ -672,7 +663,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > if (offset + cur >= map->m_la &&
> > offset + cur < map->m_la + map->m_llen) {
> > /* didn't get a valid collection previously (very rare) */
> > - if (!clt->cl)
> > + if (!fe->cl)
> > goto restart_now;
> > goto hitted;
> > }
> > @@ -680,7 +671,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > /* go ahead the next map_blocks */
> > erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
> >
> > - if (z_erofs_collector_end(clt))
> > + if (z_erofs_collector_end(fe))
> > fe->backmost = false;
> >
> > map->m_la = offset + cur;
> > @@ -693,11 +684,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > if (!(map->m_flags & EROFS_MAP_MAPPED))
> > goto hitted;
> >
> > - err = z_erofs_collector_begin(clt, inode, map);
> > + err = z_erofs_collector_begin(fe, inode, map);
>
> now, we can get 'inode' and 'map' from 'fe'. so, it should be z_erofs_collector_begin(fe)?
> if it's, need to change z_erofs_{register | lookup}_collection() correspondingly.
This is just a trivial cleanup (it just merges `collector' into `frontend').
We can update them in the following patches. Also, I'd like to rename
the `collection' concept as well. However, since I'm not a native English
speaker, I haven't found a more proper name yet.
Thanks,
Gao Xiang
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] erofs: get rid of `struct z_erofs_collector'
2022-03-02 5:06 ` Gao Xiang
@ 2022-03-02 5:10 ` Yue Hu
0 siblings, 0 replies; 8+ messages in thread
From: Yue Hu @ 2022-03-02 5:10 UTC (permalink / raw)
To: Gao Xiang; +Cc: linux-erofs, Chao Yu, LKML, huyue2, zhangwen
On Wed, 2 Mar 2022 13:06:39 +0800
Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
> Hi Yue,
>
> On Wed, Mar 02, 2022 at 12:22:03PM +0800, Yue Hu wrote:
> > Hi Xiang,
> >
> > On Wed, 2 Mar 2022 03:49:50 +0800
> > Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
> >
> > > Avoid `struct z_erofs_collector' since there is another context
> > > structure called "struct z_erofs_decompress_frontend".
> > >
> > > No logic changes.
> > >
> > > Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
> > > ---
> > > fs/erofs/zdata.c | 163 ++++++++++++++++++++++-------------------------
> > > 1 file changed, 77 insertions(+), 86 deletions(-)
> > >
> > > diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> > > index 423bc1a61da5..2673fc105861 100644
> > > --- a/fs/erofs/zdata.c
> > > +++ b/fs/erofs/zdata.c
> > > @@ -192,7 +192,10 @@ enum z_erofs_collectmode {
> > > COLLECT_PRIMARY_FOLLOWED,
> > > };
> > >
> > > -struct z_erofs_collector {
> > > +struct z_erofs_decompress_frontend {
> > > + struct inode *const inode;
> > > + struct erofs_map_blocks map;
> > > +
> > > struct z_erofs_pagevec_ctor vector;
> > >
> > > struct z_erofs_pcluster *pcl, *tailpcl;
> > > @@ -202,13 +205,6 @@ struct z_erofs_collector {
> > > z_erofs_next_pcluster_t owned_head;
> > >
> > > enum z_erofs_collectmode mode;
> > > -};
> > > -
> > > -struct z_erofs_decompress_frontend {
> > > - struct inode *const inode;
> > > -
> > > - struct z_erofs_collector clt;
> > > - struct erofs_map_blocks map;
> > >
> > > bool readahead;
> > > /* used for applying cache strategy on the fly */
> > > @@ -216,30 +212,26 @@ struct z_erofs_decompress_frontend {
> > > erofs_off_t headoffset;
> > > };
> > >
> > > -#define COLLECTOR_INIT() { \
> > > - .owned_head = Z_EROFS_PCLUSTER_TAIL, \
> > > - .mode = COLLECT_PRIMARY_FOLLOWED }
> > > -
> > > #define DECOMPRESS_FRONTEND_INIT(__i) { \
> > > - .inode = __i, .clt = COLLECTOR_INIT(), \
> > > - .backmost = true, }
> > > + .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
> > > + .mode = COLLECT_PRIMARY_FOLLOWED }
> > >
> > > static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
> > > static DEFINE_MUTEX(z_pagemap_global_lock);
> > >
> > > -static void preload_compressed_pages(struct z_erofs_collector *clt,
> > > +static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
> > > struct address_space *mc,
> > > enum z_erofs_cache_alloctype type,
> > > struct page **pagepool)
> > > {
> > > - struct z_erofs_pcluster *pcl = clt->pcl;
> > > + struct z_erofs_pcluster *pcl = fe->pcl;
> > > bool standalone = true;
> > > gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
> > > __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
> > > struct page **pages;
> > > pgoff_t index;
> > >
> > > - if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
> > > + if (fe->mode < COLLECT_PRIMARY_FOLLOWED)
> > > return;
> > >
> > > pages = pcl->compressed_pages;
> > > @@ -288,7 +280,7 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
> > > * managed cache since it can be moved to the bypass queue instead.
> > > */
> > > if (standalone)
> > > - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> > > + fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> > > }
> > >
> > > /* called by erofs_shrinker to get rid of all compressed_pages */
> > > @@ -350,47 +342,47 @@ int erofs_try_to_free_cached_page(struct page *page)
> > > }
> > >
> > > /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
> > > -static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
> > > +static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
> > > struct page *page)
> > > {
> > > - struct z_erofs_pcluster *const pcl = clt->pcl;
> > > + struct z_erofs_pcluster *const pcl = fe->pcl;
> > >
> > > - while (clt->icpage_ptr > pcl->compressed_pages)
> > > - if (!cmpxchg(--clt->icpage_ptr, NULL, page))
> > > + while (fe->icpage_ptr > pcl->compressed_pages)
> > > + if (!cmpxchg(--fe->icpage_ptr, NULL, page))
> > > return true;
> > > return false;
> > > }
> > >
> > > /* callers must be with collection lock held */
> > > -static int z_erofs_attach_page(struct z_erofs_collector *clt,
> > > +static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
> > > struct page *page, enum z_erofs_page_type type,
> > > bool pvec_safereuse)
> > > {
> > > int ret;
> > >
> > > /* give priority for inplaceio */
> > > - if (clt->mode >= COLLECT_PRIMARY &&
> > > + if (fe->mode >= COLLECT_PRIMARY &&
> > > type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
> > > - z_erofs_try_inplace_io(clt, page))
> > > + z_erofs_try_inplace_io(fe, page))
> > > return 0;
> > >
> > > - ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
> > > + ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
> > > pvec_safereuse);
> > > - clt->cl->vcnt += (unsigned int)ret;
> > > + fe->cl->vcnt += (unsigned int)ret;
> > > return ret ? 0 : -EAGAIN;
> > > }
> > >
> > > -static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
> > > +static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
> > > {
> > > - struct z_erofs_pcluster *pcl = clt->pcl;
> > > - z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
> > > + struct z_erofs_pcluster *pcl = f->pcl;
> > > + z_erofs_next_pcluster_t *owned_head = &f->owned_head;
> > >
> > > /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
> > > if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
> > > *owned_head) == Z_EROFS_PCLUSTER_NIL) {
> > > *owned_head = &pcl->next;
> > > /* so we can attach this pcluster to our submission chain. */
> > > - clt->mode = COLLECT_PRIMARY_FOLLOWED;
> > > + f->mode = COLLECT_PRIMARY_FOLLOWED;
> > > return;
> > > }
> > >
> > > @@ -401,24 +393,24 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
> > > if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
> > > *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
> > > *owned_head = Z_EROFS_PCLUSTER_TAIL;
> > > - clt->mode = COLLECT_PRIMARY_HOOKED;
> > > - clt->tailpcl = NULL;
> > > + f->mode = COLLECT_PRIMARY_HOOKED;
> > > + f->tailpcl = NULL;
> > > return;
> > > }
> > > /* type 3, it belongs to a chain, but it isn't the end of the chain */
> > > - clt->mode = COLLECT_PRIMARY;
> > > + f->mode = COLLECT_PRIMARY;
> > > }
> > >
> > > -static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
> > > +static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
> > > struct inode *inode,
> > > struct erofs_map_blocks *map)
> > > {
> > > - struct z_erofs_pcluster *pcl = clt->pcl;
> > > + struct z_erofs_pcluster *pcl = fe->pcl;
> > > struct z_erofs_collection *cl;
> > > unsigned int length;
> > >
> > > /* to avoid unexpected loop formed by corrupted images */
> > > - if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
> > > + if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
> > > DBG_BUGON(1);
> > > return -EFSCORRUPTED;
> > > }
> > > @@ -449,15 +441,15 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
> > > }
> > > mutex_lock(&cl->lock);
> > > /* used to check tail merging loop due to corrupted images */
> > > - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > > - clt->tailpcl = pcl;
> > > + if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > > + fe->tailpcl = pcl;
> > >
> > > - z_erofs_try_to_claim_pcluster(clt);
> > > - clt->cl = cl;
> > > + z_erofs_try_to_claim_pcluster(fe);
> > > + fe->cl = cl;
> > > return 0;
> > > }
> > >
> > > -static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > > +static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
> > > struct inode *inode,
> > > struct erofs_map_blocks *map)
> > > {
> > > @@ -485,8 +477,8 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > > Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
> > >
> > > /* new pclusters should be claimed as type 1, primary and followed */
> > > - pcl->next = clt->owned_head;
> > > - clt->mode = COLLECT_PRIMARY_FOLLOWED;
> > > + pcl->next = fe->owned_head;
> > > + fe->mode = COLLECT_PRIMARY_FOLLOWED;
> > >
> > > cl = z_erofs_primarycollection(pcl);
> > > cl->pageofs = map->m_la & ~PAGE_MASK;
> > > @@ -512,18 +504,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > > }
> > >
> > > if (grp != &pcl->obj) {
> > > - clt->pcl = container_of(grp,
> > > + fe->pcl = container_of(grp,
> > > struct z_erofs_pcluster, obj);
> > > err = -EEXIST;
> > > goto err_out;
> > > }
> > > }
> > > /* used to check tail merging loop due to corrupted images */
> > > - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > > - clt->tailpcl = pcl;
> > > - clt->owned_head = &pcl->next;
> > > - clt->pcl = pcl;
> > > - clt->cl = cl;
> > > + if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
> > > + fe->tailpcl = pcl;
> > > + fe->owned_head = &pcl->next;
> > > + fe->pcl = pcl;
> > > + fe->cl = cl;
> > > return 0;
> > >
> > > err_out:
> > > @@ -532,18 +524,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
> > > return err;
> > > }
> > >
> > > -static int z_erofs_collector_begin(struct z_erofs_collector *clt,
> > > +static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
> > > struct inode *inode,
> > > struct erofs_map_blocks *map)
> > > {
> > > struct erofs_workgroup *grp;
> > > int ret;
> > >
> > > - DBG_BUGON(clt->cl);
> > > + DBG_BUGON(fe->cl);
> > >
> > > /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
> > > - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
> > > - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
> > > + DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
> > > + DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
> > >
> > > if (map->m_flags & EROFS_MAP_META) {
> > > if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
> > > @@ -555,28 +547,28 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
> > >
> > > grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
> > > if (grp) {
> > > - clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
> > > + fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
> > > } else {
> > > tailpacking:
> > > - ret = z_erofs_register_collection(clt, inode, map);
> > > + ret = z_erofs_register_collection(fe, inode, map);
> > > if (!ret)
> > > goto out;
> > > if (ret != -EEXIST)
> > > return ret;
> > > }
> > >
> > > - ret = z_erofs_lookup_collection(clt, inode, map);
> > > + ret = z_erofs_lookup_collection(fe, inode, map);
> > > if (ret) {
> > > - erofs_workgroup_put(&clt->pcl->obj);
> > > + erofs_workgroup_put(&fe->pcl->obj);
> > > return ret;
> > > }
> > >
> > > out:
> > > - z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
> > > - clt->cl->pagevec, clt->cl->vcnt);
> > > + z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
> > > + fe->cl->pagevec, fe->cl->vcnt);
> > > /* since file-backed online pages are traversed in reverse order */
> > > - clt->icpage_ptr = clt->pcl->compressed_pages +
> > > - z_erofs_pclusterpages(clt->pcl);
> > > + fe->icpage_ptr = fe->pcl->compressed_pages +
> > > + z_erofs_pclusterpages(fe->pcl);
> > > return 0;
> > > }
> > >
> > > @@ -610,24 +602,24 @@ static void z_erofs_collection_put(struct z_erofs_collection *cl)
> > > erofs_workgroup_put(&pcl->obj);
> > > }
> > >
> > > -static bool z_erofs_collector_end(struct z_erofs_collector *clt)
> > > +static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
> > > {
> > > - struct z_erofs_collection *cl = clt->cl;
> > > + struct z_erofs_collection *cl = fe->cl;
> > >
> > > if (!cl)
> > > return false;
> > >
> > > - z_erofs_pagevec_ctor_exit(&clt->vector, false);
> > > + z_erofs_pagevec_ctor_exit(&fe->vector, false);
> > > mutex_unlock(&cl->lock);
> > >
> > > /*
> > > * if all pending pages are added, don't hold its reference
> > > * any longer if the pcluster isn't hosted by ourselves.
> > > */
> > > - if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
> > > + if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
> > > z_erofs_collection_put(cl);
> > >
> > > - clt->cl = NULL;
> > > + fe->cl = NULL;
> > > return true;
> > > }
> > >
> > > @@ -651,7 +643,6 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > > struct inode *const inode = fe->inode;
> > > struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
> > > struct erofs_map_blocks *const map = &fe->map;
> > > - struct z_erofs_collector *const clt = &fe->clt;
> > > const loff_t offset = page_offset(page);
> > > bool tight = true;
> > >
> > > @@ -672,7 +663,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > > if (offset + cur >= map->m_la &&
> > > offset + cur < map->m_la + map->m_llen) {
> > > /* didn't get a valid collection previously (very rare) */
> > > - if (!clt->cl)
> > > + if (!fe->cl)
> > > goto restart_now;
> > > goto hitted;
> > > }
> > > @@ -680,7 +671,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > > /* go ahead the next map_blocks */
> > > erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
> > >
> > > - if (z_erofs_collector_end(clt))
> > > + if (z_erofs_collector_end(fe))
> > > fe->backmost = false;
> > >
> > > map->m_la = offset + cur;
> > > @@ -693,11 +684,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> > > if (!(map->m_flags & EROFS_MAP_MAPPED))
> > > goto hitted;
> > >
> > > - err = z_erofs_collector_begin(clt, inode, map);
> > > + err = z_erofs_collector_begin(fe, inode, map);
> >
> > now, we can get 'inode' and 'map' from 'fe'. so, it should be z_erofs_collector_begin(fe)?
> > if it's, need to change z_erofs_{register | lookup}_collection() correspondingly.
>
> This is just a trivial cleanup (it just merges `collector' into `frontend').
>
> We can update them in the following patches. Also, I'd like to rename
> the `collection' concept as well. However, since I'm not a native English
> speaker, I haven't found a more proper name yet.
>
> Thanks,
> Gao Xiang
understood.
Reviewed-by: Yue Hu <huyue2@coolpad.com>
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/2] erofs: clean up preload_compressed_pages()
2022-03-01 19:49 ` [PATCH 2/2] erofs: clean up preload_compressed_pages() Gao Xiang
@ 2022-03-03 4:09 ` Yue Hu
2022-03-11 8:20 ` Chao Yu
1 sibling, 0 replies; 8+ messages in thread
From: Yue Hu @ 2022-03-03 4:09 UTC (permalink / raw)
To: Gao Xiang; +Cc: linux-erofs, Chao Yu, LKML, huyue2, zhangwen
On Wed, 2 Mar 2022 03:49:51 +0800
Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
> Rename preload_compressed_pages() as z_erofs_bind_cache()
> since we're trying to prepare for adapting folios.
>
> Also, add a comment for the gfp setting. No logic changes.
>
> Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
> ---
> fs/erofs/zdata.c | 18 ++++++++++--------
> 1 file changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 2673fc105861..59aecf42e45c 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -219,13 +219,17 @@ struct z_erofs_decompress_frontend {
> static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
> static DEFINE_MUTEX(z_pagemap_global_lock);
>
> -static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
> - struct address_space *mc,
> - enum z_erofs_cache_alloctype type,
> - struct page **pagepool)
> +static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
> + enum z_erofs_cache_alloctype type,
> + struct page **pagepool)
> {
> + struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
> struct z_erofs_pcluster *pcl = fe->pcl;
> bool standalone = true;
> + /*
> + * optimistic allocation without direct reclaim since inplace I/O
> + * can be used if low memory otherwise.
> + */
> gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
> __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
> struct page **pages;
> @@ -703,17 +707,15 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> WRITE_ONCE(fe->pcl->compressed_pages[0], fe->map.buf.page);
> fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
> } else {
> - /* preload all compressed pages (can change mode if needed) */
> + /* bind cache first when cached decompression is preferred */
> if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
> map->m_la))
> cache_strategy = TRYALLOC;
> else
> cache_strategy = DONTALLOC;
>
> - preload_compressed_pages(fe, MNGD_MAPPING(sbi),
> - cache_strategy, pagepool);
> + z_erofs_bind_cache(fe, cache_strategy, pagepool);
> }
> -
> hitted:
> /*
> * Ensure the current partial page belongs to this submit chain rather
Reviewed-by: Yue Hu <huyue2@coolpad.com>
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] erofs: get rid of `struct z_erofs_collector'
2022-03-01 19:49 [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Gao Xiang
2022-03-01 19:49 ` [PATCH 2/2] erofs: clean up preload_compressed_pages() Gao Xiang
2022-03-02 4:22 ` [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Yue Hu
@ 2022-03-11 8:07 ` Chao Yu
2 siblings, 0 replies; 8+ messages in thread
From: Chao Yu @ 2022-03-11 8:07 UTC (permalink / raw)
To: Gao Xiang, linux-erofs; +Cc: LKML
On 2022/3/2 3:49, Gao Xiang wrote:
> Avoid `struct z_erofs_collector' since there is another context
> structure called "struct z_erofs_decompress_frontend".
>
> No logic changes.
>
> Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Thanks,
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/2] erofs: clean up preload_compressed_pages()
2022-03-01 19:49 ` [PATCH 2/2] erofs: clean up preload_compressed_pages() Gao Xiang
2022-03-03 4:09 ` Yue Hu
@ 2022-03-11 8:20 ` Chao Yu
1 sibling, 0 replies; 8+ messages in thread
From: Chao Yu @ 2022-03-11 8:20 UTC (permalink / raw)
To: Gao Xiang, linux-erofs; +Cc: LKML
On 2022/3/2 3:49, Gao Xiang wrote:
> Rename preload_compressed_pages() as z_erofs_bind_cache()
> since we're trying to prepare for adapting folios.
>
> Also, add a comment for the gfp setting. No logic changes.
>
> Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Thanks,
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2022-03-11 8:20 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-01 19:49 [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Gao Xiang
2022-03-01 19:49 ` [PATCH 2/2] erofs: clean up preload_compressed_pages() Gao Xiang
2022-03-03 4:09 ` Yue Hu
2022-03-11 8:20 ` Chao Yu
2022-03-02 4:22 ` [PATCH 1/2] erofs: get rid of `struct z_erofs_collector' Yue Hu
2022-03-02 5:06 ` Gao Xiang
2022-03-02 5:10 ` Yue Hu
2022-03-11 8:07 ` Chao Yu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox