* [PATCH RFC] f2fs: fix to use spinlock to avoid page.private update race
@ 2023-04-10 9:39 Chao Yu
2023-04-10 23:25 ` Jaegeuk Kim
0 siblings, 1 reply; 2+ messages in thread
From: Chao Yu @ 2023-04-10 9:39 UTC (permalink / raw)
To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu
There may be a subtle race condition that makes PagePrivate and page_private
inconsistent, resulting in the page count being decreased incorrectly.
Introduce a per-inode spinlock to avoid such a condition.
Signed-off-by: Chao Yu <chao@kernel.org>
---
fs/f2fs/f2fs.h | 19 ++++++++++++++++++-
fs/f2fs/super.c | 2 ++
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c378aedcadea..6b31bef5853e 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -856,6 +856,8 @@ struct f2fs_inode_info {
unsigned int atomic_write_cnt;
loff_t original_i_size; /* original i_size before atomic write */
+
+ spinlock_t private_lock; /* protect page->private */
};
static inline void get_read_extent_info(struct extent_info *ext,
@@ -1413,21 +1415,28 @@ static inline bool page_private_##name(struct page *page) \
test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}
+static inline struct f2fs_inode_info *F2FS_I(struct inode *inode);
#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
+ unsigned long flags; \
+ spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags); \
if (!PagePrivate(page)) \
attach_page_private(page, (void *)page->private); \
set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+ spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags); \
}
#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
+ unsigned long flags; \
+ spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags); \
clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
- if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
+ if (page_private(page) == (BIT(PAGE_PRIVATE_NOT_POINTER))) \
detach_page_private(page); \
+ spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags); \
}
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
@@ -1456,17 +1465,25 @@ static inline unsigned long get_page_private_data(struct page *page)
static inline void set_page_private_data(struct page *page, unsigned long data)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags);
if (!PagePrivate(page))
attach_page_private(page, 0);
set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
page_private(page) |= data << PAGE_PRIVATE_MAX;
+ spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags);
}
static inline void clear_page_private_data(struct page *page)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags);
page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
detach_page_private(page);
+ spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags);
}
/* For compression */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a1b570a5e50f..555424dd85fd 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1419,6 +1419,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
init_f2fs_rwsem(&fi->i_xattr_sem);
+ spin_lock_init(&fi->private_lock);
+
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
--
2.25.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH RFC] f2fs: fix to use spinlock to avoid page.private update race
2023-04-10 9:39 [PATCH RFC] f2fs: fix to use spinlock to avoid page.private update race Chao Yu
@ 2023-04-10 23:25 ` Jaegeuk Kim
0 siblings, 0 replies; 2+ messages in thread
From: Jaegeuk Kim @ 2023-04-10 23:25 UTC (permalink / raw)
To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel
On 04/10, Chao Yu wrote:
> There may be a subtle race condition that makes PagePrivate and page_private
> inconsistent, resulting in the page count being decreased incorrectly.
> Introduce a per-inode spinlock to avoid such a condition.
No...what have you found? The set/clear.. were supposed to be done in page_lock,
and checking the flag should not corrupt any memory.
>
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> fs/f2fs/f2fs.h | 19 ++++++++++++++++++-
> fs/f2fs/super.c | 2 ++
> 2 files changed, 20 insertions(+), 1 deletion(-)
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index c378aedcadea..6b31bef5853e 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -856,6 +856,8 @@ struct f2fs_inode_info {
>
> unsigned int atomic_write_cnt;
> loff_t original_i_size; /* original i_size before atomic write */
> +
> + spinlock_t private_lock; /* protect page->private */
> };
>
> static inline void get_read_extent_info(struct extent_info *ext,
> @@ -1413,21 +1415,28 @@ static inline bool page_private_##name(struct page *page) \
> test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
> }
>
> +static inline struct f2fs_inode_info *F2FS_I(struct inode *inode);
> #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
> static inline void set_page_private_##name(struct page *page) \
> { \
> + unsigned long flags; \
> + spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags); \
> if (!PagePrivate(page)) \
> attach_page_private(page, (void *)page->private); \
> set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
> set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
> + spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags); \
> }
>
> #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
> static inline void clear_page_private_##name(struct page *page) \
> { \
> + unsigned long flags; \
> + spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags); \
> clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
> - if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
> + if (page_private(page) == (BIT(PAGE_PRIVATE_NOT_POINTER))) \
> detach_page_private(page); \
> + spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags); \
> }
>
> PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
> @@ -1456,17 +1465,25 @@ static inline unsigned long get_page_private_data(struct page *page)
>
> static inline void set_page_private_data(struct page *page, unsigned long data)
> {
> + unsigned long flags;
> +
> + spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags);
> if (!PagePrivate(page))
> attach_page_private(page, 0);
> set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
> page_private(page) |= data << PAGE_PRIVATE_MAX;
> + spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags);
> }
>
> static inline void clear_page_private_data(struct page *page)
> {
> + unsigned long flags;
> +
> + spin_lock_irqsave(&F2FS_I(page->mapping->host)->private_lock, flags);
> page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
> if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
> detach_page_private(page);
> + spin_unlock_irqrestore(&F2FS_I(page->mapping->host)->private_lock, flags);
> }
>
> /* For compression */
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index a1b570a5e50f..555424dd85fd 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -1419,6 +1419,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
> init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
> init_f2fs_rwsem(&fi->i_xattr_sem);
>
> + spin_lock_init(&fi->private_lock);
> +
> /* Will be used by directory only */
> fi->i_dir_level = F2FS_SB(sb)->dir_level;
>
> --
> 2.25.1
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2023-04-10 23:26 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-04-10 9:39 [PATCH RFC] f2fs: fix to use spinlock to avoid page.private update race Chao Yu
2023-04-10 23:25 ` Jaegeuk Kim
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox