* [V4 PATCH 1/2] tmpfs: add fallocate support
@ 2011-11-29 5:33 Cong Wang
2011-11-29 5:33 ` [V2 PATCH 2/2] fs: wire up .truncate_range and .fallocate Cong Wang
2011-11-29 6:02 ` [V4 PATCH 1/2] tmpfs: add fallocate support KAMEZAWA Hiroyuki
0 siblings, 2 replies; 5+ messages in thread
From: Cong Wang @ 2011-11-29 5:33 UTC (permalink / raw)
To: linux-kernel
Cc: akpm, Pekka Enberg, Christoph Hellwig, Hugh Dickins, Dave Hansen,
Lennart Poettering, Kay Sievers, KOSAKI Motohiro, WANG Cong,
linux-mm
Systemd needs tmpfs to support fallocate [1], to be able
to safely use mmap(), regarding SIGBUS, on files on the
/dev/shm filesystem. The glibc fallback loop for -ENOSYS
on fallocate is just ugly.
This patch adds fallocate support to tmpfs, and as we
already have shmem_truncate_range(), it is also easy to
add FALLOC_FL_PUNCH_HOLE support too.
1. http://lkml.org/lkml/2011/10/20/275
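For reference, a minimal userspace sketch of the pattern described above:
preallocate the backing pages of a tmpfs file with fallocate() before
mmap()ing it, so that writes through the mapping cannot hit SIGBUS when
space runs out. The file name and size are illustrative only, not part of
this patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 1 << 20;	/* 1 MiB, arbitrary */
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Reserve the backing pages up front: fail with ENOSPC here
	 * instead of taking SIGBUS later when writing through the mapping. */
	if (fallocate(fd, 0, 0, len) < 0) {
		perror("fallocate");	/* ENOSYS is what the glibc fallback loop papers over */
		return 1;
	}
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);		/* safe: the space is already allocated */
	munmap(p, len);
	close(fd);
	return 0;
}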
V3->V4:
Handle 'undo' ENOSPC more correctly.
V2->V3:
a) Read i_size directly after holding i_mutex;
b) Call page_cache_release() too after shmem_getpage();
c) Undo previous changes when -ENOSPC.
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lennart Poettering <lennart@poettering.net>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: WANG Cong <amwang@redhat.com>
---
mm/shmem.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 90 insertions(+), 0 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index d672250..90c835b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -30,6 +30,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
+#include <linux/falloc.h>
static struct vfsmount *shm_mnt;
@@ -1016,6 +1017,35 @@ failed:
return error;
}
+static void shmem_putpage_noswap(struct inode *inode, pgoff_t index)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct shmem_inode_info *info;
+ struct shmem_sb_info *sbinfo;
+ struct page *page;
+
+ page = find_lock_page(mapping, index);
+
+ if (page) {
+ info = SHMEM_I(inode);
+ sbinfo = SHMEM_SB(inode->i_sb);
+ shmem_acct_block(info->flags);
+ if (PageDirty(page)) {
+ ClearPageDirty(page);
+ delete_from_page_cache(page);
+ spin_lock(&info->lock);
+ info->alloced--;
+ inode->i_blocks -= BLOCKS_PER_PAGE;
+ spin_unlock(&info->lock);
+ }
+ if (sbinfo->max_blocks)
+ percpu_counter_add(&sbinfo->used_blocks, -1);
+ shmem_unacct_blocks(info->flags, 1);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+}
+
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
@@ -1431,6 +1461,65 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
+static long shmem_fallocate(struct file *file, int mode,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t end = DIV_ROUND_UP((offset + len), PAGE_CACHE_SIZE);
+ pgoff_t index = start;
+ loff_t i_size;
+ struct page *page = NULL;
+ int ret = 0;
+
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+
+ mutex_lock(&inode->i_mutex);
+ i_size = inode->i_size;
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (!(offset > i_size || (end << PAGE_CACHE_SHIFT) > i_size))
+ shmem_truncate_range(inode, offset,
+ (end << PAGE_CACHE_SHIFT) - 1);
+ goto unlock;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE)) {
+ ret = inode_newsize_ok(inode, (offset + len));
+ if (ret)
+ goto unlock;
+ }
+
+ while (index < end) {
+ ret = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
+ if (ret) {
+ if (ret == -ENOSPC)
+ goto undo;
+ else
+ goto unlock;
+ }
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ index++;
+ }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && (index << PAGE_CACHE_SHIFT) > i_size)
+ i_size_write(inode, index << PAGE_CACHE_SHIFT);
+
+ goto unlock;
+
+undo:
+ while (index > start) {
+ shmem_putpage_noswap(inode, index);
+ index--;
+ }
+
+unlock:
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2286,6 +2375,7 @@ static const struct file_operations shmem_file_operations = {
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
+ .fallocate = shmem_fallocate,
#endif
};
* [V2 PATCH 2/2] fs: wire up .truncate_range and .fallocate
2011-11-29 5:33 [V4 PATCH 1/2] tmpfs: add fallocate support Cong Wang
@ 2011-11-29 5:33 ` Cong Wang
2011-11-29 6:02 ` [V4 PATCH 1/2] tmpfs: add fallocate support KAMEZAWA Hiroyuki
1 sibling, 0 replies; 5+ messages in thread
From: Cong Wang @ 2011-11-29 5:33 UTC (permalink / raw)
To: linux-kernel
Cc: akpm, Hugh Dickins, Christoph Hellwig, Al Viro, WANG Cong,
Matthew Wilcox, Andrea Arcangeli, Rik van Riel, Mel Gorman,
Minchan Kim, Johannes Weiner, linux-fsdevel, linux-mm
V1->V2:
Move tmpfs stuff into shmem_fallocate(), suggested by Christoph.
As Hugh suggested, with FALLOC_FL_PUNCH_HOLE we can use do_fallocate()
to implement madvise_remove() and finally remove the .truncate_range callback.
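For reference, a small userspace sketch of the two paths this rewiring
serves: punching a hole directly with fallocate(FALLOC_FL_PUNCH_HOLE), and
madvise(MADV_REMOVE) on a shared tmpfs mapping, which with this patch
reaches the same code via do_fallocate(). The file name and sizes are
illustrative only:

#define _GNU_SOURCE
#include <fcntl.h>
#ifndef FALLOC_FL_PUNCH_HOLE
#include <linux/falloc.h>
#endif
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t len = 4 << 20;	/* 4 MiB, arbitrary */
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	/* Punch a hole in the middle of the file, keeping i_size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 1 << 20) < 0)
		perror("fallocate(PUNCH_HOLE)");

	/* madvise(MADV_REMOVE) on a shared mapping of the same file frees
	 * a range too; with this patch madvise_remove() reaches the same
	 * hole-punching code through do_fallocate(). */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p != MAP_FAILED) {
		if (madvise(p, 1 << 20, MADV_REMOVE) < 0)
			perror("madvise(MADV_REMOVE)");
		munmap(p, len);
	}
	close(fd);
	return 0;
}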
Cc: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: WANG Cong <amwang@redhat.com>
---
include/linux/fs.h | 1 -
include/linux/mm.h | 2 +-
mm/madvise.c | 6 +++---
mm/shmem.c | 12 +++++++++++-
mm/truncate.c | 22 +++++-----------------
5 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e313022..266df73 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1635,7 +1635,6 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
} ____cacheline_aligned;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c..0582ce8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -951,7 +951,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
-extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+extern int vmtruncate_file_range(struct file *file, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
diff --git a/mm/madvise.c b/mm/madvise.c
index 74bf193..3a281b7 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -194,7 +194,7 @@ static long madvise_remove(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
- struct address_space *mapping;
+ struct file *file;
loff_t offset, endoff;
int error;
@@ -211,7 +211,7 @@ static long madvise_remove(struct vm_area_struct *vma,
if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
return -EACCES;
- mapping = vma->vm_file->f_mapping;
+ file = vma->vm_file;
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
@@ -220,7 +220,7 @@ static long madvise_remove(struct vm_area_struct *vma,
/* vmtruncate_range needs to take i_mutex */
up_read(&current->mm->mmap_sem);
- error = vmtruncate_range(mapping->host, offset, endoff);
+ error = vmtruncate_file_range(file, offset, endoff);
down_read(&current->mm->mmap_sem);
return error;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 90c835b..b435da8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1465,6 +1465,7 @@ static long shmem_fallocate(struct file *file, int mode,
loff_t offset, loff_t len)
{
struct inode *inode = file->f_path.dentry->d_inode;
+ struct address_space *mapping = file->f_mapping;
pgoff_t start = offset >> PAGE_CACHE_SHIFT;
pgoff_t end = DIV_ROUND_UP((offset + len), PAGE_CACHE_SIZE);
pgoff_t index = start;
@@ -1476,6 +1477,12 @@ static long shmem_fallocate(struct file *file, int mode,
return -ETXTBSY;
mutex_lock(&inode->i_mutex);
+
+ if (mapping) {
+ inode_dio_wait(mapping->host);
+ unmap_mapping_range(mapping, offset, len, 1);
+ }
+
i_size = inode->i_size;
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (!(offset > i_size || (end << PAGE_CACHE_SHIFT) > i_size))
@@ -1507,6 +1514,10 @@ static long shmem_fallocate(struct file *file, int mode,
if (!(mode & FALLOC_FL_KEEP_SIZE) && (index << PAGE_CACHE_SHIFT) > i_size)
i_size_write(inode, index << PAGE_CACHE_SHIFT);
+ /* unmap again to remove racily COWed private pages */
+ if (mapping)
+ unmap_mapping_range(mapping, offset, len, 1);
+
goto unlock;
undo:
@@ -2381,7 +2392,6 @@ static const struct file_operations shmem_file_operations = {
static const struct inode_operations shmem_inode_operations = {
.setattr = shmem_setattr,
- .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
diff --git a/mm/truncate.c b/mm/truncate.c
index 632b15e..5a7ddda 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -20,6 +20,7 @@
#include <linux/buffer_head.h> /* grr. try_to_release_page,
do_invalidatepage */
#include <linux/cleancache.h>
+#include <linux/falloc.h>
#include "internal.h"
@@ -602,27 +603,14 @@ int vmtruncate(struct inode *inode, loff_t newsize)
}
EXPORT_SYMBOL(vmtruncate);
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+int vmtruncate_file_range(struct file *file, loff_t lstart, loff_t lend)
{
- struct address_space *mapping = inode->i_mapping;
loff_t holebegin = round_up(lstart, PAGE_SIZE);
loff_t holelen = 1 + lend - holebegin;
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- */
- if (!inode->i_op->truncate_range)
+ if (!file->f_op->fallocate)
return -ENOSYS;
- mutex_lock(&inode->i_mutex);
- inode_dio_wait(inode);
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- inode->i_op->truncate_range(inode, lstart, lend);
- /* unmap again to remove racily COWed private pages */
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- mutex_unlock(&inode->i_mutex);
-
- return 0;
+ return do_fallocate(file, FALLOC_FL_KEEP_SIZE|FALLOC_FL_PUNCH_HOLE,
+ holebegin, holelen);
}
* Re: [V4 PATCH 1/2] tmpfs: add fallocate support
2011-11-29 5:33 [V4 PATCH 1/2] tmpfs: add fallocate support Cong Wang
2011-11-29 5:33 ` [V2 PATCH 2/2] fs: wire up .truncate_range and .fallocate Cong Wang
@ 2011-11-29 6:02 ` KAMEZAWA Hiroyuki
2011-11-29 7:23 ` Cong Wang
1 sibling, 1 reply; 5+ messages in thread
From: KAMEZAWA Hiroyuki @ 2011-11-29 6:02 UTC (permalink / raw)
To: Cong Wang
Cc: linux-kernel, akpm, Pekka Enberg, Christoph Hellwig, Hugh Dickins,
Dave Hansen, Lennart Poettering, Kay Sievers, KOSAKI Motohiro,
linux-mm
On Tue, 29 Nov 2011 13:33:12 +0800
Cong Wang <amwang@redhat.com> wrote:
> Systemd needs tmpfs to support fallocate [1], to be able
> to safely use mmap(), regarding SIGBUS, on files on the
> /dev/shm filesystem. The glibc fallback loop for -ENOSYS
> on fallocate is just ugly.
>
> This patch adds fallocate support to tmpfs, and as we
> already have shmem_truncate_range(), it is also easy to
> add FALLOC_FL_PUNCH_HOLE support too.
>
> 1. http://lkml.org/lkml/2011/10/20/275
>
one question.
> V3->V4:
> Handle 'undo' ENOSPC more correctly.
>
> V2->V3:
> a) Read i_size directly after holding i_mutex;
> b) Call page_cache_release() too after shmem_getpage();
> c) Undo previous changes when -ENOSPC.
>
> Cc: Pekka Enberg <penberg@kernel.org>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Dave Hansen <dave@linux.vnet.ibm.com>
> Cc: Lennart Poettering <lennart@poettering.net>
> Cc: Kay Sievers <kay.sievers@vrfy.org>
> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
> Signed-off-by: WANG Cong <amwang@redhat.com>
>
> ---
> mm/shmem.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 files changed, 90 insertions(+), 0 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index d672250..90c835b 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -30,6 +30,7 @@
> #include <linux/mm.h>
> #include <linux/export.h>
> #include <linux/swap.h>
> +#include <linux/falloc.h>
>
> static struct vfsmount *shm_mnt;
>
> @@ -1016,6 +1017,35 @@ failed:
> return error;
> }
>
> +static void shmem_putpage_noswap(struct inode *inode, pgoff_t index)
> +{
> + struct address_space *mapping = inode->i_mapping;
> + struct shmem_inode_info *info;
> + struct shmem_sb_info *sbinfo;
> + struct page *page;
> +
> + page = find_lock_page(mapping, index);
> +
You can't know whether the 'page' is allocated by alloc_page() in fallocate()
or just found as an existing one.
Then you will corrupt existing pages in the error path.
Is that allowed?
Thanks,
-Kame
* Re: [V4 PATCH 1/2] tmpfs: add fallocate support
2011-11-29 6:02 ` [V4 PATCH 1/2] tmpfs: add fallocate support KAMEZAWA Hiroyuki
@ 2011-11-29 7:23 ` Cong Wang
2011-11-29 7:57 ` KAMEZAWA Hiroyuki
0 siblings, 1 reply; 5+ messages in thread
From: Cong Wang @ 2011-11-29 7:23 UTC (permalink / raw)
To: KAMEZAWA Hiroyuki
Cc: linux-kernel, akpm, Pekka Enberg, Christoph Hellwig, Hugh Dickins,
Dave Hansen, Lennart Poettering, Kay Sievers, KOSAKI Motohiro,
linux-mm
[-- Attachment #1: Type: text/plain, Size: 774 bytes --]
On 2011/11/29 14:02, KAMEZAWA Hiroyuki wrote:
>
> You can't know whether the 'page' is allocated by alloc_page() in fallocate()
> or just found as an existing one.
> Then you will corrupt existing pages in the error path.
> Is that allowed?
>
According to the comment,
/*
* shmem_getpage_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache
*/
so we can tell whether the page is newly allocated by checking the page dirty bit.
Or am I missing something?
But whoops, I sent the wrong version of this patch; the one below is
the correct one. Sorry for that.
[-- Attachment #2: 0001-tmpfs-add-fallocate-support.patch --]
[-- Type: text/plain, Size: 4320 bytes --]
Subject: [V4 PATCH 1/2] tmpfs: add fallocate support
Systemd needs tmpfs to support fallocate [1], to be able
to safely use mmap(), regarding SIGBUS, on files on the
/dev/shm filesystem. The glibc fallback loop for -ENOSYS
on fallocate is just ugly.
This patch adds fallocate support to tmpfs, and as we
already have shmem_truncate_range(), it is also easy to
add FALLOC_FL_PUNCH_HOLE support too.
1. http://lkml.org/lkml/2011/10/20/275
V3->V4:
Handle 'undo' ENOSPC more correctly.
V2->V3:
a) Read i_size directly after holding i_mutex;
b) Call page_cache_release() too after shmem_getpage();
c) Undo previous changes when -ENOSPC.
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lennart Poettering <lennart@poettering.net>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: WANG Cong <amwang@redhat.com>
---
commit ca055ad343a0d629f8f1fad1df30796d2292f6a2
Author: Cong Wang <amwang@redhat.com>
Date: Wed Nov 23 13:16:26 2011 +0800
tmpfs: add fallocate support
---
mm/shmem.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 90 insertions(+), 0 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index d672250..6a6fc66 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -30,6 +30,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
+#include <linux/falloc.h>
static struct vfsmount *shm_mnt;
@@ -1016,6 +1017,35 @@ failed:
return error;
}
+static void shmem_putpage_noswap(struct inode *inode, pgoff_t index, bool new)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct shmem_inode_info *info;
+ struct shmem_sb_info *sbinfo;
+ struct page *page;
+
+ page = find_lock_page(mapping, index);
+
+ if (page) {
+ info = SHMEM_I(inode);
+ sbinfo = SHMEM_SB(inode->i_sb);
+ shmem_acct_block(info->flags);
+ if (!new && PageDirty(page)) {
+ ClearPageDirty(page);
+ delete_from_page_cache(page);
+ spin_lock(&info->lock);
+ info->alloced--;
+ inode->i_blocks -= BLOCKS_PER_PAGE;
+ spin_unlock(&info->lock);
+ }
+ if (sbinfo->max_blocks)
+ percpu_counter_add(&sbinfo->used_blocks, -1);
+ shmem_unacct_blocks(info->flags, 1);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+}
+
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
@@ -1431,6 +1461,65 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
+static long shmem_fallocate(struct file *file, int mode,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t end = DIV_ROUND_UP((offset + len), PAGE_CACHE_SIZE);
+ pgoff_t index = start;
+ loff_t i_size;
+ struct page *page = NULL;
+ int ret = 0;
+
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+
+ mutex_lock(&inode->i_mutex);
+ i_size = inode->i_size;
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (!(offset > i_size || (end << PAGE_CACHE_SHIFT) > i_size))
+ shmem_truncate_range(inode, offset,
+ (end << PAGE_CACHE_SHIFT) - 1);
+ goto unlock;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE)) {
+ ret = inode_newsize_ok(inode, (offset + len));
+ if (ret)
+ goto unlock;
+ }
+
+ while (index < end) {
+ ret = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
+ if (ret) {
+ if (ret == -ENOSPC)
+ goto undo;
+ else
+ goto unlock;
+ }
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ index++;
+ }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && (index << PAGE_CACHE_SHIFT) > i_size)
+ i_size_write(inode, index << PAGE_CACHE_SHIFT);
+
+ goto unlock;
+
+undo:
+ while (index > start) {
+ shmem_putpage_noswap(inode, index, true);
+ index--;
+ }
+
+unlock:
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2286,6 +2375,7 @@ static const struct file_operations shmem_file_operations = {
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
+ .fallocate = shmem_fallocate,
#endif
};
* Re: [V4 PATCH 1/2] tmpfs: add fallocate support
2011-11-29 7:23 ` Cong Wang
@ 2011-11-29 7:57 ` KAMEZAWA Hiroyuki
0 siblings, 0 replies; 5+ messages in thread
From: KAMEZAWA Hiroyuki @ 2011-11-29 7:57 UTC (permalink / raw)
To: Cong Wang
Cc: linux-kernel, akpm, Pekka Enberg, Christoph Hellwig, Hugh Dickins,
Dave Hansen, Lennart Poettering, Kay Sievers, KOSAKI Motohiro,
linux-mm
On Tue, 29 Nov 2011 15:23:58 +0800
Cong Wang <amwang@redhat.com> wrote:
> On 2011/11/29 14:02, KAMEZAWA Hiroyuki wrote:
> >
> > You can't know whether the 'page' is allocated by alloc_page() in fallocate()
> > or just found as an existing one.
> > Then you will corrupt existing pages in the error path.
> > Is that allowed?
> >
>
> According to the comment,
>
> /*
> * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
> *
> * If we allocate a new one we do not mark it dirty. That's up to the
> * vm. If we swap it in we mark it dirty since we also free the swap
> * entry since a page cannot live in both the swap and page cache
> */
>
> so we can tell whether the page is newly allocated by checking the page dirty bit.
> Or am I missing something?
>
If swap-in doesn't happen and a page is found...
==
page = find_lock_page(mapping, index); <=============== you find a page
if (radix_tree_exceptional_entry(page)) {
swap = radix_to_swp_entry(page);
page = NULL;
}
if (sgp != SGP_WRITE &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
goto failed;
}
if (page || (sgp == SGP_READ && !swap.val)) {
/*
* Once we can get the page lock, it must be uptodate:
* if there were an error in reading back from swap,
* the page would not be inserted into the filecache.
*/
BUG_ON(page && !PageUptodate(page));
*pagep = page; <========================= return here.
return 0;
}
==
Page will not be marked as dirty.
Thanks,
-Kame