From: Amerigo Wang <amwang@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: akpm@linux-foundation.org, WANG Cong <amwang@redhat.com>,
Hugh Dickins <hughd@google.com>,
linux-mm@kvack.org
Subject: [Patch] tmpfs: add fallocate support
Date: Tue, 15 Nov 2011 16:42:05 +0800
Message-ID: <1321346525-10187-1-git-send-email-amwang@redhat.com>
This patch adds fallocate() support to tmpfs. I tested it with the
following test case:
% sudo mount -t tmpfs -o size=100 tmpfs /mnt
% touch /mnt/foobar
% echo hi > /mnt/foobar
% fallocate -o 3 -l 5000 /mnt/foobar
fallocate: /mnt/foobar: fallocate failed: No space left on device
% fallocate -o 3 -l 3000 /mnt/foobar
% ls -l /mnt/foobar
-rw-rw-r-- 1 wangcong wangcong 3003 Nov 15 16:10 /mnt/foobar
% dd if=/dev/zero of=/mnt/foobar seek=3 bs=1 count=3000
3000+0 records in
3000+0 records out
3000 bytes (3.0 kB) copied, 0.0153224 s, 196 kB/s
% hexdump -C /mnt/foobar
00000000 68 69 0a 00 00 00 00 00 00 00 00 00 00 00 00 00 |hi..............|
00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
00000bb0 00 00 00 00 00 00 00 00 00 00 00 |...........|
00000bbb
% cat /mnt/foobar
hi
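
For reference, the same path can also be exercised directly through the
fallocate(2) syscall instead of the fallocate(1) utility. Below is a
minimal sketch (a hypothetical test program, not part of this patch),
covering both the size-extending case and FALLOC_FL_KEEP_SIZE:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/foobar", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Extend the file: on success i_size becomes offset + len = 3003. */
	if (fallocate(fd, 0, 3, 3000) < 0)
		perror("fallocate");

	/* Preallocate blocks without changing the visible file size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 3, 3000) < 0)
		perror("fallocate (FALLOC_FL_KEEP_SIZE)");

	close(fd);
	return 0;
}
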
Signed-off-by: WANG Cong <amwang@redhat.com>
---
diff --git a/mm/shmem.c b/mm/shmem.c
index d672250..438b7b8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -30,6 +30,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
+#include <linux/falloc.h>
static struct vfsmount *shm_mnt;
@@ -1431,6 +1432,110 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
+static long shmem_fallocate(struct file *file, int mode,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ pgoff_t start = DIV_ROUND_UP(offset, PAGE_CACHE_SIZE);
+ pgoff_t end = DIV_ROUND_UP((offset + len), PAGE_CACHE_SIZE);
+ pgoff_t index = start;
+ gfp_t gfp = mapping_gfp_mask(mapping);
+ loff_t i_size = i_size_read(inode);
+ struct page *page = NULL;
+ int ret;
+
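+	/* Nothing to do if the requested range already lies within i_size. */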
+ if ((offset + len) <= i_size)
+ return 0;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE)) {
+ ret = inode_newsize_ok(inode, (offset + len));
+ if (ret)
+ return ret;
+ }
+
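+	/*
+	 * With the rounding above, start == end means no whole new page
+	 * is needed; at most the file size has to grow.
+	 */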
+ if (start == end) {
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ i_size_write(inode, offset + len);
+ return 0;
+ }
+
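+	/* Check accounting and the mount's max_blocks limit up front. */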
+ if (shmem_acct_block(info->flags))
+ return -ENOSPC;
+
+ if (sbinfo->max_blocks) {
+ unsigned long blocks = (end - index) * BLOCKS_PER_PAGE;
+ if (blocks + percpu_counter_sum(&sbinfo->used_blocks)
+ >= sbinfo->max_blocks) {
+ ret = -ENOSPC;
+ goto unacct;
+ }
+ }
+
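+	/* Allocate, charge, zero and insert the new pages one at a time. */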
+ while (index < end) {
+ if (sbinfo->max_blocks)
+ percpu_counter_add(&sbinfo->used_blocks, BLOCKS_PER_PAGE);
+
+ page = shmem_alloc_page(gfp, info, index);
+ if (!page) {
+ ret = -ENOMEM;
+ goto decused;
+ }
+
+ SetPageSwapBacked(page);
+ __set_page_locked(page);
+ ret = mem_cgroup_cache_charge(page, current->mm,
+ gfp & GFP_RECLAIM_MASK);
+ if (!ret)
+ ret = shmem_add_to_page_cache(page, mapping, index,
+ gfp, NULL);
+ if (ret)
+ goto unlock;
+ lru_cache_add_anon(page);
+
+ spin_lock(&info->lock);
+ info->alloced++;
+ inode->i_blocks += BLOCKS_PER_PAGE;
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ shmem_recalc_inode(inode);
+ spin_unlock(&info->lock);
+
+ clear_highpage(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+ cond_resched();
+ index++;
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ i_size_write(inode, index << PAGE_CACHE_SHIFT);
+ }
+
+ goto unacct;
+
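+	/* Error unwind for the failing page; note unacct also runs on success. */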
+unlock:
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+decused:
+ if (sbinfo->max_blocks)
+ percpu_counter_sub(&sbinfo->used_blocks, BLOCKS_PER_PAGE);
+unacct:
+ shmem_unacct_blocks(info->flags, 1);
+ return ret;
+}
+
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2286,6 +2383,7 @@ static const struct file_operations shmem_file_operations = {
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
+ .fallocate = shmem_fallocate,
#endif
};