From: clameter@sgi.com
To: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Christoph Hellwig <hch@lst.de>, Mel Gorman <mel@skynet.ie>
Cc: William Lee Irwin III <wli@holomorphy.com>, David Chinner <dgc@sgi.com>
Cc: Jens Axboe <jens.axboe@oracle.com>, Badari Pulavarty <pbadari@gmail.com>
Cc: Maxim Levitsky <maximlevitsky@gmail.com>
Subject: [15/37] Use page_cache_xxx functions in fs/ext2
Date: Wed, 20 Jun 2007 11:29:22 -0700
Message-ID: <20070620183009.414966558@sgi.com>
In-Reply-To: <20070620182907.506775016@sgi.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
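The new calls rely on the per-address_space page cache helpers introduced
earlier in this series in [01/37] ("Define functions for page cache
handling"). The following is only a rough sketch of how this patch expects
those helpers to behave; the exact definitions live in [01/37], and the
a->order field used below is an assumption about how that patch records the
per-mapping page order:

static inline int page_cache_shift(struct address_space *a)
{
	return PAGE_SHIFT + a->order;	/* shift for one page cache page */
}

static inline unsigned int page_cache_size(struct address_space *a)
{
	return PAGE_SIZE << a->order;	/* bytes per page cache page */
}

static inline unsigned int page_cache_offset(struct address_space *a, loff_t pos)
{
	/* byte offset of pos within its page cache page */
	return pos & ((PAGE_SIZE << a->order) - 1);
}

static inline pgoff_t page_cache_index(struct address_space *a, loff_t pos)
{
	return pos >> page_cache_shift(a);	/* page cache index of pos */
}

static inline loff_t page_cache_pos(struct address_space *a,
				pgoff_t index, unsigned long offset)
{
	/* byte position from page index plus offset within the page */
	return ((loff_t)index << page_cache_shift(a)) + offset;
}

For an order-0 mapping these reduce to the old PAGE_CACHE_SIZE,
PAGE_CACHE_SHIFT and PAGE_CACHE_MASK arithmetic, so conversions such as
replacing (n << PAGE_CACHE_SHIFT) + offset with
page_cache_pos(mapping, n, offset), or pos & ~PAGE_CACHE_MASK with
page_cache_offset(mapping, pos), should be behaviour-neutral until a
mapping actually uses a larger page order.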
fs/ext2/dir.c | 40 +++++++++++++++++++++++-----------------
1 file changed, 23 insertions(+), 17 deletions(-)
Index: linux-2.6.22-rc4-mm2/fs/ext2/dir.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/fs/ext2/dir.c 2007-06-15 17:35:32.000000000 -0700
+++ linux-2.6.22-rc4-mm2/fs/ext2/dir.c 2007-06-18 18:57:34.000000000 -0700
@@ -45,7 +45,8 @@ static inline void ext2_put_page(struct
static inline unsigned long dir_pages(struct inode *inode)
{
- return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
+ return (inode->i_size+page_cache_size(inode->i_mapping)-1)>>
+ page_cache_shift(inode->i_mapping);
}
/*
@@ -56,10 +57,11 @@ static unsigned
ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
unsigned last_byte = inode->i_size;
+ struct address_space *mapping = inode->i_mapping;
- last_byte -= page_nr << PAGE_CACHE_SHIFT;
- if (last_byte > PAGE_CACHE_SIZE)
- last_byte = PAGE_CACHE_SIZE;
+ last_byte -= page_nr << page_cache_shift(mapping);
+ if (last_byte > page_cache_size(mapping))
+ last_byte = page_cache_size(mapping);
return last_byte;
}
@@ -88,18 +90,19 @@ static int ext2_commit_chunk(struct page
static void ext2_check_page(struct page *page)
{
- struct inode *dir = page->mapping->host;
+ struct address_space *mapping = page->mapping;
+ struct inode *dir = mapping->host;
struct super_block *sb = dir->i_sb;
unsigned chunk_size = ext2_chunk_size(dir);
char *kaddr = page_address(page);
u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
unsigned offs, rec_len;
- unsigned limit = PAGE_CACHE_SIZE;
+ unsigned limit = page_cache_size(mapping);
ext2_dirent *p;
char *error;
- if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_CACHE_MASK;
+ if (page_cache_index(mapping, dir->i_size) == page->index) {
+ limit = page_cache_offset(mapping, dir->i_size);
if (limit & (chunk_size - 1))
goto Ebadsize;
if (!limit)
@@ -151,7 +154,7 @@ Einumber:
bad_entry:
ext2_error (sb, "ext2_check_page", "bad entry in directory #%lu: %s - "
"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, error, page_cache_pos(mapping, page->index, offs),
(unsigned long) le32_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
@@ -160,7 +163,7 @@ Eend:
ext2_error (sb, "ext2_check_page",
"entry in directory #%lu spans the page boundary"
"offset=%lu, inode=%lu",
- dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, page_cache_pos(mapping, page->index, offs),
(unsigned long) le32_to_cpu(p->inode));
fail:
SetPageChecked(page);
@@ -258,8 +261,9 @@ ext2_readdir (struct file * filp, void *
loff_t pos = filp->f_pos;
struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block *sb = inode->i_sb;
- unsigned int offset = pos & ~PAGE_CACHE_MASK;
- unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ struct address_space *mapping = inode->i_mapping;
+ unsigned int offset = page_cache_offset(mapping, pos);
+ unsigned long n = page_cache_index(mapping, pos);
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
@@ -280,14 +284,14 @@ ext2_readdir (struct file * filp, void *
ext2_error(sb, __FUNCTION__,
"bad page in #%lu",
inode->i_ino);
- filp->f_pos += PAGE_CACHE_SIZE - offset;
+ filp->f_pos += page_cache_size(mapping) - offset;
return -EIO;
}
kaddr = page_address(page);
if (unlikely(need_revalidate)) {
if (offset) {
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
- filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
+ filp->f_pos = page_cache_pos(mapping, n, offset);
}
filp->f_version = inode->i_version;
need_revalidate = 0;
@@ -310,7 +314,7 @@ ext2_readdir (struct file * filp, void *
offset = (char *)de - kaddr;
over = filldir(dirent, de->name, de->name_len,
- (n<<PAGE_CACHE_SHIFT) | offset,
+ page_cache_pos(mapping, n, offset),
le32_to_cpu(de->inode), d_type);
if (over) {
ext2_put_page(page);
@@ -336,6 +340,7 @@ struct ext2_dir_entry_2 * ext2_find_entr
struct dentry *dentry, struct page ** res_page)
{
const char *name = dentry->d_name.name;
+ struct address_space *mapping = dir->i_mapping;
int namelen = dentry->d_name.len;
unsigned reclen = EXT2_DIR_REC_LEN(namelen);
unsigned long start, n;
@@ -377,7 +382,7 @@ struct ext2_dir_entry_2 * ext2_find_entr
if (++n >= npages)
n = 0;
/* next page is past the blocks we've got */
- if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+ if (unlikely(n > (dir->i_blocks >> (page_cache_shift(mapping) - 9)))) {
ext2_error(dir->i_sb, __FUNCTION__,
"dir %lu size %lld exceeds block count %llu",
dir->i_ino, dir->i_size,
@@ -448,6 +453,7 @@ void ext2_set_link(struct inode *dir, st
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
+ struct address_space *mapping = dir->i_mapping;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned chunk_size = ext2_chunk_size(dir);
@@ -477,7 +483,7 @@ int ext2_add_link (struct dentry *dentry
kaddr = page_address(page);
dir_end = kaddr + ext2_last_byte(dir, n);
de = (ext2_dirent *)kaddr;
- kaddr += PAGE_CACHE_SIZE - reclen;
+ kaddr += page_cache_size(mapping) - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
--
Thread overview: 45+ messages
2007-06-20 18:29 [00/37] Large Blocksize Support V4 clameter
2007-06-20 18:29 ` [01/37] Define functions for page cache handling clameter
2007-06-20 18:29 ` [02/37] Pagecache zeroing: zero_user_segment, zero_user_segments and zero_user clameter
2007-06-20 18:29 ` [03/37] Use page_cache_xxx function in mm/filemap.c clameter
2007-06-20 18:29 ` [04/37] Use page_cache_xxx in mm/page-writeback.c clameter
2007-06-20 18:29 ` [05/37] Use page_cache_xxx in mm/truncate.c clameter
2007-06-20 18:29 ` [06/37] Use page_cache_xxx in mm/rmap.c clameter
2007-06-20 18:29 ` [07/37] Use page_cache_xxx in mm/filemap_xip.c clameter
2007-06-20 18:29 ` [08/37] Use page_cache_xxx in mm/migrate.c clameter
2007-06-20 18:29 ` [09/37] Use page_cache_xxx in fs/libfs.c clameter
2007-06-20 18:29 ` [10/37] Use page_cache_xxx in fs/sync clameter
2007-06-20 18:29 ` [11/37] Use page_cache_xxx in fs/buffer.c clameter
2007-06-20 18:29 ` [12/37] Use page_cache_xxx in mm/mpage.c clameter
2007-06-20 18:29 ` [13/37] Use page_cache_xxx in mm/fadvise.c clameter
2007-06-20 18:29 ` [14/37] Use page_cache_xxx in fs/splice.c clameter
2007-06-20 18:29 ` clameter [this message]
2007-06-20 18:29 ` [16/37] Use page_cache_xxx in fs/ext3 clameter
2007-06-20 18:29 ` [17/37] Use page_cache_xxx in fs/ext4 clameter
2007-06-20 18:29 ` [18/37] Use page_cache_xxx in fs/reiserfs clameter
2007-06-20 18:29 ` [19/37] Use page_cache_xxx for fs/xfs clameter
2007-06-20 18:29 ` [20/37] Fix PAGE SIZE assumption in miscellaneous places clameter
2007-06-20 18:29 ` [21/37] Use page_cache_xxx in drivers/block/loop.c clameter
2007-06-20 18:29 ` [22/37] Use page_cache_xxx in drivers/block/rd.c clameter
2007-06-20 18:29 ` [23/37] compound pages: PageHead/PageTail instead of PageCompound clameter
2007-06-20 18:29 ` [24/37] compound pages: Add new support functions clameter
2007-06-20 18:29 ` [25/37] compound pages: vmstat support clameter
2007-06-20 18:29 ` [26/37] compound pages: Use new compound vmstat functions in SLUB clameter
2007-06-20 18:29 ` [27/37] compound pages: Allow use of get_page_unless_zero with compound pages clameter
2007-06-20 18:29 ` [28/37] compound pages: Allow freeing of compound pages via pagevec clameter
2007-06-20 18:29 ` [29/37] Large blocksize support: Fix up reclaim counters clameter
2007-06-20 18:29 ` [30/37] Add VM_BUG_ONs to check for correct page order clameter
2007-06-20 18:29 ` [31/37] Large blocksize support: Core piece clameter
2007-06-21 0:20 ` Bob Picco
2007-06-21 5:26 ` Christoph Lameter
2007-06-20 18:29 ` [32/37] Readahead changes to support large blocksize clameter
2007-06-20 18:29 ` [33/37] Large blocksize: Compound page zeroing and flushing clameter
2007-06-20 18:29 ` [34/37] Large blocksize support in ramfs clameter
2007-06-20 20:50 ` Andreas Dilger
2007-06-20 21:29 ` Christoph Lameter
2007-06-20 18:29 ` [35/37] Large blocksize support in XFS clameter
2007-06-20 18:29 ` [36/37] Large blocksize support for ext2 clameter
2007-06-20 20:56 ` Andreas Dilger
2007-06-20 21:27 ` Christoph Lameter
2007-06-20 22:19 ` Andreas Dilger
2007-06-20 18:29 ` [37/37] Reiserfs: Fix up for mapping_set_gfp_mask clameter