From: Bob Copeland <me@bobcopeland.com>
To: linux-fsdevel@vger.kernel.org
Cc: Bob Copeland <me@bobcopeland.com>
Subject: [RFC][PATCH 4/7] omfs: file routines
Date: Wed, 15 Mar 2006 22:01:44 -0500
Message-ID: <11424781041183-git-send-email-me@bobcopeland.com>
In-Reply-To: <11424781041688-git-send-email-me@bobcopeland.com>

File routines for OMFS.  Each file is stored as a table of extents; each
entry holds a starting block number and a block count, and the table ends
with a terminator entry whose count is the ones' complement of the sum of
the preceding block counts.  If more entries are needed than fit in the
inode, continuation blocks are chained on via the table's next pointer.
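
As a rough sketch (the field names are taken from the code below, but the
types are my inference; the authoritative definitions live in omfs.h from
patch 1/7), the on-disk table looks something like:

	struct omfs_extent_entry {
		__be64 e_cluster;	/* starting cluster of this run */
		__be64 e_blocks;	/* length of this run in blocks */
	};

	struct omfs_extent {
		__be64 e_next;		/* next continuation block, ~0 if none */
		__be32 e_extent_count;	/* entries, including the terminator */
		__be32 e_fill;
		struct omfs_extent_entry e_entry;	/* first entry */
	};

The terminator occupies a normal entry slot with e_cluster == ~0; for two
extents of 4 and 6 blocks, its e_blocks field would hold ~10.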

---

 fs/omfs/file.c |  301 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 301 insertions(+), 0 deletions(-)
 create mode 100644 fs/omfs/file.c

af9af9dd0f834c570412e415d3dca319122ef04d
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
new file mode 100644
index 0000000..0e32506
--- /dev/null
+++ b/fs/omfs/file.c
@@ -0,0 +1,301 @@
+/*
+ * fs/omfs/file.c
+ * OMFS (as used by RIO Karma) file operations.
+ * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com>
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include "omfs.h"
+
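+/*
+ * Initialize an empty extent table at @offset: a count of one and a
+ * lone terminator entry complementing a sum of zero.
+ */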
+void omfs_make_empty_table(struct buffer_head *bh, int offset)
+{
+	struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];
+
+	memset(bh->b_data, 0, offset);
+
+	oe->e_next = ~0;
+	oe->e_extent_count = cpu_to_be32(1);
+	oe->e_fill = cpu_to_be32(0x22);
+	oe->e_entry.e_cluster = ~0;
+	oe->e_entry.e_blocks = ~0;
+}
+
+#ifdef OMFS_WRITE
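+/* recompute the checksums of a modified continuation block */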
+static void update_continuation(struct super_block *sb, struct buffer_head *bh)
+{
+	struct omfs_inode *oi = (struct omfs_inode *) bh->b_data;
+	omfs_update_checksums(oi, sb, be64_to_cpu(oi->i_head.h_self));
+	/* TODO: copy the mirror blocks too */
+}
+
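+/*
+ * Allocate and initialize a continuation block for the extent table;
+ * returns its cluster number and buffer_head.
+ */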
+static int omfs_add_continuation(struct super_block *sb, u64 *ret_block, 
+		struct buffer_head **ret_bh)
+{
+	int dummy, ret;
+	struct omfs_inode *oi;
+	struct omfs_sb_info *sbi = OMFS_SB(sb);
+
+	/* allocate a block */
+	ret = omfs_allocate_range(sb, sbi->s_mirrors, sbi->s_mirrors, ret_block,
+			&dummy);
+	if (ret)
+		return ret;
+
+	/* read it back */
+	*ret_bh = sb_bread(sb, clus_to_blk(sbi, *ret_block));
+	if (!*ret_bh)
+		return -EIO;
+
+	omfs_make_empty_table(*ret_bh, OMFS_EXTENT_CONT);
+
+	oi = (struct omfs_inode *) (*ret_bh)->b_data;
+
+	oi->i_head.h_self = cpu_to_be64(*ret_block);
+	/* fill in the rest of the header */
+	oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - 
+		sizeof(struct omfs_header));
+	oi->i_head.h_version = 1;
+	oi->i_head.h_type = OMFS_INODE_NORMAL;
+	oi->i_head.h_magic = OMFS_IMAGIC;
+
+	return 0;
+}
+#endif
+
+
+/*
+ * Scans the extent table for the given logical file block.
+ * Returns 0 if the block is not found.
+ */
+static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
+		sector_t block, int count)
+{
+	/* count > 1 because the last entry is the terminator */
+	sector_t searched = 0;
+	for (; count > 1; count--) {
+		int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
+			be64_to_cpu(ent->e_blocks));
+
+		if (block >= searched &&
+		    block < searched + numblocks) {
+			/* found it at cluster + (block - searched) */
+			return clus_to_blk(OMFS_SB(inode->i_sb),
+				be64_to_cpu(ent->e_cluster)) +
+				block - searched;
+		}
+		searched += numblocks;
+		ent++;
+	}
+	return 0;
+}
+
+static int omfs_get_block(struct inode *inode, sector_t block,
+			  struct buffer_head *bh_result, int create)
+{
+	struct buffer_head *bh;
+	sector_t next, offset;
+#ifdef OMFS_WRITE
+	u64 new_block;
+	int new_count;
+	int ret;
+	int is_continue = 0;
+	int max_count;
+	struct omfs_extent_entry *terminator;
+#endif
+	int extent_count;
+	struct omfs_extent *oe;
+	struct omfs_extent_entry *entry;
+	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
+
+	bh = sb_bread(inode->i_sb, clus_to_blk(sbi, inode->i_ino));
+	if (!bh)
+		goto err;
+
+	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
+
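+	/* walk the extent table, following any continuation blocks */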
+	for (;;) {
+		extent_count = be32_to_cpu(oe->e_extent_count);
+		next = be64_to_cpu(oe->e_next);
+		entry = &oe->e_entry;
+
+		offset = find_block(inode, entry, block, extent_count);
+		if (offset > 0) {
+			brelse(bh);
+
+			map_bh(bh_result, inode->i_sb, offset);
+			return 0;
+		}
+		if (next == ~0)
+			break;
+
+		brelse(bh);
+		bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
+		if (!bh)
+			goto err;
+		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
+	}
+	if (!create) {
+		brelse(bh);
+		return 0;
+	}
+
+#ifdef OMFS_WRITE
+
+	/* We reached the end of the extent table with no block mapped.
+	 * There are three ways to add space: grow the last extent, add
+	 * a new extent to the current extent table, or add a
+	 * continuation inode.  The last two cases need the allocator
+	 * to hand back an sbi->s_clustersize run.
+	 */
+
+	/* TODO: handle holes */
+
+	/* there should always be a terminator */
+	if (extent_count < 1) {
+		brelse(bh);
+		return -EIO;
+	}
+
+	/* trivially grow current extent, if next block is not taken */
+	terminator = entry + extent_count - 1;
+	if (extent_count > 1) {
+		entry = terminator - 1;
+		new_block = be64_to_cpu(entry->e_cluster) +
+			be64_to_cpu(entry->e_blocks);
+
+		printk(KERN_DEBUG "omfs: trying to grow %llx+%llx = %llx\n",
+			(unsigned long long) be64_to_cpu(entry->e_cluster),
+			(unsigned long long) be64_to_cpu(entry->e_blocks),
+			(unsigned long long) new_block);
+
+		if (omfs_allocate_block(inode->i_sb, new_block)) {
+			entry->e_blocks = 
+				cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
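+			/* terminator holds ~(sum of counts); bump it by one */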
+			terminator->e_blocks = ~(cpu_to_be64(
+				be64_to_cpu(~terminator->e_blocks) + 1));
+
+			mark_buffer_dirty(bh);
+			brelse(bh);
+	
+			mark_inode_dirty(inode);
+
+			map_bh(bh_result, inode->i_sb, 
+				clus_to_blk(sbi, new_block));
+			return 0;
+		}
+	}
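+	/* entries that fit in the inode's extent table, incl. terminator */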
+	max_count = (sbi->s_sys_blocksize - OMFS_EXTENT_START - 
+		sizeof(struct omfs_extent)) / 
+		sizeof(struct omfs_extent_entry) + 1;
+
+	/* add a continuation block */
+	if (be32_to_cpu(oe->e_extent_count) > max_count - 1) {
+		struct omfs_extent *new_table;
+		u64 cblock;
+		struct buffer_head *new_bh;
+
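+		/* XXX: untested -- everything below this return is dead code */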
+		brelse(bh);
+		return -EIO;
+
+		ret = omfs_add_continuation(inode->i_sb, &cblock, &new_bh);
+		if (ret) {
+			brelse(bh);
+			return ret;
+		}
+
+		new_table = (struct omfs_extent *) 
+			&new_bh->b_data[OMFS_EXTENT_CONT];
+
+		oe->e_next = cpu_to_be64(cblock);
+		mark_buffer_dirty(bh);
+		brelse(bh);
+
+		bh = new_bh;
+		oe = new_table;
+		terminator = &oe->e_entry;
+
+		is_continue = 1;
+	}
+
+	/* try to allocate a new cluster */
+	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
+		&new_block, &new_count);
+	if (ret) {
+		brelse(bh);
+		return ret;
+	}
+
+	printk(KERN_DEBUG "omfs: made a new block at %llx\n",
+		(unsigned long long) new_block);
+
+	/* copy the terminator down one entry */
+	entry = terminator;
+	terminator++;
+	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));
+
+	entry->e_cluster = cpu_to_be64(new_block);
+	entry->e_blocks = cpu_to_be64((u64) new_count);
+
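+	/* the terminator holds ~(sum of counts); fold in the new count */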
+	terminator->e_blocks = ~(cpu_to_be64(
+		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));
+
+	/* account for the new entry */
+	oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));
+
+	if (is_continue)
+		update_continuation(inode->i_sb, bh);
+	mark_buffer_dirty(bh);
+	brelse(bh);
+
+	if (!is_continue)
+		mark_inode_dirty(inode);
+
+	map_bh(bh_result, inode->i_sb, clus_to_blk(sbi, new_block));
+	return 0;
+#endif
+
+	/* create requested but write support is compiled out */
+	brelse(bh);
+err:
+	return -EIO;
+}
+
+static int omfs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, omfs_get_block);
+}
+
+#ifdef OMFS_WRITE
+static int omfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, omfs_get_block, wbc);
+}
+
+static int omfs_prepare_write(struct file *file, struct page *page,
+		unsigned from, unsigned to)
+{
+	return block_prepare_write(page, from, to, omfs_get_block);
+}
+
+static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, omfs_get_block);
+}
+#endif
+
+struct file_operations omfs_file_operations = {
+	.llseek = generic_file_llseek,
+	.read = generic_file_read,
+	.write = generic_file_write,
+	.mmap = generic_file_mmap,
+	.sendfile = generic_file_sendfile,
+};
+
+struct address_space_operations omfs_aops = {
+	.readpage = omfs_readpage,
+#ifdef OMFS_WRITE
+	.writepage = omfs_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = omfs_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = omfs_bmap,
+#endif
+};
+
-- 
1.2.1



