public inbox for linux-mtd@lists.infradead.org
 help / color / mirror / Atom feed
* [PATCH] [MTD] [UBI] add block device layer on top of UBI
@ 2008-03-03 11:17 Nancy
  2008-03-04  9:40 ` Nancy
  0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-03-03 11:17 UTC (permalink / raw)
  To: linux-mtd

[-- Attachment #1: Type: text/plain, Size: 695 bytes --]

This patch based on
git://git.infradead.org/users/dedekind/ubifs-v2.6.24.git/  UBI version
  #cd linux-2.6.24/drivers/mtd/ubi
  #patch -p1 < ~/ubi_new.diff
  Then copy "ubi_blktrans.h" to linux-2.6.24/include/linux/mtd
  Notice: Please do not select MTD_UBI_GLUEBI

#mkfs.vfat /dev/ubiblock0      works fine, but
#mount -t vfat /dev/ubiblock0 /mnt
mount: mounting /dev/ubiblock0 on /mnt failed: Invalid argument

I grepped the whole kernel but can't find where this error comes from. I
don't know how to debug it step by step — I mean, how to follow the mount
application.

Could you please give me a suggestion? A keyword, a book name, an
article link — whatever it is.

Many thanks!

--
Best wishes,
Nancy

[-- Attachment #2: ubi_new.diff --]
[-- Type: text/plain, Size: 25747 bytes --]

diff -nprBN ubi.orig/bdev.c ubi.new/bdev.c
a0 477
/*
 * $Id: bdev.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for UBI 'translation layers'.
 *
 * 2008 Yurong Tan <nancydreaming@gmail.com>:
 *      borrow from mtd_blkdevs.c for building block device layer on top of UBI  
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/ubi_blktrans.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>
#include "ubi.h"


static LIST_HEAD(blktrans_majors);
extern struct mutex vol_table_mutex;  
extern struct ubi_volume *vol_table[];      

extern void register_vol_user (struct vol_notifier *new);
extern int unregister_vol_user (struct vol_notifier *old);
extern int ubi_major2num(int major);

struct ubi_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

/*
 * Service one block-layer request against a UBI translation-layer device.
 *
 * Returns 1 when the whole request was handled successfully and 0 on any
 * failure (non-fs request, out-of-range sector, read/write error); the
 * caller feeds this value straight to end_request().
 */
static int do_blktrans_request(struct ubi_blktrans_ops *tr,
			       struct ubi_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	/* Only handle filesystem requests; validate before any work. */
	if (!blk_fs_request(req))
		return 0;

	/* Reject requests that run past the end of the device. */
	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	/* Convert 512-byte sectors into translation-layer blocks. */
	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;
	buf = req->buffer;

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		/* Read-only translation layers have no writesect hook. */
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

/*
 * Per-translation-layer worker thread: pulls requests off the queue and
 * services them via do_blktrans_request().
 *
 * Locking dance: the queue lock must be held while calling
 * elv_next_request()/end_request(), but must be dropped around the actual
 * (sleeping) flash I/O, which is serialized per device by dev->lock.
 */
static int ubi_blktrans_thread(void *arg)
{
	struct ubi_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct ubi_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			/* Queue empty: sleep until ubi_blktrans_request()
			   wakes us (or kthread_stop() does). */
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		/* Drop the queue lock for the duration of the flash I/O. */
		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		/* res: 1 = success, 0 = failure (see do_blktrans_request). */
		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

/* request_fn hook: just kick the worker thread; it drains the queue. */
static void ubi_blktrans_request(struct request_queue *rq)
{
	struct ubi_blktrans_ops *ops = rq->queuedata;

	wake_up_process(ops->blkcore_priv->thread);
}

/*
 * Block device ->open: open the backing UBI volume (read-write only if
 * the file is opened for writing), pin the translation-layer module and
 * call its open hook.
 *
 * Fixes over the original: when try_module_get() failed, the old code
 * jumped into the error path of a *successful* get — calling module_put()
 * on a module it never got and leaking the open volume.  The unused
 * "out" label is gone too.
 */
static int blktrans_open(struct inode *i, struct file *f)
{
	struct ubi_volume_desc *desc;
	int ubi_num = ubi_major2num(imajor(i));
	int vol_id = iminor(i);
	int mode;
	struct ubi_blktrans_dev *dev;
	struct ubi_blktrans_ops *tr;
	int ret;

	if (f->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	desc->vol->bdev_mode = mode;
	dev = i->i_bdev->bd_disk->private_data;
	dev->uv = desc;
	tr = dev->tr;

	if (!try_module_get(tr->owner)) {
		/* Module is going away: just drop the volume reference. */
		ret = -ENODEV;
		goto out_close;
	}

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		module_put(tr->owner);
		goto out_close;
	}
	return 0;

 out_close:
	ubi_close_volume(desc);
	return ret;
}

/*
 * Block device ->release: hand the release to the translation layer and,
 * on success, drop the volume reference and the module reference taken
 * at open time.
 */
static int blktrans_release(struct inode *i, struct file *f)
{
	struct ubi_blktrans_dev *dev = i->i_bdev->bd_disk->private_data;
	struct ubi_blktrans_ops *tr = dev->tr;
	struct ubi_volume_desc *desc = dev->uv;
	int ret = 0;

	if (tr->release)
		ret = tr->release(dev);
	if (ret)
		return ret;

	ubi_close_volume(desc);
	module_put(tr->owner);
	return 0;
}

/* Block device ->getgeo: delegate to the translation layer, if it has one. */
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ubi_blktrans_dev *dev = bdev->bd_disk->private_data;

	return dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
}

/*
 * Block device ->ioctl: only BLKFLSBUF is of interest here; everything
 * else is either handled by the block core or unsupported.
 */
static int blktrans_ioctl(struct inode *inode, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct ubi_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct ubi_blktrans_ops *tr = dev->tr;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/* No flush hook means the core code already did the work. */
	return tr->flush ? tr->flush(dev) : 0;
}

/* File operations installed on every UBI translation-layer gendisk. */
struct block_device_operations ubi_blktrans_ops = {
	.owner   = THIS_MODULE,
	.open    = blktrans_open,
	.release = blktrans_release,
	.ioctl   = blktrans_ioctl,
	.getgeo  = blktrans_getgeo,
};

/*
 * Register one translation-layer device and create its gendisk.
 *
 * Must be called with vol_table_mutex held (asserted via the
 * mutex_trylock()/BUG() idiom).  A devnum of -1 means "allocate the
 * first free number".  Returns 0 on success, -EBUSY if the requested
 * number is taken or the minor space is exhausted, -ENOMEM on
 * allocation failure.
 *
 * Fixes over the original: dev->lock was only initialized on the
 * fall-through path, so devices inserted via "goto added" ran with an
 * uninitialized mutex; and the minor-space check was off by one
 * (first_minor == 256 slipped through "> 256").
 */
int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new)
{
	struct ubi_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&vol_table_mutex)) {
		mutex_unlock(&vol_table_mutex);
		BUG();
	}

	/* Initialize before ANY insertion path, not just the last one. */
	mutex_init(&new->lock);

	list_for_each(this, &tr->devs) {
		struct ubi_blktrans_dev *d = list_entry(this, struct ubi_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* first_minor is devnum << part_bits and must fit in 0..255. */
	if ((new->devnum << tr->part_bits) >= 256)
		return -EBUSY;

	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &ubi_blktrans_ops;

	/* Partitionable devices get sda-style names, others name0-style. */
	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

/*
 * Unregister a translation-layer device and tear down its gendisk.
 * Caller must hold vol_table_mutex.
 */
int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old)
{
	struct gendisk *gd = old->blkcore_priv;

	if (mutex_trylock(&vol_table_mutex)) {
		mutex_unlock(&vol_table_mutex);
		BUG();
	}

	list_del(&old->list);
	del_gendisk(gd);
	put_disk(gd);

	return 0;
}

/* Volume-removal notifier: drop every blktrans device backed by @vol. */
static void blktrans_notify_remove(struct ubi_volume *vol)
{
	struct list_head *t, *d, *tmp;

	list_for_each(t, &blktrans_majors) {
		struct ubi_blktrans_ops *tr =
			list_entry(t, struct ubi_blktrans_ops, list);

		/* _safe variant: remove_vol() unlinks entries as we walk. */
		list_for_each_safe(d, tmp, &tr->devs) {
			struct ubi_blktrans_dev *dev =
				list_entry(d, struct ubi_blktrans_dev, list);

			if (dev->uv->vol == vol)
				tr->remove_vol(dev);
		}
	}
}

/*
 * Volume-addition notifier: offer the new volume to every registered
 * translation layer.  (Removed a dead #if 0 block referencing
 * mtd->type, an MTD field that does not exist on UBI volumes.)
 */
static void blktrans_notify_add(struct ubi_volume *vol)
{
	struct list_head *this;

	list_for_each(this, &blktrans_majors) {
		struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);

		tr->add_vol(tr, vol);
	}
}

/* Hooks invoked by the volume table code when a UBI volume comes or goes. */
static struct vol_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};


int register_ubi_blktrans(struct ubi_blktrans_ops *tr)
{
	int ret, i;


	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_vol_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&vol_table_mutex);  

	tr->major = register_blkdev(0, tr->name);
#if 0	
	if (ret) {
//		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		printk("Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&vol_table_mutex);
		return ret;
	}
#endif
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(ubi_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&vol_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(ubi_blktrans_thread, tr,
			"%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&vol_table_mutex);  
		return PTR_ERR(tr->blkcore_priv->thread);
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<UBI_MAX_VOLUMES; i++) {
		if (vol_table[i] )
			tr->add_vol(tr, vol_table[i]);
	}
	
	mutex_unlock(&vol_table_mutex);
	return 0;
}

/*
 * Tear down a translation layer: stop the worker thread, remove all of
 * its devices, then release the queue and the block major.
 *
 * Teardown order matters: the thread must be stopped before devices and
 * the queue go away, and the devs list must be drained before the
 * BUG_ON() sanity check.  Returns 0.
 */
int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&vol_table_mutex);  

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	/* remove_vol() unlinks entries, hence the _safe iteration. */
	list_for_each_safe(this, next, &tr->devs) {
		struct ubi_blktrans_dev *dev = list_entry(this, struct ubi_blktrans_dev, list);
		tr->remove_vol(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&vol_table_mutex); 

	kfree(tr->blkcore_priv);

	/* Every device should have been removed above. */
	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

/* Module exit: drop the volume notifier if it was ever registered. */
static void __exit ubi_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_ubi_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_vol_user(&blktrans_notifier);

}


module_exit(ubi_blktrans_exit); 

EXPORT_SYMBOL_GPL(register_ubi_blktrans);
EXPORT_SYMBOL_GPL(deregister_ubi_blktrans);
EXPORT_SYMBOL_GPL(add_ubi_blktrans_dev);
EXPORT_SYMBOL_GPL(del_ubi_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Yurong Tan <nancydreaming@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for UBI 'translation layers'");

diff -nprBN ubi.orig/block-jz.c ubi.new/block-jz.c
a0 315
/*
 * Direct UBI block device access
 *
 * $Id: ubiblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
 * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
 * (C) 2008 Yurong Tan <nancydreaming@gmail.com> :
 *        borrow mtdblock.c to work on top of UBI
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/hdreg.h>
#include <linux/mtd/ubi_blktrans.h>
#include <linux/mutex.h>
#include "ubi.h"

#define UNMAPPED 1

/* Per-volume block-emulation state, indexed by UBI volume id. */
static struct ubiblk_dev {
	struct ubi_volume_desc *uv;	/* open UBI volume backing this device */
	int count;			/* open count */
	struct mutex cache_mutex;	/* protects flushing of the write cache */
	unsigned short vbw;		/* virtual block number held in write cache */
	unsigned short vbr;		/* virtual block number held in read cache */
	unsigned char *write_cache;	/* one usable-LEB-size write buffer */
	unsigned char *read_cache;	/* one usable-LEB-size read buffer */
	enum { STATE_UNUSED, STATE_USED } read_cache_state, write_cache_state;
} *ubiblks[UBI_MAX_VOLUMES];   

void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block);
int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk);

/*
 * Write the cached LEB back to the volume if the write cache is dirty.
 *
 * Caller holds ubiblk->cache_mutex.  Returns 0 if there was nothing to
 * flush or the write succeeded, otherwise the ubi_leb_write() error —
 * the original version threw the error away.
 *
 * NOTE(review): the offset is derived from the EC/VID header sizes;
 * verify this against UBI's LEB addressing, which normally already
 * excludes the headers.
 */
int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk)
{
	int offset = ubiblk->uv->vol->ubi->ec_hdr_alsize + ubiblk->uv->vol->ubi->vid_hdr_alsize;
	int err;

	if (STATE_UNUSED == ubiblk->write_cache_state)
		return 0;

	err = ubi_leb_write(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, offset,
		      ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
	ubiblk->write_cache_state = STATE_UNUSED;
	return err;
}

/*
 * Point the write cache at @virt_block and pre-fill it with the block's
 * current contents so partial-sector writes merge correctly.
 *
 * NOTE(review): the ubi_leb_read() return value is ignored, and
 * UBI_UNKNOWN is passed as the 'check' argument (do_cached_read passes
 * 0) — both worth confirming against the UBI kernel API.
 */
void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block)
{
	/* Offset past the EC/VID header space of the physical block. */
	int offset = ubiblk->uv->vol->ubi->ec_hdr_alsize + ubiblk->uv->vol->ubi->vid_hdr_alsize; 
	ubiblk->vbw = virt_block;
	ubiblk->write_cache_state = STATE_USED;
	ubi_leb_read(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, offset, 
		     ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
	//memset(write_cache, 0xff, ubiblk->uv->vol->usable_leb_size);
}

/*
 * Write @len bytes at 512-byte @sector through the per-volume write
 * cache.  The cache holds exactly one logical (virtual) block; writing
 * to a different block first flushes the old one.  Always returns 0.
 *
 * Address math: sectors -> min_io_size pages -> (virt_block, page)
 * within a LEB; ppb is pages per LEB.
 *
 * NOTE(review): in the unmapped branch, ubiblk_setup_writecache() reads
 * the LEB *before* ubi_leb_map() maps it — confirm this is intended.
 */
static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector, 
			    int len, const char *buf)
{
	struct ubi_volume_desc *uv = ubiblk->uv;
	int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
	unsigned short sectors_per_page =  uv->vol->ubi->min_io_size >> 9;
	unsigned short page_shift =  ffs(uv->vol->ubi->min_io_size) - 1;
	unsigned short virt_block, page, page_offset; 	
	unsigned long virt_page; 
	
	virt_page = sector / sectors_per_page;
	page_offset = sector % sectors_per_page;
	virt_block = virt_page / ppb; 
	page = virt_page % ppb;

	if(ubi_is_mapped(uv, virt_block ) == UNMAPPED ){
		/* Target block has no physical block yet: flush whatever
		   is cached, then retarget the cache and map the LEB. */
		mutex_lock(&ubiblk->cache_mutex);
		ubiblk_flush_writecache(ubiblk);
		mutex_unlock(&ubiblk->cache_mutex);
	
		ubiblk_setup_writecache(ubiblk, virt_block);
          	ubi_leb_map(uv,  virt_block, UBI_UNKNOWN);

	} else {
		if ( STATE_USED == ubiblk->write_cache_state ) {
			if ( ubiblk->vbw != virt_block) {
			// Commit before we start a new cache.
				mutex_lock(&ubiblk->cache_mutex);
				ubiblk_flush_writecache(ubiblk);
				mutex_unlock(&ubiblk->cache_mutex);

				/* Remap so the whole LEB can be rewritten. */
				ubiblk_setup_writecache(ubiblk, virt_block);
				ubi_leb_unmap(uv, virt_block);
			  	ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
			} else {
				//dprintk("cache hit: 0x%x\n", virt_page);
			}
		} else {
			/* Clean cache, existing mapping: load and remap. */
			ubiblk_setup_writecache(ubiblk, virt_block);
			ubi_leb_unmap(uv, virt_block);		
			ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
		}                        
	}		
	/* Merge the new data into the cached block image. */
	memcpy(&ubiblk->write_cache[(page<<page_shift) +(page_offset<<9)],
	       buf,len);
	return 0;
}

/*
 * Read @len bytes at 512-byte @sector through the per-volume read cache.
 *
 * The dirty write cache is flushed first so reads observe recent writes.
 * An unmapped logical block reads back as erased flash (all 0xFF).
 * Always returns 0; low-level read errors are not propagated (unchanged
 * from the original design).
 *
 * Fix: the unmapped branch used to clear a hard-coded 512 bytes even
 * though the caller asks for @len bytes — it now honours @len.
 */
static int do_cached_read (struct ubiblk_dev *ubiblk, unsigned long sector, 
			   int len, char *buf)
{
	struct ubi_volume_desc *uv = ubiblk->uv;
	int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
	unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9;
	unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1;
	unsigned short virt_block, page, page_offset;
	unsigned long virt_page;
	/* Offset past the EC/VID header space of the physical block. */
	int offset = uv->vol->ubi->ec_hdr_alsize + uv->vol->ubi->vid_hdr_alsize;

	/* Make pending cached writes visible to this read. */
	mutex_lock(&ubiblk->cache_mutex);
	ubiblk_flush_writecache(ubiblk);
	mutex_unlock(&ubiblk->cache_mutex);

	virt_page = sector / sectors_per_page;
	page_offset = sector % sectors_per_page;
	virt_block = virt_page / ppb;
	page = virt_page % ppb;

	if (ubi_is_mapped(uv, virt_block) == UNMAPPED) {
		/* A logical block with no physical block behind it reads
		   as erased flash. */
		memset(buf, 0xFF, len);
	} else {
		/* Refill the read cache if it holds a different block. */
		if (ubiblk->vbr != virt_block || ubiblk->read_cache_state == STATE_UNUSED) {
			ubiblk->vbr = virt_block;
			ubi_leb_read(uv, virt_block, ubiblk->read_cache, offset, uv->vol->usable_leb_size, 0);
			ubiblk->read_cache_state = STATE_USED;
		}
		memcpy(buf, &ubiblk->read_cache[(page<<page_shift)+(page_offset<<9)], len);
	}
	return 0;
}

static int ubiblk_readsect(struct ubi_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
	return do_cached_read(ubiblk, block, 512, buf);
}

static int ubiblk_writesect(struct ubi_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
	return do_cached_write(ubiblk, block, 512, buf);
}

static int ubiblk_init_vol(int dev, struct ubi_volume_desc *uv)
{
	struct ubiblk_dev *ubiblk;
	int ret;
			
	ubiblk = kmalloc(sizeof(struct ubiblk_dev), GFP_KERNEL);
	if (!ubiblk)
		return -ENOMEM;

	memset(ubiblk, 0, sizeof(*ubiblk));

	ubiblk->count = 1;
	ubiblk->uv = uv;
	mutex_init (&ubiblk->cache_mutex);

	ubiblk->write_cache = vmalloc(ubiblk->uv->vol->usable_leb_size); 
	ubiblk->read_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
	
	if(!ubiblk->write_cache || 
		!ubiblk->read_cache )
		return -ENOMEM;

	ubiblk->write_cache_state = STATE_UNUSED;
	ubiblk->read_cache_state = STATE_UNUSED;

	ubiblks[dev] = ubiblk;
	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
	return 0;
}

static int ubiblk_open(struct ubi_blktrans_dev *ubd)
{
	int dev = ubd->devnum;
	int res = 0;

	DEBUG(MTD_DEBUG_LEVEL1,"ubiblock_open\n");

	if (ubiblks[dev]) {
		ubiblks[dev]->count++;
		printk("%s: increase use count\n",__FUNCTION__);
		return 0;
	}

	/* OK, it's not open. Create cache info for it */
	res = ubiblk_init_vol(dev, ubd->uv);
	return res;
}

/*
 * blktrans ->release hook: flush the write cache and, on the last
 * close, sync the underlying MTD device and free all per-volume state.
 */
static int ubiblk_release(struct ubi_blktrans_dev *ubd)
{
	int dev = ubd->devnum;
	struct ubiblk_dev *ubiblk = ubiblks[dev];
	struct ubi_device *ubi = ubiblk->uv->vol->ubi;

	mutex_lock(&ubiblk->cache_mutex);
	ubiblk_flush_writecache(ubiblk);
	mutex_unlock(&ubiblk->cache_mutex);

	ubiblk->count--;
	if (ubiblk->count == 0) {
		/* Last user gone: release everything. */
		ubiblks[dev] = NULL;

		if (ubi->mtd->sync)
			ubi->mtd->sync(ubi->mtd);

		vfree(ubiblk->write_cache);
		vfree(ubiblk->read_cache);
		kfree(ubiblk);
	}
	return 0;
}

static int ubiblk_flush(struct ubi_blktrans_dev *dev)
{
	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
	struct ubi_device *ubi = ubiblk->uv->vol->ubi;
	
	mutex_lock(&ubiblk->cache_mutex);
	ubiblk_flush_writecache(ubiblk);
	mutex_unlock(&ubiblk->cache_mutex);

	if (ubi->mtd->sync)
		ubi->mtd->sync(ubi->mtd);
	return 0;
}

/*
 * Volume-add hook: allocate a blktrans device for @vol and register it.
 * Fix: the original ignored add_ubi_blktrans_dev() failures, leaking
 * @dev on -EBUSY/-ENOMEM; it is now freed on error.
 */
static void ubiblk_add_vol_dev(struct ubi_blktrans_ops *tr, struct ubi_volume *vol)
{
	struct ubi_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	dev->devnum = vol->vol_id;
	dev->size = vol->used_bytes >> 9;	/* size in 512-byte sectors */
	dev->tr = tr;

	if (vol->bdev_mode == UBI_READONLY)
		dev->readonly = 1;

	vol->ubi->bdev_major = tr->major;

	if (add_ubi_blktrans_dev(dev))
		kfree(dev);
}

/* Volume-remove hook: unregister the blktrans device and free it. */
static void ubiblk_remove_vol_dev(struct ubi_blktrans_dev *dev)
{
	del_ubi_blktrans_dev(dev);
	kfree(dev);
}

/* Fake CHS geometry: 4 heads x 16 sectors, cylinders from total size. */
static int ubiblk_getgeo(struct ubi_blktrans_dev *dev, struct hd_geometry *geo)
{
	memset(geo, 0, sizeof(*geo));
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = dev->size / (geo->heads * geo->sectors);
	return 0;
}

/* The "ubiblock" translation layer: 512-byte sectors, dynamic major. */
static struct ubi_blktrans_ops ubiblk_tr = {
	.name       = "ubiblock",
	.major      = 0,		/* 0 = allocate major dynamically */
	.part_bits  = 0,
	.blksize    = 512,
	.open       = ubiblk_open,
	.release    = ubiblk_release,
	.readsect   = ubiblk_readsect,
	.writesect  = ubiblk_writesect,
	.getgeo     = ubiblk_getgeo,
	.flush      = ubiblk_flush,
	.add_vol    = ubiblk_add_vol_dev,
	.remove_vol = ubiblk_remove_vol_dev,
	.owner      = THIS_MODULE,
};

/* Module init: register the "ubiblock" translation layer with bdev.c. */
static int __init init_ubiblock(void)
{
	return register_ubi_blktrans(&ubiblk_tr);
}

/* Module exit: tear the translation layer (and all its disks) down. */
static void __exit cleanup_ubiblock(void)
{
	deregister_ubi_blktrans(&ubiblk_tr);
}

module_init(init_ubiblock);
module_exit(cleanup_ubiblock);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> , Yurong Tan <nancydreaming@gmail.com>");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to UBI volumes");
diff -nprBN ubi.orig/build.c ubi.new/build.c
a48 152
/* add by Nancy begin */

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(vol_table_mutex);
struct ubi_volume *vol_table[UBI_MAX_VOLUMES];

EXPORT_SYMBOL_GPL(vol_table_mutex);
EXPORT_SYMBOL_GPL(vol_table);

static LIST_HEAD(vol_notifiers);

/**
 *	add_vol_device - register a UBI volume with the notifier layer
 *	@vol: pointer to the UBI volume
 *
 *	Add a volume to vol_table and notify each currently active volume
 *	'user' of its arrival.  Returns zero on success or 1 on failure
 *	(the table slot for this volume id is already occupied).
 *	(The old kernel-doc described add_mtd_device; an unused local was
 *	also removed.)
 */

int add_vol_device(struct ubi_volume *vol)
{
	mutex_lock(&vol_table_mutex);
	if (!vol_table[vol->vol_id]) {
		struct list_head *this;

		vol_table[vol->vol_id] = vol;

		/* No need to get a refcount on the module containing
		   the notifier, since we hold vol_table_mutex */
		list_for_each(this, &vol_notifiers) {
			struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
			not->add(vol);
		}

		mutex_unlock(&vol_table_mutex);
		/* We _know_ we aren't being removed, because
		   our caller is still holding us here. So none
		   of this try_ nonsense, and no bitching about it
		   either. :) */
		__module_get(THIS_MODULE);
		return 0;
	}
	mutex_unlock(&vol_table_mutex);
	return 1;
}

/**
 *	del_vol_device - unregister a UBI volume
 *	@vol: pointer to the UBI volume
 *
 *	Remove a volume from vol_table and notify each currently active
 *	volume 'user' of its departure.  Returns zero on success, -ENODEV
 *	if the volume is not registered, or -EBUSY if it is still open.
 *	(Fixed: the busy-path printk claimed "MTD device ... with use
 *	count 0", text copied from the MTD core, although it fires
 *	precisely when the volume IS in use.)
 */

int del_vol_device (struct ubi_volume *vol)
{
	int ret;

	mutex_lock(&vol_table_mutex);
	if (vol_table[vol->vol_id] != vol) {
		ret = -ENODEV;
	} else if (vol->readers || vol->writers || vol->exclusive) {
		printk(KERN_NOTICE "Cannot remove UBI volume #%d (%s): still in use\n",
		       vol->vol_id, vol->name);
		ret = -EBUSY;
	} else {
		struct list_head *this;

		/* No need to get a refcount on the module containing
		   the notifier, since we hold vol_table_mutex */
		list_for_each(this, &vol_notifiers) {
			struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
			not->remove(vol);
		}

		vol_table[vol->vol_id] = NULL;

		module_put(THIS_MODULE);
		ret = 0;
	}
	mutex_unlock(&vol_table_mutex);
	return ret;
}

/**
 *	register_vol_user - register a 'user' of UBI volumes
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callbacks to be called upon addition or
 *	removal of UBI volumes. Causes the 'add' callback to be
 *	immediately invoked for each volume currently present in
 *	vol_table.
 */

void register_vol_user(struct vol_notifier *new)
{
	int i;

	mutex_lock(&vol_table_mutex);

	list_add(&new->list, &vol_notifiers);

 	__module_get(THIS_MODULE);

	/* Replay all existing volumes to the new user. */
	for (i=0; i< UBI_MAX_VOLUMES;  i++)
		if (vol_table[i])
			new->add(vol_table[i]);

	mutex_unlock(&vol_table_mutex);
}

/**
 *	unregister_vol_user - unregister a 'user' of UBI volumes
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback pair from the list of 'users' to be notified
 *	upon addition or removal of UBI volumes. Causes the 'remove'
 *	callback to be immediately invoked for each volume currently
 *	present in vol_table. Always returns 0.
 */

int unregister_vol_user(struct vol_notifier *old)
{
	int i;

	mutex_lock(&vol_table_mutex);

	module_put(THIS_MODULE);

	/* Replay removal of every existing volume to the departing user. */
	for (i=0; i< UBI_MAX_VOLUMES; i++)
		if (vol_table[i])
			old->remove(vol_table[i]);

	list_del(&old->list);
	mutex_unlock(&vol_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(add_vol_device);
EXPORT_SYMBOL_GPL(del_vol_device);
EXPORT_SYMBOL_GPL(register_vol_user);
EXPORT_SYMBOL_GPL(unregister_vol_user);

/* add by Nancy end*/



a86 1
EXPORT_SYMBOL_GPL(ubi_devices_lock);
d207 1
a207 2
		if ( (ubi && MAJOR(ubi->cdev.dev) == major) ||
			(ubi && ubi->bdev_major == major)) {
a215 1
EXPORT_SYMBOL_GPL(ubi_major2num);
a674 9
/* add by Nancy */
/* Expose every volume of @ubi through the volume-notifier layer. */
static int bdev_init(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++) {
		if (ubi->volumes[i])
			add_vol_device(ubi->volumes[i]);
	}
	return 0;
}

a795 4
	err = bdev_init(ubi);
	if(err)
		goto out_detach;

diff -nprBN ubi.orig/cdev.c ubi.new/cdev.c
a115 4
	
	const struct ubi_device *ubi = ubi_get_by_major(imajor(inode));
	if(ubi->ubi_num != ubi_num)
		printk("ubi_num not equal!\n");
d121 1
a121 1
	file->private_data = desc; 
diff -nprBN ubi.orig/Kconfig ubi.new/Kconfig
a57 14

config MTD_UBI_BLKDEVS
	tristate "Common interface to block layer for UBI 'translation layers'"
	depends on BLOCK
	default n

config MTD_UBI_BLOCK
	tristate "Emulate block devices"
	default n
	depends on MTD_UBI_BLKDEVS
	help
	   This option enables Block layer emulation on top of UBI volumes: for
	   each UBI volumes an block device is created. This is handy to make
	   traditional filesystem (like ext2, VFAT) work on top of UBI.
diff -nprBN ubi.orig/Makefile ubi.new/Makefile
a7 3

obj-$(CONFIG_MTD_UBI_BLKDEVS) += bdev.o 
obj-$(CONFIG_MTD_UBI_BLOCK) += block-jz.o
diff -nprBN ubi.orig/ubi.h ubi.new/ubi.h
a135 6
struct vol_notifier {
	void (*add)(struct ubi_volume *vol);
	void (*remove)(struct ubi_volume *vol);
	struct list_head list;
};

a234 1
	int  bdev_mode;  //add by Nancy
a338 1
	int bdev_major;  //add by Nancy
d507 1
a507 1
 

[-- Attachment #3: ubi_blktrans.h --]
[-- Type: text/plain, Size: 1973 bytes --]

/*
 * $Id: ubi_blktrans.h
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 * (C) 2008 Yurong Tan <nancydreaming@gmail.com> : borrow from MTD blktrans.h for UBI used
 * Interface to Linux block layer for UBI 'translation layers'.
 *
 */

#ifndef __UBI_TRANS_H__
#define __UBI_TRANS_H__

#include <linux/mutex.h>

struct hd_geometry;
struct ubi_volume_desc;
struct ubi_blktrans_ops;
struct file;
struct inode;

/* One emulated block device sitting on top of a single UBI volume. */
struct ubi_blktrans_dev {
	struct ubi_blktrans_ops *tr;	/* owning translation layer */
	struct list_head list;		/* link in tr->devs */
	struct ubi_volume_desc *uv;	/* backing UBI volume descriptor */
	struct mutex lock;		/* serializes request processing */
	int devnum;
	unsigned long size;		/* device size in blocks */
	int readonly;
	void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
};

struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */

/* One registered translation layer (e.g. "ubiblock"). */
struct ubi_blktrans_ops {
	char *name;		/* base name for the gendisks */
	int major;		/* block major; 0 = allocate dynamically */
	int part_bits;		/* minor bits reserved for partitions */
	int blksize;		/* device block size in bytes */
	int blkshift;		/* log2(blksize), filled in at register time */

	/* Access functions */
	int (*readsect)(struct ubi_blktrans_dev *dev,
		    unsigned long block, char *buffer);
	int (*writesect)(struct ubi_blktrans_dev *dev,
		     unsigned long block, char *buffer);

	/* Block layer ioctls */
	int (*getgeo)(struct ubi_blktrans_dev *dev, struct hd_geometry *geo);
	int (*flush)(struct ubi_blktrans_dev *dev);

	/* Called with vol_table_mutex held; no race with add/remove */
	int (*open)(struct ubi_blktrans_dev *dev);
	int (*release)(struct ubi_blktrans_dev *dev);

	/* Called on {de,}registration and on subsequent addition/removal
	   of volumes, with vol_table_mutex held. */
	void (*add_vol)(struct ubi_blktrans_ops *tr, struct ubi_volume *vol);
	void (*remove_vol)(struct ubi_blktrans_dev *dev);

	struct list_head devs;	/* devices created by this layer */
	struct list_head list;	/* link in blktrans_majors */
	struct module *owner;

	struct ubi_blkcore_priv *blkcore_priv;	/* queue/thread state */
};

extern int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new);
extern int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old);
extern int register_ubi_blktrans(struct ubi_blktrans_ops *tr);
extern int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr);


#endif /* __UBI_TRANS_H__ */

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2008-05-15  5:54 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-03-03 11:17 [PATCH] [MTD] [UBI] add block device layer on top of UBI Nancy
2008-03-04  9:40 ` Nancy
2008-03-04  9:40   ` Artem Bityutskiy
2008-03-04  9:54     ` Nancy
2008-03-04  9:53       ` Artem Bityutskiy
2008-03-04 10:07         ` Nancy
2008-03-04 13:55           ` Nancy
2008-05-13  4:16             ` Nancy
2008-05-13  9:53               ` Artem Bityutskiy
2008-05-13 13:31                 ` Nancy
2008-05-14 13:50                   ` Artem Bityutskiy
2008-05-15  4:59                     ` Nancy
2008-05-14 18:15                   ` N Cheung
2008-05-15  5:54                     ` Nancy

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox