public inbox for linux-mtd@lists.infradead.org
 help / color / mirror / Atom feed
* UBI: adds block device layer on top of UBI
@ 2008-03-02  3:27 Nancy
  2008-03-03 12:18 ` Artem Bityutskiy
  0 siblings, 1 reply; 3+ messages in thread
From: Nancy @ 2008-03-02  3:27 UTC (permalink / raw)
  To: linux-mtd

[-- Attachment #1: Type: text/plain, Size: 745 bytes --]

Hi dear all,
    First of all, this patch does not work properly; I need some help to fix it.
    I do not know why the "close" system call does not pass the
"struct file *f" parameter down to the block layer's release function.
This is beyond the scope of the code I can deal with.
    If this description is unclear, the problem can be reproduced by
running:
    #mkfs.vfat /dev/ubiblock0
    Many thanks for your notice and help!

P.S.:
    This patch is based on the UBI version from
git://git.infradead.org/users/dedekind/ubifs-v2.6.24.git/
    #cd linux-2.6.24/drivers/mtd/ubi
    #patch -p1 < ~/ubi.diff
    Then copy another header file ubi_blktrans.h under
linux-2.6.24/include/linux/mtd
    Notice: Please do not select MTD_UBI_GLUEBI

--
Best wishes
Nancy

[-- Attachment #2: ubi_blktrans.h --]
[-- Type: text/plain, Size: 1973 bytes --]

/*
 * $Id: ubi_blktrans.h
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 * (C) 2008 Yurong Tan <nancydreaming@gmail.com> : borrow from MTD blktrans.h for UBI used
 * Interface to Linux block layer for UBI 'translation layers'.
 *
 */

#ifndef __UBI_TRANS_H__
#define __UBI_TRANS_H__

#include <linux/mutex.h>

struct hd_geometry;
struct ubi_volume_desc;
struct ubi_blktrans_ops;
struct file;
struct inode;

/*
 * Per-volume block translation device: one is allocated for every UBI
 * volume that is exported as a block device.
 */
struct ubi_blktrans_dev {
	struct ubi_blktrans_ops *tr;	/* owning translation layer */
	struct list_head list;		/* link in tr->devs */
	struct ubi_volume_desc *uv;	/* open UBI volume backing this device */
      	struct mutex lock;		/* serializes request handling on this device */
	int devnum;			/* device number; -1 means "use first free" */
	unsigned long size;		/* device size in units of tr->blksize */
	int readonly;			/* set when the layer provides no writesect */
	void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
};

struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */

/*
 * A block 'translation layer': registers one block major and exposes
 * UBI volumes as block devices through the callbacks below.
 */
struct ubi_blktrans_ops {
	char *name;	/* device name prefix, e.g. "ubiblock" */
	int major;	/* block major; 0 requests dynamic allocation */
	int part_bits;	/* minor bits reserved for partitions */
	int blksize;	/* sector size in bytes (power of two) */
	int blkshift;	/* log2(blksize); filled in at registration time */

	/* Access functions; 'block' is in units of blksize */
	int (*readsect)(struct ubi_blktrans_dev *dev,
		    unsigned long block, char *buffer);
	int (*writesect)(struct ubi_blktrans_dev *dev,
		     unsigned long block, char *buffer);

	/* Block layer ioctls */
	int (*getgeo)(struct ubi_blktrans_dev *dev, struct hd_geometry *geo);
	int (*flush)(struct ubi_blktrans_dev *dev);

	/* NOTE(review): comment inherited from MTD blktrans.h — in this
	   code the relevant lock is vol_table_mutex, and open/release are
	   invoked from the block layer without it; confirm locking. */
	int (*open)(struct ubi_blktrans_dev *dev);
	int (*release)(struct ubi_blktrans_dev *dev);

	/* Called on {de,}registration and on subsequent addition/removal
	   of volumes, with vol_table_mutex held. */
	void (*add_vol)(struct ubi_blktrans_ops *tr, struct ubi_volume *vol);
	void (*remove_vol)(struct ubi_blktrans_dev *dev);

	struct list_head devs;	/* devices created by this layer */
	struct list_head list;	/* link in the global list of majors */
	struct module *owner;	/* refcounted across open/release */

	struct ubi_blkcore_priv *blkcore_priv;	/* request queue + worker thread */
};

/* Registration API: translation layers register themselves and then
   add/remove one ubi_blktrans_dev per exported volume. */
extern int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new);
extern int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old);
extern int register_ubi_blktrans(struct ubi_blktrans_ops *tr);
extern int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr);


#endif /* __UBI_TRANS_H__ */

[-- Attachment #3: ubi.diff --]
[-- Type: text/plain, Size: 34286 bytes --]

diff -uprBN ubi.v24/bdev.c ubi.test/bdev.c
--- ubi.v24/bdev.c	1969-12-31 19:00:00.000000000 -0500
+++ ubi.test/bdev.c	2008-03-01 21:22:56.000000000 -0500
@@ -0,0 +1,497 @@
+/*
+ * $Id: bdev.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
+ *
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Interface to Linux 2.5 block layer for UBI 'translation layers'.
+ *
+ * 2008 Yurong Tan <nancydreaming@gmail.com>:
+ *      borrow from mtd_blkdevs.c for building block device layer on top of UBI  
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mtd/ubi_blktrans.h>
+#include <linux/mtd/ubi.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/freezer.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/uaccess.h>
+#include "ubi.h"
+
+
+static LIST_HEAD(blktrans_majors);
+extern struct mutex vol_table_mutex;  
+extern struct ubi_volume *vol_table[];      
+
+extern void register_vol_user (struct vol_notifier *new);
+extern int unregister_vol_user (struct vol_notifier *old);
+extern int ubi_major2num(int major);
+
+struct ubi_blkcore_priv {
+	struct task_struct *thread;
+	struct request_queue *rq;
+	spinlock_t queue_lock;
+};
+
+static int do_blktrans_request(struct ubi_blktrans_ops *tr,
+			       struct ubi_blktrans_dev *dev,
+			       struct request *req)
+{
+	unsigned long block, nsect;
+	char *buf;
+
+	block = req->sector << 9 >> tr->blkshift;
+	nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+
+	buf = req->buffer;
+
+	if (!blk_fs_request(req))
+		return 0;
+
+	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
+		return 0;
+
+	switch(rq_data_dir(req)) {
+	case READ:
+		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+			if (tr->readsect(dev, block, buf))
+				return 0;
+		return 1;
+
+	case WRITE:
+		if (!tr->writesect)
+			return 0;
+
+		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+			if (tr->writesect(dev, block, buf))
+				return 0;
+		return 1;
+
+	default:
+		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+		return 0;		
+	}
+}
+
+static int ubi_blktrans_thread(void *arg)
+{
+	struct ubi_blktrans_ops *tr = arg;
+	struct request_queue *rq = tr->blkcore_priv->rq;
+
+	/* we might get involved when memory gets low, so use PF_MEMALLOC */
+	//current->flags |= PF_MEMALLOC | PF_NOFREEZE;
+	current->flags |= PF_MEMALLOC;
+
+	spin_lock_irq(rq->queue_lock);
+	while (!kthread_should_stop()) {
+		struct request *req;
+		struct ubi_blktrans_dev *dev;
+		int res = 0;
+
+		req = elv_next_request(rq);
+
+		if (!req) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(rq->queue_lock);
+			schedule();
+			spin_lock_irq(rq->queue_lock);
+			continue;
+		}
+
+		dev = req->rq_disk->private_data;
+		tr = dev->tr;
+
+		spin_unlock_irq(rq->queue_lock);
+
+		mutex_lock(&dev->lock);
+		res = do_blktrans_request(tr, dev, req);
+		mutex_unlock(&dev->lock);
+
+		spin_lock_irq(rq->queue_lock);
+
+		end_request(req, res);
+	}
+	spin_unlock_irq(rq->queue_lock);
+
+	return 0;
+}
+
+static void ubi_blktrans_request(struct request_queue *rq)
+{
+	struct ubi_blktrans_ops *tr = rq->queuedata;
+	wake_up_process(tr->blkcore_priv->thread);
+}
+
+static int blktrans_open(struct inode *i, struct file *f)
+{
+	struct ubi_volume_desc *desc;
+	int ubi_num = ubi_major2num(imajor(i)); 
+	int vol_id = iminor(i);
+      
+	int mode;
+	struct ubi_blktrans_dev *dev;
+	struct ubi_blktrans_ops *tr; 
+	int ret = -ENODEV;
+
+	if (f->f_mode & FMODE_WRITE)
+		mode = UBI_READWRITE;
+	else
+		mode = UBI_READONLY;
+
+	printk("\nopen ubi device %d as blockdev, vol_id %d, mode: %d\n", ubi_num, vol_id, mode);
+
+	desc = ubi_open_volume(ubi_num, vol_id, mode);
+	if (IS_ERR(desc)){
+		printk("return %d\n",__LINE__);
+		return PTR_ERR(desc);
+	}
+	desc->vol->bdev_mode = mode;
+	f->private_data = desc;
+	dev = i->i_bdev->bd_disk->private_data;
+	dev->uv = desc; // add  by Nancy 
+	tr = dev->tr;
+
+	if (!try_module_get(tr->owner)){
+		printk("return %d\n",__LINE__);
+		goto out_tr;
+	}
+
+	/* FIXME: Locking. A hot pluggable device can go away
+	   (del_mtd_device can be called for it) without its module
+	   being unloaded. */
+//	ubi_get_volume(desc);
+
+	ret = 0;
+	if (tr->open && (ret = tr->open(dev))) {
+//		ubi_put_volume(desc);
+		printk("tr->open(dev) faild\n");
+		ubi_close_volume(desc);
+	out_tr:
+		module_put(tr->owner);
+	}
+ out:
+	return ret;
+}
+
+/*
+ * Block device release.  Flushes the translation layer (tr->release),
+ * closes the backing UBI volume and drops the module reference taken
+ * in blktrans_open().
+ *
+ * BUGFIX(review): the original debug code contained a bare "while(1);"
+ * after the NULL check — the infinite loop ran unconditionally (the if
+ * guarded only the printk), hanging the kernel on every close().  This
+ * is the hang reported in the cover letter.  If the VFS really hands us
+ * a NULL file, fail gracefully instead of spinning.
+ */
+static int blktrans_release(struct inode *i, struct file *f)
+{
+	struct ubi_blktrans_dev *dev;
+	struct ubi_blktrans_ops *tr;
+	struct ubi_volume_desc *desc;
+	int ret = 0;
+
+	if (f == NULL) {
+		printk(KERN_WARNING "%s: no struct file supplied\n",
+		       __FUNCTION__);
+		return -EINVAL;
+	}
+
+	dev = i->i_bdev->bd_disk->private_data;
+	tr = dev->tr;
+	desc = f->private_data;
+
+	if (tr->release)
+		ret = tr->release(dev);
+
+	if (!ret) {
+		ubi_close_volume(desc);
+		module_put(tr->owner);
+	}
+	return ret;
+}
+
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct ubi_blktrans_dev *dev = bdev->bd_disk->private_data;
+
+	if (dev->tr->getgeo)
+		return dev->tr->getgeo(dev, geo);
+	return -ENOTTY;
+}
+
+static int blktrans_ioctl(struct inode *inode, struct file *file,
+			      unsigned int cmd, unsigned long arg)
+{
+	struct ubi_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+	struct ubi_blktrans_ops *tr = dev->tr;
+
+	switch (cmd) {
+	case BLKFLSBUF:
+		if (tr->flush)
+			return tr->flush(dev);
+		/* The core code did the work, we had nothing to do. */
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
+
+struct block_device_operations ubi_blktrans_ops = {
+	.owner		= THIS_MODULE,
+	.open		= blktrans_open,
+	.release	         = blktrans_release,
+	.ioctl	          	= blktrans_ioctl,
+	.getgeo		= blktrans_getgeo,
+};
+
+int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new)
+{
+	struct ubi_blktrans_ops *tr = new->tr;
+	struct list_head *this;
+	int last_devnum = -1;
+	struct gendisk *gd;
+	
+	if (mutex_trylock(&vol_table_mutex)) {
+		mutex_unlock(&vol_table_mutex);
+		BUG();
+	}
+
+	list_for_each(this, &tr->devs) {
+		struct ubi_blktrans_dev *d = list_entry(this, struct ubi_blktrans_dev, list);
+		if (new->devnum == -1) {
+			/* Use first free number */
+			if (d->devnum != last_devnum+1) {
+				/* Found a free devnum. Plug it in here */
+				new->devnum = last_devnum+1;
+				list_add_tail(&new->list, &d->list);
+				goto added;
+			}
+		} else if (d->devnum == new->devnum) {
+			/* Required number taken */
+			return -EBUSY;
+		} else if (d->devnum > new->devnum) {
+			/* Required number was free */
+			list_add_tail(&new->list, &d->list);
+			goto added;
+		}
+		last_devnum = d->devnum;
+	}
+	if (new->devnum == -1)
+		new->devnum = last_devnum+1;
+
+	if ((new->devnum << tr->part_bits) > 256) {
+		return -EBUSY;
+	}
+
+	mutex_init(&new->lock);
+	list_add_tail(&new->list, &tr->devs);
+ added:
+	if (!tr->writesect)
+		new->readonly = 1;
+
+	gd = alloc_disk(1 << tr->part_bits);
+	if (!gd) {
+		list_del(&new->list);
+		return -ENOMEM;
+	}
+	gd->major = tr->major;
+	gd->first_minor = (new->devnum) << tr->part_bits;
+	gd->fops = &ubi_blktrans_ops;
+
+	if (tr->part_bits)
+		if (new->devnum < 26)
+			snprintf(gd->disk_name, sizeof(gd->disk_name),
+				 "%s%c", tr->name, 'a' + new->devnum);
+		else
+			snprintf(gd->disk_name, sizeof(gd->disk_name),
+				 "%s%c%c", tr->name,
+				 'a' - 1 + new->devnum / 26,
+				 'a' + new->devnum % 26);
+	else
+		snprintf(gd->disk_name, sizeof(gd->disk_name),
+			 "%s%d", tr->name, new->devnum);
+
+	/* 2.5 has capacity in units of 512 bytes while still
+	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
+	set_capacity(gd, (new->size * tr->blksize) >> 9);
+
+	gd->private_data = new;
+	new->blkcore_priv = gd;
+	gd->queue = tr->blkcore_priv->rq;
+
+	if (new->readonly)
+		set_disk_ro(gd, 1);
+
+	add_disk(gd);
+
+	return 0;
+}
+
+int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old)
+{
+	if (mutex_trylock(&vol_table_mutex)) {
+		mutex_unlock(&vol_table_mutex);
+		BUG();
+	}
+
+	list_del(&old->list);
+
+	del_gendisk(old->blkcore_priv);
+	put_disk(old->blkcore_priv);
+
+	return 0;
+}
+
+static void blktrans_notify_remove(struct ubi_volume *vol)
+{
+	struct list_head *this, *this2, *next;
+
+	list_for_each(this, &blktrans_majors) {
+		struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);
+
+		list_for_each_safe(this2, next, &tr->devs) {
+			struct ubi_blktrans_dev *dev = list_entry(this2, struct ubi_blktrans_dev, list);
+
+			if (dev->uv->vol == vol)
+				tr->remove_vol(dev);
+		}
+	}
+}
+
+static void blktrans_notify_add(struct ubi_volume *vol)
+{
+	struct list_head *this;
+ #if 0
+	if (mtd->type == MTD_ABSENT)
+		return;
+#endif
+
+	list_for_each(this, &blktrans_majors) {
+		struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);
+
+		tr->add_vol(tr,vol);
+	}
+
+}
+
+static struct vol_notifier blktrans_notifier = {
+	.add = blktrans_notify_add,
+	.remove = blktrans_notify_remove,
+};
+
+
+/*
+ * Register a block translation layer: allocate its request queue and
+ * worker thread, register a (dynamic) block major, and create a block
+ * device for every UBI volume already present in vol_table[].
+ *
+ * BUGFIX(review): the original ignored register_blkdev() failures (the
+ * error check was #if 0'd out and tested a never-assigned 'ret'); a
+ * negative major would have been used as-is.  register_blkdev(0, name)
+ * returns the allocated major on success or a negative errno.
+ */
+int register_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+	int i;
+
+	/* Register the notifier if/when the first device type is
+	   registered, to prevent link/init ordering problems. */
+	if (!blktrans_notifier.list.next)
+		register_vol_user(&blktrans_notifier);
+
+	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
+	if (!tr->blkcore_priv)
+		return -ENOMEM;
+
+	mutex_lock(&vol_table_mutex);
+
+	tr->major = register_blkdev(0, tr->name);
+	if (tr->major < 0) {
+		int err = tr->major;
+
+		printk(KERN_WARNING
+		       "Unable to register %s block device: %d\n",
+		       tr->name, err);
+		kfree(tr->blkcore_priv);
+		mutex_unlock(&vol_table_mutex);
+		return err;
+	}
+
+	spin_lock_init(&tr->blkcore_priv->queue_lock);
+
+	tr->blkcore_priv->rq = blk_init_queue(ubi_blktrans_request,
+					      &tr->blkcore_priv->queue_lock);
+	if (!tr->blkcore_priv->rq) {
+		unregister_blkdev(tr->major, tr->name);
+		kfree(tr->blkcore_priv);
+		mutex_unlock(&vol_table_mutex);
+		return -ENOMEM;
+	}
+
+	tr->blkcore_priv->rq->queuedata = tr;
+	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+	tr->blkshift = ffs(tr->blksize) - 1;
+
+	tr->blkcore_priv->thread = kthread_run(ubi_blktrans_thread, tr,
+			"%sd", tr->name);
+	if (IS_ERR(tr->blkcore_priv->thread)) {
+		blk_cleanup_queue(tr->blkcore_priv->rq);
+		unregister_blkdev(tr->major, tr->name);
+		kfree(tr->blkcore_priv);
+		mutex_unlock(&vol_table_mutex);
+		return PTR_ERR(tr->blkcore_priv->thread);
+	}
+
+	INIT_LIST_HEAD(&tr->devs);
+	list_add(&tr->list, &blktrans_majors);
+
+	/* Export every volume that was attached before we registered. */
+	for (i = 0; i < UBI_MAX_VOLUMES; i++) {
+		if (vol_table[i])
+			tr->add_vol(tr, vol_table[i]);
+	}
+
+	mutex_unlock(&vol_table_mutex);
+	return 0;
+}
+
+int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+	struct list_head *this, *next;
+
+	mutex_lock(&vol_table_mutex);  
+
+	/* Clean up the kernel thread */
+	kthread_stop(tr->blkcore_priv->thread);
+
+	/* Remove it from the list of active majors */
+	list_del(&tr->list);
+
+	list_for_each_safe(this, next, &tr->devs) {
+		struct ubi_blktrans_dev *dev = list_entry(this, struct ubi_blktrans_dev, list);
+		tr->remove_vol(dev);
+	}
+
+	blk_cleanup_queue(tr->blkcore_priv->rq);
+	unregister_blkdev(tr->major, tr->name);
+
+	mutex_unlock(&vol_table_mutex); 
+
+	kfree(tr->blkcore_priv);
+
+	BUG_ON(!list_empty(&tr->devs));
+	return 0;
+}
+
+static void __exit ubi_blktrans_exit(void)
+{
+	/* No race here -- if someone's currently in register_mtd_blktrans
+	   we're screwed anyway. */
+	if (blktrans_notifier.list.next)
+		unregister_vol_user(&blktrans_notifier);
+
+}
+
+
+module_exit(ubi_blktrans_exit); 
+
+EXPORT_SYMBOL_GPL(register_ubi_blktrans);
+EXPORT_SYMBOL_GPL(deregister_ubi_blktrans);
+EXPORT_SYMBOL_GPL(add_ubi_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_ubi_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Yurong Tan <nancydreaming@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for UBI 'translation layers'");
+
diff -uprBN ubi.v24/block-jz.c ubi.test/block-jz.c
--- ubi.v24/block-jz.c	1969-12-31 19:00:00.000000000 -0500
+++ ubi.test/block-jz.c	2008-03-01 21:25:06.000000000 -0500
@@ -0,0 +1,301 @@
+/*
+ * Direct UBI block device access
+ *
+ * $Id: ubiblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
+ *
+ * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
+ * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
+ * (C) 2008 Yurong Tan <nancydreaming@gmail.com> :
+ *        borrow mtdblock.c to work on top of UBI
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/ubi.h>
+#include <linux/mtd/ubi_blktrans.h>
+#include <linux/mutex.h>
+#include "ubi.h"
+
+#define UNMAPPED 1
+
+static struct ubiblk_dev {
+	struct ubi_volume_desc *uv;
+	int count;
+	struct mutex cache_mutex;
+	unsigned short vbw;           //virt block number of write cache
+	unsigned short vbr;            //virt block number of read cache
+	unsigned char *write_cache;
+	unsigned char *read_cache;
+	enum { STATE_UNUSED, STATE_USED } read_cache_state, write_cache_state;
+} *ubiblks[UBI_MAX_VOLUMES];   
+
+void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block);
+int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk);
+
+int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk)
+{
+	if (STATE_UNUSED == ubiblk->write_cache_state)
+		return 0;
+	ubi_leb_write(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, 0, 
+		      ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
+	ubiblk->write_cache_state = STATE_UNUSED;
+	return 0;
+}
+
+void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block)
+{
+	ubiblk->vbw = virt_block;
+	ubiblk->write_cache_state = STATE_USED;
+	ubi_leb_read(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, 0, 
+		     ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
+	//memset(write_cache, 0xff, ubiblk->uv->vol->usable_leb_size);
+}
+
+static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector, 
+			    int len, const char *buf)
+{
+	struct ubi_volume_desc *uv = ubiblk->uv;
+	int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
+	unsigned short sectors_per_page =  uv->vol->ubi->min_io_size >> 9;
+	unsigned short page_shift =  ffs(uv->vol->ubi->min_io_size) - 1;
+	unsigned short virt_block, page, page_offset; 	
+	unsigned long virt_page; 
+	
+	virt_page = sector / sectors_per_page;
+	page_offset = sector % sectors_per_page;
+	virt_block = virt_page / ppb; 
+	page = virt_page % ppb;
+
+	if(ubi_is_mapped(uv, virt_block ) == UNMAPPED ){
+		mutex_lock(&ubiblk->cache_mutex);
+		ubiblk_flush_writecache(ubiblk);
+		mutex_unlock(&ubiblk->cache_mutex);
+	
+		ubiblk_setup_writecache(ubiblk, virt_block);
+          	ubi_leb_map(uv,  virt_block, UBI_UNKNOWN);
+
+	} else {
+		if ( STATE_USED == ubiblk->write_cache_state ) {
+			if ( ubiblk->vbw != virt_block) {
+			// Commit before we start a new cache.
+				mutex_lock(&ubiblk->cache_mutex);
+				ubiblk_flush_writecache(ubiblk);
+				mutex_unlock(&ubiblk->cache_mutex);
+
+				ubiblk_setup_writecache(ubiblk, virt_block);
+				ubi_leb_unmap(uv, virt_block);
+			  	ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
+			} else {
+				//dprintk("cache hit: 0x%x\n", virt_page);
+			}
+		} else {
+			printk("with existing mapping\n");
+
+			ubiblk_setup_writecache(ubiblk, virt_block);
+			ubi_leb_unmap(uv, virt_block);		
+			ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
+		}                        
+	}		
+	memcpy(&ubiblk->write_cache[(page<<page_shift) +(page_offset<<9)],
+	       buf,len);
+	return 0;
+}
+
+static int do_cached_read (struct ubiblk_dev *ubiblk, unsigned long sector, 
+			   int len, char *buf)
+{
+	struct ubi_volume_desc *uv = ubiblk->uv;
+	int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
+	unsigned short sectors_per_page =  uv->vol->ubi->min_io_size >> 9;
+	unsigned short page_shift =  ffs(uv->vol->ubi->min_io_size) - 1;
+	unsigned short virt_block, page, page_offset; 	
+	unsigned long virt_page; 
+	
+	mutex_lock(&ubiblk->cache_mutex);
+	ubiblk_flush_writecache(ubiblk);
+	mutex_unlock(&ubiblk->cache_mutex);
+	
+	virt_page = sector / sectors_per_page;
+	page_offset = sector % sectors_per_page;
+	virt_block = virt_page / ppb; 
+	page = virt_page % ppb;
+	
+	if ( ubi_is_mapped( uv, virt_block) == UNMAPPED){
+		// In a Flash Memory device, there might be a logical block that is
+		// not allcated to a physical block due to the block not being used.
+		// All data returned should be set to 0xFF when accessing this logical 
+		// block.
+		//	dprintk("address translate fail\n");
+		memset(buf, 0xFF, 512);
+	} else {
+
+		if( ubiblk->vbr != virt_block ||ubiblk->read_cache_state == STATE_UNUSED ){
+			ubiblk->vbr = virt_block;
+			ubi_leb_read(uv, virt_block, ubiblk->read_cache, 0, uv->vol->usable_leb_size, 0);				
+			ubiblk->read_cache_state = STATE_USED;
+		}
+		memcpy(buf, &ubiblk->read_cache[(page<<page_shift)+(page_offset<<9)], len);
+	}
+	return 0;
+}
+
+static int ubiblk_readsect(struct ubi_blktrans_dev *dev,
+			      unsigned long block, char *buf)
+{
+	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+	return do_cached_read(ubiblk, block, 512, buf);
+}
+
+static int ubiblk_writesect(struct ubi_blktrans_dev *dev,
+			      unsigned long block, char *buf)
+{
+	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+	return do_cached_write(ubiblk, block, 512, buf);
+}
+
+static int ubiblk_init_vol(int dev, struct ubi_volume_desc *uv)
+{
+	struct ubiblk_dev *ubiblk;
+	int ret;
+			
+	ubiblk = kmalloc(sizeof(struct ubiblk_dev), GFP_KERNEL);
+	if (!ubiblk)
+		return -ENOMEM;
+
+	memset(ubiblk, 0, sizeof(*ubiblk));
+
+	ubiblk->count = 1;
+	ubiblk->uv = uv;
+	mutex_init (&ubiblk->cache_mutex);
+
+	ubiblk->write_cache = vmalloc(ubiblk->uv->vol->usable_leb_size); 
+	ubiblk->read_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
+	
+	if(!ubiblk->write_cache || 
+		!ubiblk->read_cache )
+		return -ENOMEM;
+
+	ubiblk->write_cache_state = STATE_UNUSED;
+	ubiblk->read_cache_state = STATE_UNUSED;
+
+	ubiblks[dev] = ubiblk;
+	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+	return 0;
+}
+
+static int ubiblk_open(struct ubi_blktrans_dev *ubd)
+{
+	int dev = ubd->devnum;
+	int res = 0;
+
+	DEBUG(MTD_DEBUG_LEVEL1,"ubiblock_open\n");
+
+	if (ubiblks[dev]) {
+		ubiblks[dev]->count++;
+		printk("%s: increase use count\n",__FUNCTION__);
+		return 0;
+	}
+
+	/* OK, it's not open. Create cache info for it */
+	res = ubiblk_init_vol(dev, ubd->uv);
+	return res;
+}
+
+static int ubiblk_release(struct ubi_blktrans_dev *ubd)
+{
+	int dev = ubd->devnum;
+	struct ubiblk_dev *ubiblk = ubiblks[dev];
+
+	mutex_lock(&ubiblk->cache_mutex);
+	ubiblk_flush_writecache(ubiblk);
+	mutex_unlock(&ubiblk->cache_mutex);
+
+	if (!--ubiblk->count) {
+		/* It was the last usage. Free the device */
+		ubiblks[dev] = NULL;
+#if 0
+		if (ubiblk->mtd->sync)
+			ubiblk->mtd->sync(ubiblk->mtd);
+#endif
+		vfree(ubiblk->write_cache);
+		vfree(ubiblk->read_cache);
+		kfree(ubiblk);
+	}
+	return 0;
+}
+
+static int ubiblk_flush(struct ubi_blktrans_dev *dev)
+{
+	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+
+	mutex_lock(&ubiblk->cache_mutex);
+	ubiblk_flush_writecache(ubiblk);
+	mutex_unlock(&ubiblk->cache_mutex);
+#if 0
+	if (ubiblk->ubi->sync)
+		ubiblk->ubi->sync(ubiblk->ubi);
+#endif
+	return 0;
+}
+
+static void ubiblk_add_vol_dev(struct ubi_blktrans_ops *tr, struct ubi_volume *vol)
+{
+	struct ubi_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return;
+
+	dev->devnum = vol->vol_id;
+	dev->size = vol->used_bytes >> 9;
+	dev->tr = tr;
+
+	if (vol->bdev_mode == UBI_READONLY)
+		dev->readonly = 1;
+
+	vol->ubi->bdev_major = tr->major; 
+
+	add_ubi_blktrans_dev(dev);
+}
+
+static void ubiblk_remove_vol_dev(struct ubi_blktrans_dev *dev)
+{
+	del_ubi_blktrans_dev(dev);
+	kfree(dev);
+}
+
+static struct ubi_blktrans_ops ubiblk_tr = {
+	.name		         = "ubiblock",
+	.major                   = 0,
+	.part_bits	         = 0,
+	.blksize 	         = 512,
+	.open		         = ubiblk_open,
+	.flush		         = ubiblk_flush,
+	.release	         = ubiblk_release,
+	.readsect	         = ubiblk_readsect,
+	.writesect	         = ubiblk_writesect,
+	.add_vol	         = ubiblk_add_vol_dev,
+	.remove_vol	         = ubiblk_remove_vol_dev,
+	.owner		         = THIS_MODULE,
+};
+
+static int __init init_ubiblock(void)
+{
+	return register_ubi_blktrans(&ubiblk_tr);
+}
+
+static void __exit cleanup_ubiblock(void)
+{
+	deregister_ubi_blktrans(&ubiblk_tr);
+}
+
+module_init(init_ubiblock);
+module_exit(cleanup_ubiblock);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> , Yurong Tan <nancydreaming@gmail.com>");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to UBI volumes");
diff -uprBN ubi.v24/build.c ubi.test/build.c
--- ubi.v24/build.c	2008-02-22 23:57:48.000000000 -0500
+++ ubi.test/build.c	2008-03-01 21:22:56.000000000 -0500
@@ -46,6 +46,303 @@
 /* Maximum length of the 'mtd=' parameter */
 #define MTD_PARAM_LEN_MAX 64
 
+/* add by Nancy begin */
+
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+   should not use them for _anything_ else */
+DEFINE_MUTEX(vol_table_mutex);
+struct ubi_volume *vol_table[UBI_MAX_VOLUMES];
+
+EXPORT_SYMBOL_GPL(vol_table_mutex);
+EXPORT_SYMBOL_GPL(vol_table);
+
+static LIST_HEAD(vol_notifiers);
+
+/**
+ *	add_mtd_device - register an MTD device
+ *	@mtd: pointer to new MTD device info structure
+ *
+ *	Add a device to the list of MTD devices present in the system, and
+ *	notify each currently active MTD 'user' of its arrival. Returns
+ *	zero on success or 1 on failure, which currently will only happen
+ *	if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
+ */
+
+int add_vol_device(struct ubi_volume *vol)
+{
+	int i;
+
+	mutex_lock(&vol_table_mutex);
+
+//	for (i=0; i < UBI_MAX_VOLUMES; i++)
+		if (!vol_table[vol->vol_id]) {
+			struct list_head *this;
+
+			vol_table[vol->vol_id] = vol;			
+#if 0
+			mtd->index = i;
+			mtd->usecount = 0;
+
+			/* Some chips always power up locked. Unlock them now */
+			if ((mtd->flags & MTD_WRITEABLE)
+			    && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+				if (mtd->unlock(mtd, 0, mtd->size))
+					printk(KERN_WARNING
+					       "%s: unlock failed, "
+					       "writes may not work\n",
+					       mtd->name);
+			}
+
+			DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
+#endif
+			/* No need to get a refcount on the module containing
+			   the notifier, since we hold the mtd_table_mutex */
+			list_for_each(this, &vol_notifiers) {
+				struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+				not->add(vol);
+			}
+
+			mutex_unlock(&vol_table_mutex);
+			/* We _know_ we aren't being removed, because
+			   our caller is still holding us here. So none
+			   of this try_ nonsense, and no bitching about it
+			   either. :) */
+			__module_get(THIS_MODULE);
+			return 0;
+		}
+
+	mutex_unlock(&vol_table_mutex);
+	return 1;
+}
+
+/**
+ *	del_mtd_device - unregister an MTD device
+ *	@mtd: pointer to MTD device info structure
+ *
+ *	Remove a device from the list of MTD devices present in the system,
+ *	and notify each currently active MTD 'user' of its departure.
+ *	Returns zero on success or 1 on failure, which currently will happen
+ *	if the requested device does not appear to be present in the list.
+ */
+
+int del_vol_device (struct ubi_volume *vol)
+{
+	int ret;
+
+	mutex_lock(&vol_table_mutex);
+
+	if (vol_table[vol->vol_id] != vol) {
+		ret = -ENODEV;
+	} else if (vol->readers ||vol->writers || vol->exclusive) {
+		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count 0\n",
+		       vol->vol_id, vol->name);
+		ret = -EBUSY;
+	} else {
+		struct list_head *this;
+
+		/* No need to get a refcount on the module containing
+		   the notifier, since we hold the mtd_table_mutex */
+		list_for_each(this, &vol_notifiers) {
+			struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+			not->remove(vol);
+		}
+
+		vol_table[vol->vol_id] = NULL;
+
+		module_put(THIS_MODULE);
+		ret = 0;
+	}
+
+	mutex_unlock(&vol_table_mutex);
+	return ret;
+}
+
+/**
+ *	register_mtd_user - register a 'user' of MTD devices.
+ *	@new: pointer to notifier info structure
+ *
+ *	Registers a pair of callbacks function to be called upon addition
+ *	or removal of MTD devices. Causes the 'add' callback to be immediately
+ *	invoked for each MTD device currently present in the system.
+ */
+
+void register_vol_user(struct vol_notifier *new)
+{
+	int i;
+
+	mutex_lock(&vol_table_mutex);
+
+	list_add(&new->list, &vol_notifiers);
+
+ 	__module_get(THIS_MODULE);
+
+	for (i=0; i< UBI_MAX_VOLUMES;  i++)
+		if (vol_table[i])
+			new->add(vol_table[i]);
+
+	mutex_unlock(&vol_table_mutex);
+}
+
+/**
+ *	unregister_mtd_user - unregister a 'user' of MTD devices.
+ *	@old: pointer to notifier info structure
+ *
+ *	Removes a callback function pair from the list of 'users' to be
+ *	notified upon addition or removal of MTD devices. Causes the
+ *	'remove' callback to be immediately invoked for each MTD device
+ *	currently present in the system.
+ */
+
+int unregister_vol_user(struct vol_notifier *old)
+{
+	int i;
+
+	mutex_lock(&vol_table_mutex);
+
+	module_put(THIS_MODULE);
+
+	for (i=0; i< UBI_MAX_VOLUMES; i++)
+		if (vol_table[i])
+			old->remove(vol_table[i]);
+
+	list_del(&old->list);
+	mutex_unlock(&vol_table_mutex);
+	return 0;
+}
+
+
+/**
+ *	get_mtd_device - obtain a validated handle for an MTD device
+ *	@mtd: last known address of the required MTD device
+ *	@num: internal device number of the required MTD device
+ *
+ *	Given a number and NULL address, return the num'th entry in the device
+ *	table, if any.	Given an address and num == -1, search the device table
+ *	for a device with that address and return if it's still present. Given
+ *	both, return the num'th driver only if its address matches. Return
+ *	error code if not.
+ */
+
+#if 0
+struct ubi_volume_desc *get_vol_device(struct ubi_volume_desc *uv, int num)
+{
+	struct ubi_volume_desc *ret = NULL;
+	int i, err = -ENODEV;
+
+	mutex_lock(&vol_table_mutex);
+
+	if (num == -1) {
+		for (i=0; i< UBI_MAX_VOLUMES; i++)
+			if (vol_table[i] == uv)
+				ret = vol_table[i];
+	} else if (num < UBI_MAX_VOLUMES) {
+		ret =vol_table[num];
+		if (uv && uv != ret)
+			ret = NULL;
+	}
+
+	if (!ret)
+		goto out_unlock;
+#if 0
+	if (!try_module_get(ret->owner))
+		goto out_unlock;
+
+	if (ret->get_device) {
+		err = ret->get_device(ret);
+		if (err)
+			goto out_put;
+	}
+
+	ret->usecount++;
+#endif
+	mutex_unlock(&vol_table_mutex);
+	return ret;
+
+out_put:
+//	module_put(ret->owner);
+out_unlock:
+	mutex_unlock(&vol_table_mutex);
+	return ERR_PTR(err);
+}
+
+/**
+ *	get_mtd_device_nm - obtain a validated handle for an MTD device by
+ *	device name
+ *	@name: MTD device name to open
+ *
+ * 	This function returns MTD device description structure in case of
+ * 	success and an error code in case of failure.
+ */
+
+struct ubi_volume_desc *get_vol_device_nm(const char *name)
+{
+	int i, err = -ENODEV;
+	struct ubi_volume_desc *uv = NULL;
+
+	mutex_lock(&vol_table_mutex);
+
+	for (i = 0; i < UBI_MAX_VOLUMES; i++) {
+		if (vol_table[i] && !strcmp(name, vol_table[i]->vol->name)) {
+			uv = vol_table[i];
+			break;
+		}
+	}
+
+	if (!uv)
+		goto out_unlock;
+#if 0
+	if (!try_module_get(mtd->owner))
+		goto out_unlock;
+
+	if (mtd->get_device) {
+		err = mtd->get_device(mtd);
+		if (err)
+			goto out_put;
+	}
+
+	mtd->usecount++;
+#endif
+	mutex_unlock(&vol_table_mutex);
+	return uv;
+
+out_put:
+//	module_put(mtd->owner);
+out_unlock:
+	mutex_unlock(&vol_table_mutex);
+	return ERR_PTR(err);
+}
+
+void put_vol_device(struct ubi_volume_desc *uv)
+{
+	int c;
+
+	mutex_lock(&vol_table_mutex);
+#if 0
+	c = --mtd->usecount;
+	if (mtd->put_device)
+		mtd->put_device(mtd);
+#endif
+	mutex_unlock(&vol_table_mutex);
+//	BUG_ON(c < 0);
+
+//	module_put(mtd->owner);
+}
+EXPORT_SYMBOL_GPL(get_vol_device);
+EXPORT_SYMBOL_GPL(get_vol_device_nm);
+EXPORT_SYMBOL_GPL(put_vol_device);
+
+#endif
+
+EXPORT_SYMBOL_GPL(add_vol_device);
+EXPORT_SYMBOL_GPL(del_vol_device);
+EXPORT_SYMBOL_GPL(register_vol_user);
+EXPORT_SYMBOL_GPL(unregister_vol_user);
+
+
+/* add by Nancy done*/
+
+
+
 /**
  * struct mtd_dev_param - MTD device parameter description data structure.
  * @name: MTD device name or number string
@@ -84,6 +381,7 @@ DEFINE_MUTEX(ubi_devices_mutex);
 
 /* Protects @ubi_devices and @ubi->ref_count */
 static DEFINE_SPINLOCK(ubi_devices_lock);
+EXPORT_SYMBOL_GPL(ubi_devices_lock);
 
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -204,7 +502,8 @@ int ubi_major2num(int major)
 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
 		struct ubi_device *ubi = ubi_devices[i];
 
-		if (ubi && MAJOR(ubi->cdev.dev) == major) {
+		if ( (ubi && MAJOR(ubi->cdev.dev) == major) ||
+			(ubi && ubi->bdev_major == major)) {
 			ubi_num = ubi->ubi_num;
 			break;
 		}
@@ -213,6 +512,7 @@ int ubi_major2num(int major)
 
 	return ubi_num;
 }
+EXPORT_SYMBOL_GPL(ubi_major2num);
 
 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
 static ssize_t dev_attribute_show(struct device *dev,
@@ -672,6 +972,15 @@ static int autoresize(struct ubi_device 
 	return 0;
 }
 
+/* add by Nancy */
+/*
+ * bdev_init - register a block device for every volume that already
+ * exists on @ubi at attach time.  Always returns 0; the result of
+ * add_vol_device() is not checked - TODO confirm it cannot fail, or
+ * propagate its error.
+ */
+static int bdev_init(struct ubi_device *ubi){
+	int i;
+	for(i=0; i<ubi->vtbl_slots; i++)
+		if(ubi->volumes[i])
+			add_vol_device(ubi->volumes[i]);
+	return 0;
+}
+
 /**
  * ubi_attach_mtd_dev - attach an MTD device.
  * @mtd_dev: MTD device description object
@@ -793,6 +1102,10 @@ int ubi_attach_mtd_dev(struct mtd_info *
 	if (err)
 		goto out_detach;
 
+	err = bdev_init(ubi);
+	if(err)
+		goto out_detach;
+
 	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
 	if (IS_ERR(ubi->bgt_thread)) {
 		err = PTR_ERR(ubi->bgt_thread);
diff -uprBN ubi.v24/cdev.c ubi.test/cdev.c
--- ubi.v24/cdev.c	2008-02-22 23:57:48.000000000 -0500
+++ ubi.test/cdev.c	2008-03-01 21:22:56.000000000 -0500
@@ -100,6 +100,7 @@ static void revoke_exclusive(struct ubi_
 
 static int vol_cdev_open(struct inode *inode, struct file *file)
 {
+	printk("%s: line: %d\n",__FUNCTION__,__LINE__);
 	struct ubi_volume_desc *desc;
 	int vol_id = iminor(inode) - 1, mode, ubi_num;
 
@@ -113,12 +114,18 @@ static int vol_cdev_open(struct inode *i
 		mode = UBI_READONLY;
 
 	dbg_msg("open volume %d, mode %d", vol_id, mode);
+	
+	const struct ubi_device *ubi = ubi_get_by_major(imajor(inode));
+	if(ubi->ubi_num != ubi_num)
+		printk("ubi_num not equal!\n");
 
+	printk("%s: line: %d\n",__FUNCTION__,__LINE__);
+	printk("vol_cdev_open: ubi_num: %d, vol_id: %d, mode: %d\n", ubi_num, vol_id, mode);
 	desc = ubi_open_volume(ubi_num, vol_id, mode);
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
-	file->private_data = desc;
+	file->private_data = desc; 
 	return 0;
 }
 
@@ -653,7 +660,7 @@ static int ubi_cdev_ioctl(struct inode *
 			err = -EFAULT;
 			break;
 		}
-
+		printk("%s: line: %d\n",__FUNCTION__,__LINE__);
 		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
 		if (IS_ERR(desc)) {
 			err = PTR_ERR(desc);
@@ -690,7 +697,7 @@ static int ubi_cdev_ioctl(struct inode *
 		err = verify_rsvol_req(ubi, &req);
 		if (err)
 			break;
-
+		printk("%s: line: %d\n",__FUNCTION__,__LINE__);
 		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
 		if (IS_ERR(desc)) {
 			err = PTR_ERR(desc);
diff -uprBN ubi.v24/Kconfig ubi.test/Kconfig
--- ubi.v24/Kconfig	2008-02-22 23:57:48.000000000 -0500
+++ ubi.test/Kconfig	2008-03-01 21:22:56.000000000 -0500
@@ -55,4 +55,18 @@ config MTD_UBI_GLUEBI
 	   this if no legacy software will be used.
 
 source "drivers/mtd/ubi/Kconfig.debug"
+
+config MTD_UBI_BLKDEVS
+	tristate "Common interface to block layer for UBI 'translation layers'"
+	depends on BLOCK
+	default n
+
+config MTD_UBI_BLOCK
+	tristate "Emulate block devices"
+	default n
+	depends on MTD_UBI_BLKDEVS
+	help
+	   This option enables block layer emulation on top of UBI volumes: for
+	   each UBI volume a block device is created. This is handy for making
+	   traditional filesystems (like ext2, VFAT) work on top of UBI.
 endmenu
diff -uprBN ubi.v24/Makefile ubi.test/Makefile
--- ubi.v24/Makefile	2008-02-22 23:57:48.000000000 -0500
+++ ubi.test/Makefile	2008-03-01 21:22:56.000000000 -0500
@@ -5,3 +5,6 @@ ubi-y += misc.o
 
 ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
 ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+
+obj-$(CONFIG_MTD_UBI_BLKDEVS) += bdev.o 
+obj-$(CONFIG_MTD_UBI_BLOCK) += block-jz.o
diff -uprBN ubi.v24/ubi.h ubi.test/ubi.h
--- ubi.v24/ubi.h	2008-02-22 23:57:48.000000000 -0500
+++ ubi.test/ubi.h	2008-03-01 21:22:58.000000000 -0500
@@ -133,6 +133,12 @@ struct ubi_ltree_entry {
 
 struct ubi_volume_desc;
 
+struct vol_notifier {
+	void (*add)(struct ubi_volume *vol);
+	void (*remove)(struct ubi_volume *vol);
+	struct list_head list;
+};
+
 /**
  * struct ubi_volume - UBI volume description data structure.
  * @dev: device object to make use of the the Linux device model
@@ -232,6 +238,7 @@ struct ubi_volume {
 	int gluebi_refcount;
 	struct mtd_info gluebi_mtd;
 #endif
+	int  bdev_mode;  //add by Nancy
 };
 
 /**
@@ -336,6 +343,7 @@ struct ubi_wl_entry;
  */
 struct ubi_device {
 	struct cdev cdev;
+	int bdev_major;  //add by Nancy
 	struct device dev;
 	int ubi_num;
 	char ubi_name[sizeof(UBI_NAME_STR)+5];
@@ -504,7 +512,7 @@ int ubi_io_read_vid_hdr(struct ubi_devic
 			struct ubi_vid_hdr *vid_hdr, int verbose);
 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
 			 struct ubi_vid_hdr *vid_hdr);
-
+ 
 /* build.c */
 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
 int ubi_detach_mtd_dev(int ubi_num, int anyway);

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2008-03-04  1:13 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-03-02  3:27 UBI: adds block device layer on top of UBI Nancy
2008-03-03 12:18 ` Artem Bityutskiy
2008-03-04  1:13   ` Nancy

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox