* [PATCH] [MTD] [UBI] add block device layer on top of UBI
@ 2008-03-03 11:17 Nancy
2008-03-04 9:40 ` Nancy
0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-03-03 11:17 UTC (permalink / raw)
To: linux-mtd
[-- Attachment #1: Type: text/plain, Size: 695 bytes --]
This patch based on
git://git.infradead.org/users/dedekind/ubifs-v2.6.24.git/ UBI version
#cd linux-2.6.24/drivers/mtd/ubi
#path -p1 < ~/ubi_new.diff
Then copy "ubi_blktrans.h" to linux-2.6.24/include/linux/mtd
Notice: Please do not select MTD_UBI_GLUEBI
#mkfs.vfat /dev/ubiblock0 works fine, but
#mount -t vfat /dev/ubiblock0 /mnt
mount: mounting /dev/ubiblock0 on /mnt failed: Invalid argument
I grepped the whole kernel but can't find where this error comes from. I
don't know how to debug this step by step, i.e. how to follow the mount
application as it runs.
Could you please give me a suggestion? A keyword, a book name, or an
article link — whatever it is.
Many thanks!
--
Best wishes,
Nancy
[-- Attachment #2: ubi_new.diff --]
[-- Type: text/plain, Size: 25747 bytes --]
diff -nprBN ubi.orig/bdev.c ubi.new/bdev.c
a0 477
/*
* $Id: bdev.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
*
* (C) 2003 David Woodhouse <dwmw2@infradead.org>
*
* Interface to Linux 2.5 block layer for UBI 'translation layers'.
*
* 2008 Yurong Tan <nancydreaming@gmail.com>:
* borrow from mtd_blkdevs.c for building block device layer on top of UBI
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/ubi_blktrans.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>
#include "ubi.h"
static LIST_HEAD(blktrans_majors);
extern struct mutex vol_table_mutex;
extern struct ubi_volume *vol_table[];
extern void register_vol_user (struct vol_notifier *new);
extern int unregister_vol_user (struct vol_notifier *old);
extern int ubi_major2num(int major);
/*
 * Per-translation-layer core state: a single worker thread and a single
 * request queue shared by all devices registered under this layer.
 */
struct ubi_blkcore_priv {
struct task_struct *thread;	/* request-processing kthread */
struct request_queue *rq;	/* block-layer request queue */
spinlock_t queue_lock;		/* lock handed to blk_init_queue() for rq */
};
/*
 * Service one block-layer request: split it into tr->blksize chunks and
 * hand each chunk to the translation layer's readsect()/writesect() hook.
 * Returns 1 on success and 0 on failure; the caller passes this value to
 * end_request() as the "uptodate" flag.
 */
static int do_blktrans_request(struct ubi_blktrans_ops *tr,
struct ubi_blktrans_dev *dev,
struct request *req)
{
unsigned long block, nsect;
char *buf;
/* Convert 512-byte sector units into tr->blksize-sized block units. */
block = req->sector << 9 >> tr->blkshift;
nsect = req->current_nr_sectors << 9 >> tr->blkshift;
buf = req->buffer;
/* Only regular filesystem requests are serviced here. */
if (!blk_fs_request(req))
return 0;
/* Fail requests that would run past the end of the device. */
if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
return 0;
switch(rq_data_dir(req)) {
case READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return 0;
return 1;
case WRITE:
/* A layer without a writesect hook is read-only. */
if (!tr->writesect)
return 0;
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
return 0;
return 1;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return 0;
}
}
/*
 * Worker kthread: pulls requests off the layer's queue and services them.
 * The queue lock is held while manipulating the queue (elv_next_request,
 * end_request) and dropped around the actual I/O, which sleeps.
 */
static int ubi_blktrans_thread(void *arg)
{
struct ubi_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
struct request *req;
struct ubi_blktrans_dev *dev;
int res = 0;
req = elv_next_request(rq);
if (!req) {
/* Queue empty: sleep until ubi_blktrans_request() wakes us. */
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
spin_lock_irq(rq->queue_lock);
continue;
}
dev = req->rq_disk->private_data;
tr = dev->tr;
/* Drop the queue lock while doing the (sleeping) I/O itself;
   dev->lock serializes I/O against this device. */
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
res = do_blktrans_request(tr, dev, req);
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
end_request(req, res);
}
spin_unlock_irq(rq->queue_lock);
return 0;
}
/*
 * Block-layer "request_fn" callback: we do no work in this context,
 * we just kick the per-layer worker thread, which drains the queue.
 */
static void ubi_blktrans_request(struct request_queue *rq)
{
	struct ubi_blktrans_ops *tr;

	tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}
/*
 * Block device open(): open the backing UBI volume (read/write or
 * read-only depending on the file mode), attach the volume descriptor to
 * the translation device, pin the layer's module and call its open hook.
 *
 * Fixes over the original:
 *  - the UBI volume was leaked when try_module_get() failed (the old code
 *    jumped to module_put() without closing the volume);
 *  - the unused "out:" label (compiler warning) is gone.
 */
static int blktrans_open(struct inode *i, struct file *f)
{
	struct ubi_volume_desc *desc;
	int ubi_num = ubi_major2num(imajor(i));
	int vol_id = iminor(i);
	int mode;
	struct ubi_blktrans_dev *dev;
	struct ubi_blktrans_ops *tr;
	int ret;

	if (f->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	desc->vol->bdev_mode = mode;
	dev = i->i_bdev->bd_disk->private_data;
	dev->uv = desc;
	tr = dev->tr;

	if (!try_module_get(tr->owner)) {
		/* Layer module is going away: undo the volume open. */
		ubi_close_volume(desc);
		return -ENODEV;
	}

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		/* Layer-specific open failed: unwind volume + module refs. */
		ubi_close_volume(desc);
		module_put(tr->owner);
	}
	return ret;
}
/*
 * Block device release(): call the layer's release hook and, if it
 * succeeds, drop the references taken in blktrans_open() (the UBI volume
 * and the layer module).
 */
static int blktrans_release(struct inode *i, struct file *f)
{
struct ubi_blktrans_dev *dev;
struct ubi_blktrans_ops *tr;
struct ubi_volume_desc *desc;
int ret = 0;
dev = i->i_bdev->bd_disk->private_data;
tr = dev->tr;
desc = dev->uv;
if (tr->release)
ret = tr->release(dev);
if (!ret) {
/* Undo blktrans_open(): close the volume, unpin the module. */
ubi_close_volume(desc);
module_put(tr->owner);
}
return ret;
}
/*
 * HDIO_GETGEO handler: forward to the translation layer's getgeo hook,
 * or report -ENOTTY if the layer does not provide one.
 */
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ubi_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (!dev->tr->getgeo)
		return -ENOTTY;
	return dev->tr->getgeo(dev, geo);
}
/*
 * ioctl handler: the only command handled here is BLKFLSBUF, which is
 * forwarded to the layer's flush hook when one exists.  Everything else
 * is rejected with -ENOTTY.
 */
static int blktrans_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
	struct ubi_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct ubi_blktrans_ops *tr = dev->tr;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;
	/* No flush hook: the core code already did the work. */
	return tr->flush ? tr->flush(dev) : 0;
}
/* block_device_operations shared by every UBI translation-layer disk. */
struct block_device_operations ubi_blktrans_ops = {
.owner		= THIS_MODULE,
.open		= blktrans_open,
.release	= blktrans_release,
.ioctl		= blktrans_ioctl,
.getgeo		= blktrans_getgeo,
};
/*
 * Register one translation device: pick a device number (either the one
 * requested in new->devnum, or the first free one when it is -1), insert
 * it into the layer's sorted device list, allocate a gendisk, name it,
 * size it and add it to the block layer.
 *
 * Must be called with vol_table_mutex held (enforced by the trylock/BUG
 * check below).  Returns 0 on success or a negative errno.
 */
int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new)
{
struct ubi_blktrans_ops *tr = new->tr;
struct list_head *this;
int last_devnum = -1;
struct gendisk *gd;
/* Caller must hold vol_table_mutex: if we can take it, it wasn't held. */
if (mutex_trylock(&vol_table_mutex)) {
mutex_unlock(&vol_table_mutex);
BUG();
}
/* tr->devs is kept sorted by devnum; scan for a slot. */
list_for_each(this, &tr->devs) {
struct ubi_blktrans_dev *d = list_entry(this, struct ubi_blktrans_dev, list);
if (new->devnum == -1) {
/* Use first free number */
if (d->devnum != last_devnum+1) {
/* Found a free devnum. Plug it in here */
new->devnum = last_devnum+1;
list_add_tail(&new->list, &d->list);
goto added;
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
list_add_tail(&new->list, &d->list);
goto added;
}
last_devnum = d->devnum;
}
/* No gap found (or empty list): append after the highest devnum. */
if (new->devnum == -1)
new->devnum = last_devnum+1;
/* Minor numbers are 8 bits; devnum << part_bits must fit. */
if ((new->devnum << tr->part_bits) > 256) {
return -EBUSY;
}
mutex_init(&new->lock);
list_add_tail(&new->list, &tr->devs);
added:
/* Layers without a writesect hook are inherently read-only. */
if (!tr->writesect)
new->readonly = 1;
gd = alloc_disk(1 << tr->part_bits);
if (!gd) {
list_del(&new->list);
return -ENOMEM;
}
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->fops = &ubi_blktrans_ops;
/* Partitionable devices get letter suffixes (sda-style), others digits. */
if (tr->part_bits)
if (new->devnum < 26)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c", tr->name, 'a' + new->devnum);
else
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c%c", tr->name,
'a' - 1 + new->devnum / 26,
'a' + new->devnum % 26);
else
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
/* 2.5 has capacity in units of 512 bytes while still
having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
if (new->readonly)
set_disk_ro(gd, 1);
add_disk(gd);
return 0;
}
/*
 * Unregister one translation device: unlink it from its layer's device
 * list and tear down the gendisk.  Must be called with vol_table_mutex
 * held (enforced by the trylock/BUG check).
 */
int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old)
{
/* Caller must hold vol_table_mutex: if we can take it, it wasn't held. */
if (mutex_trylock(&vol_table_mutex)) {
mutex_unlock(&vol_table_mutex);
BUG();
}
list_del(&old->list);
/* blkcore_priv holds the gendisk set up by add_ubi_blktrans_dev(). */
del_gendisk(old->blkcore_priv);
put_disk(old->blkcore_priv);
return 0;
}
/*
 * Volume-removal notifier: walk every registered translation layer and
 * tear down each of its devices that is backed by the departing volume.
 */
static void blktrans_notify_remove(struct ubi_volume *vol)
{
	struct list_head *p, *q, *tmp;

	list_for_each(p, &blktrans_majors) {
		struct ubi_blktrans_ops *tr;

		tr = list_entry(p, struct ubi_blktrans_ops, list);
		/* _safe variant: remove_vol() unlinks entries as we go. */
		list_for_each_safe(q, tmp, &tr->devs) {
			struct ubi_blktrans_dev *d;

			d = list_entry(q, struct ubi_blktrans_dev, list);
			if (d->uv->vol != vol)
				continue;
			tr->remove_vol(d);
		}
	}
}
/*
 * Volume-addition notifier: offer the new volume to every registered
 * translation layer via its add_vol hook.
 *
 * The original carried a dead "#if 0" block referencing an undefined
 * `mtd` variable (left over from mtd_blkdevs.c); it has been removed.
 */
static void blktrans_notify_add(struct ubi_volume *vol)
{
	struct list_head *this;

	list_for_each(this, &blktrans_majors) {
		struct ubi_blktrans_ops *tr =
			list_entry(this, struct ubi_blktrans_ops, list);
		tr->add_vol(tr, vol);
	}
}
/* Notifier hooked into the UBI volume table (see register_vol_user()). */
static struct vol_notifier blktrans_notifier = {
.add = blktrans_notify_add,
.remove = blktrans_notify_remove,
};
int register_ubi_blktrans(struct ubi_blktrans_ops *tr)
{
int ret, i;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
us over. */
if (!blktrans_notifier.list.next)
register_vol_user(&blktrans_notifier);
tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
if (!tr->blkcore_priv)
return -ENOMEM;
mutex_lock(&vol_table_mutex);
tr->major = register_blkdev(0, tr->name);
#if 0
if (ret) {
// printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
printk("Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
kfree(tr->blkcore_priv);
mutex_unlock(&vol_table_mutex);
return ret;
}
#endif
spin_lock_init(&tr->blkcore_priv->queue_lock);
tr->blkcore_priv->rq = blk_init_queue(ubi_blktrans_request, &tr->blkcore_priv->queue_lock);
if (!tr->blkcore_priv->rq) {
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
mutex_unlock(&vol_table_mutex);
return -ENOMEM;
}
tr->blkcore_priv->rq->queuedata = tr;
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
tr->blkshift = ffs(tr->blksize) - 1;
tr->blkcore_priv->thread = kthread_run(ubi_blktrans_thread, tr,
"%sd", tr->name);
if (IS_ERR(tr->blkcore_priv->thread)) {
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
mutex_unlock(&vol_table_mutex);
return PTR_ERR(tr->blkcore_priv->thread);
}
INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
for (i=0; i<UBI_MAX_VOLUMES; i++) {
if (vol_table[i] )
tr->add_vol(tr, vol_table[i]);
}
mutex_unlock(&vol_table_mutex);
return 0;
}
/*
 * Unregister a translation layer: stop its worker thread, remove all of
 * its devices, tear down the request queue and release the major.
 */
int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr)
{
struct list_head *this, *next;
mutex_lock(&vol_table_mutex);
/* Clean up the kernel thread */
kthread_stop(tr->blkcore_priv->thread);
/* Remove it from the list of active majors */
list_del(&tr->list);
/* _safe walk: remove_vol() unlinks each device from tr->devs. */
list_for_each_safe(this, next, &tr->devs) {
struct ubi_blktrans_dev *dev = list_entry(this, struct ubi_blktrans_dev, list);
tr->remove_vol(dev);
}
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
mutex_unlock(&vol_table_mutex);
kfree(tr->blkcore_priv);
/* Every device must have been removed by the loop above. */
BUG_ON(!list_empty(&tr->devs));
return 0;
}
/*
 * Module exit: drop the volume notifier, but only if a translation layer
 * was ever registered (register_ubi_blktrans() hooks the notifier in
 * lazily, so list.next is NULL until first use).
 */
static void __exit ubi_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (!blktrans_notifier.list.next)
		return;
	unregister_vol_user(&blktrans_notifier);
}
module_exit(ubi_blktrans_exit);
EXPORT_SYMBOL_GPL(register_ubi_blktrans);
EXPORT_SYMBOL_GPL(deregister_ubi_blktrans);
EXPORT_SYMBOL_GPL(add_ubi_blktrans_dev);
EXPORT_SYMBOL_GPL(del_ubi_blktrans_dev);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Yurong Tan <nancydreaming@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for UBI 'translation layers'");
diff -nprBN ubi.orig/block-jz.c ubi.new/block-jz.c
a0 315
/*
* Direct UBI block device access
*
* $Id: ubiblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
*
* (C) 2000-2003 Nicolas Pitre <nico@cam.org>
* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
* (C) 2008 Yurong Tan <nancydreaming@gmail.com> :
* borrow mtdblock.c to work on top of UBI
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/hdreg.h>
#include <linux/mtd/ubi_blktrans.h>
#include <linux/mutex.h>
#include "ubi.h"
#define UNMAPPED 1
/*
 * Per-volume block-emulation state: one write-back LEB cache and one
 * read LEB cache, each one usable_leb_size bytes (vmalloc'd).
 */
static struct ubiblk_dev {
struct ubi_volume_desc *uv;	/* open descriptor of the backing volume */
int count;			/* open reference count */
struct mutex cache_mutex;	/* serializes write-cache flushes */
unsigned short vbw; //virt block number of write cache
unsigned short vbr; //virt block number of read cache
unsigned char *write_cache;	/* one LEB of pending write data */
unsigned char *read_cache;	/* one LEB of cached read data */
enum { STATE_UNUSED, STATE_USED } read_cache_state, write_cache_state;
} *ubiblks[UBI_MAX_VOLUMES];	/* indexed by volume id / devnum */
void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block);
int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk);
/*
 * Flush the write-back cache: if it holds dirty data, write the whole
 * cached LEB back to the volume and mark the cache unused.
 *
 * Fix over the original: the return value of ubi_leb_write() was
 * silently discarded, so flash write failures were reported as success.
 * It is now propagated to the caller.
 */
int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk)
{
	int offset = ubiblk->uv->vol->ubi->ec_hdr_alsize + ubiblk->uv->vol->ubi->vid_hdr_alsize;
	int err;

	if (STATE_UNUSED == ubiblk->write_cache_state)
		return 0;

	err = ubi_leb_write(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, offset,
			    ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
	/* Cache is considered clean either way; the error is reported up. */
	ubiblk->write_cache_state = STATE_UNUSED;
	return err;
}
/*
 * Prime the write cache for `virt_block`: remember the block number,
 * mark the cache in use, and pre-fill it with the LEB's current contents
 * so that a later flush writes back unmodified pages unchanged.
 *
 * NOTE(review): the ubi_leb_read() result is ignored here, so a read
 * failure leaves the cache with stale/uninitialized data — worth fixing.
 */
void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block)
{
int offset = ubiblk->uv->vol->ubi->ec_hdr_alsize + ubiblk->uv->vol->ubi->vid_hdr_alsize;
ubiblk->vbw = virt_block;
ubiblk->write_cache_state = STATE_USED;
ubi_leb_read(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, offset,
ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
//memset(write_cache, 0xff, ubiblk->uv->vol->usable_leb_size);
}
/*
 * Write `len` bytes at 512-byte `sector` through the one-LEB write-back
 * cache.  The sector is translated to (virt_block, page, page_offset);
 * data only lands in write_cache here — it reaches flash when the cache
 * is flushed (on block switch, read, release or BLKFLSBUF).
 *
 * NOTE(review): `len` is always 512 from ubiblk_writesect(); larger
 * values crossing a LEB boundary are not handled.
 */
static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector,
int len, const char *buf)
{
struct ubi_volume_desc *uv = ubiblk->uv;
/* pages (min I/O units) per LEB */
int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9;
unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1;
unsigned short virt_block, page, page_offset;
unsigned long virt_page;
/* sector -> (LEB number, page within LEB, sector within page) */
virt_page = sector / sectors_per_page;
page_offset = sector % sectors_per_page;
virt_block = virt_page / ppb;
page = virt_page % ppb;
if(ubi_is_mapped(uv, virt_block ) == UNMAPPED ){
/* Target LEB not mapped yet: flush any pending data, start a
   fresh cache for this block and map the LEB. */
mutex_lock(&ubiblk->cache_mutex);
ubiblk_flush_writecache(ubiblk);
mutex_unlock(&ubiblk->cache_mutex);
ubiblk_setup_writecache(ubiblk, virt_block);
ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
} else {
if ( STATE_USED == ubiblk->write_cache_state ) {
if ( ubiblk->vbw != virt_block) {
// Commit before we start a new cache.
mutex_lock(&ubiblk->cache_mutex);
ubiblk_flush_writecache(ubiblk);
mutex_unlock(&ubiblk->cache_mutex);
ubiblk_setup_writecache(ubiblk, virt_block);
/* Remap so the flush can rewrite the whole LEB in place. */
ubi_leb_unmap(uv, virt_block);
ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
} else {
/* Cache hit: same LEB already cached, nothing to do. */
//dprintk("cache hit: 0x%x\n", virt_page);
}
} else {
/* Mapped LEB but empty cache: load it, then remap for rewrite. */
// printk("with existing mapping\n");
ubiblk_setup_writecache(ubiblk, virt_block);
ubi_leb_unmap(uv, virt_block);
ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
}
}
/* Finally place the sector's data into the cached LEB image. */
memcpy(&ubiblk->write_cache[(page<<page_shift) +(page_offset<<9)],
buf,len);
return 0;
}
/*
 * Read `len` bytes at 512-byte `sector` through the one-LEB read cache.
 * Pending writes are flushed first so the read sees current data; an
 * unmapped LEB reads back as all-0xFF (erased flash semantics).
 *
 * Fix over the original: the unmapped-LEB path filled a fixed 512 bytes
 * instead of `len`, under-filling (or over-running) the caller's buffer
 * for any other length; it now honours `len`.
 */
static int do_cached_read (struct ubiblk_dev *ubiblk, unsigned long sector,
			   int len, char *buf)
{
	struct ubi_volume_desc *uv = ubiblk->uv;
	/* pages (min I/O units) per LEB */
	int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
	unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9;
	unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1;
	unsigned short virt_block, page, page_offset;
	unsigned long virt_page;
	int offset = uv->vol->ubi->ec_hdr_alsize + uv->vol->ubi->vid_hdr_alsize;

	/* Make pending writes visible before reading. */
	mutex_lock(&ubiblk->cache_mutex);
	ubiblk_flush_writecache(ubiblk);
	mutex_unlock(&ubiblk->cache_mutex);

	/* sector -> (LEB number, page within LEB, sector within page) */
	virt_page = sector / sectors_per_page;
	page_offset = sector % sectors_per_page;
	virt_block = virt_page / ppb;
	page = virt_page % ppb;

	if (ubi_is_mapped(uv, virt_block) == UNMAPPED) {
		/* Unmapped logical block: report erased (all 0xFF) data. */
		memset(buf, 0xFF, len);
	} else {
		/* Refill the read cache if it holds a different LEB. */
		if (ubiblk->vbr != virt_block ||
		    ubiblk->read_cache_state == STATE_UNUSED) {
			ubiblk->vbr = virt_block;
			ubi_leb_read(uv, virt_block, ubiblk->read_cache, offset, uv->vol->usable_leb_size, 0);
			ubiblk->read_cache_state = STATE_USED;
		}
		memcpy(buf, &ubiblk->read_cache[(page<<page_shift)+(page_offset<<9)], len);
	}
	return 0;
}
static int ubiblk_readsect(struct ubi_blktrans_dev *dev,
unsigned long block, char *buf)
{
struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
return do_cached_read(ubiblk, block, 512, buf);
}
static int ubiblk_writesect(struct ubi_blktrans_dev *dev,
unsigned long block, char *buf)
{
struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
return do_cached_write(ubiblk, block, 512, buf);
}
static int ubiblk_init_vol(int dev, struct ubi_volume_desc *uv)
{
struct ubiblk_dev *ubiblk;
int ret;
ubiblk = kmalloc(sizeof(struct ubiblk_dev), GFP_KERNEL);
if (!ubiblk)
return -ENOMEM;
memset(ubiblk, 0, sizeof(*ubiblk));
ubiblk->count = 1;
ubiblk->uv = uv;
mutex_init (&ubiblk->cache_mutex);
ubiblk->write_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
ubiblk->read_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
if(!ubiblk->write_cache ||
!ubiblk->read_cache )
return -ENOMEM;
ubiblk->write_cache_state = STATE_UNUSED;
ubiblk->read_cache_state = STATE_UNUSED;
ubiblks[dev] = ubiblk;
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
return 0;
}
static int ubiblk_open(struct ubi_blktrans_dev *ubd)
{
int dev = ubd->devnum;
int res = 0;
DEBUG(MTD_DEBUG_LEVEL1,"ubiblock_open\n");
if (ubiblks[dev]) {
ubiblks[dev]->count++;
printk("%s: increase use count\n",__FUNCTION__);
return 0;
}
/* OK, it's not open. Create cache info for it */
res = ubiblk_init_vol(dev, ubd->uv);
return res;
}
/*
 * Layer release hook: flush the write cache and, on the last close,
 * sync the underlying MTD device and free all cache state.
 */
static int ubiblk_release(struct ubi_blktrans_dev *ubd)
{
int dev = ubd->devnum;
struct ubiblk_dev *ubiblk = ubiblks[dev];
struct ubi_device *ubi = ubiblk->uv->vol->ubi;
mutex_lock(&ubiblk->cache_mutex);
ubiblk_flush_writecache(ubiblk);
mutex_unlock(&ubiblk->cache_mutex);
if (!--ubiblk->count) {
/* It was the last usage. Free the device */
ubiblks[dev] = NULL;
/* Push cached flash writes to the medium if the MTD can sync. */
if (ubi->mtd->sync)
ubi->mtd->sync(ubi->mtd);
vfree(ubiblk->write_cache);
vfree(ubiblk->read_cache);
kfree(ubiblk);
}
return 0;
}
static int ubiblk_flush(struct ubi_blktrans_dev *dev)
{
struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
struct ubi_device *ubi = ubiblk->uv->vol->ubi;
mutex_lock(&ubiblk->cache_mutex);
ubiblk_flush_writecache(ubiblk);
mutex_unlock(&ubiblk->cache_mutex);
if (ubi->mtd->sync)
ubi->mtd->sync(ubi->mtd);
return 0;
}
/*
 * add_vol hook: create a translation device for a newly visible volume
 * (devnum mirrors the volume id, size is used_bytes in 512-byte sectors)
 * and register it with the blktrans core.
 *
 * Fix over the original: add_ubi_blktrans_dev() failure (e.g. -EBUSY or
 * -ENOMEM) was ignored, leaking `dev`; it is now freed on failure.
 */
static void ubiblk_add_vol_dev(struct ubi_blktrans_ops *tr, struct ubi_volume *vol)
{
	struct ubi_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->devnum = vol->vol_id;
	dev->size = vol->used_bytes >> 9;
	dev->tr = tr;
	if (vol->bdev_mode == UBI_READONLY)
		dev->readonly = 1;

	vol->ubi->bdev_major = tr->major;
	if (add_ubi_blktrans_dev(dev))
		kfree(dev);
}
/* remove_vol hook: unregister the translation device and free it. */
static void ubiblk_remove_vol_dev(struct ubi_blktrans_dev *dev)
{
del_ubi_blktrans_dev(dev);
kfree(dev);
}
/*
 * getgeo hook: report a synthetic CHS geometry (4 heads x 16 sectors)
 * so partitioning tools have something sane to work with.
 */
static int ubiblk_getgeo(struct ubi_blktrans_dev *dev, struct hd_geometry *geo)
{
	const unsigned int heads = 4;
	const unsigned int sectors = 16;

	memset(geo, 0, sizeof(*geo));
	geo->heads = heads;
	geo->sectors = sectors;
	geo->cylinders = dev->size / (heads * sectors);
	return 0;
}
/* The "ubiblock" translation layer: 512-byte sectors, dynamic major. */
static struct ubi_blktrans_ops ubiblk_tr = {
.name		= "ubiblock",
.major		= 0,		/* 0 = allocate dynamically */
.part_bits	= 0,		/* no partition support */
.blksize 	= 512,
.open		= ubiblk_open,
.release	= ubiblk_release,
.readsect	= ubiblk_readsect,
.writesect	= ubiblk_writesect,
.getgeo		= ubiblk_getgeo,
.flush		= ubiblk_flush,
.add_vol	= ubiblk_add_vol_dev,
.remove_vol	= ubiblk_remove_vol_dev,
.owner		= THIS_MODULE,
};
/* Module entry point: register the "ubiblock" translation layer. */
static int __init init_ubiblock(void)
{
return register_ubi_blktrans(&ubiblk_tr);
}
/* Module exit point: tear down the "ubiblock" translation layer. */
static void __exit cleanup_ubiblock(void)
{
deregister_ubi_blktrans(&ubiblk_tr);
}
module_init(init_ubiblock);
module_exit(cleanup_ubiblock);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> , Yurong Tan <nancydreaming@gmail.com>");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to UBI volumes");
diff -nprBN ubi.orig/build.c ubi.new/build.c
a48 152
/* add by Nancy begin */
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(vol_table_mutex);
struct ubi_volume *vol_table[UBI_MAX_VOLUMES];
EXPORT_SYMBOL_GPL(vol_table_mutex);
EXPORT_SYMBOL_GPL(vol_table);
static LIST_HEAD(vol_notifiers);
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or 1 on failure, which currently will only happen
* if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
*/
/*
 * Publish a UBI volume in vol_table and notify every registered volume
 * user ('add' callback).  Returns 0 on success, 1 if the slot for this
 * vol_id is already occupied.  (Modelled on mtd core's add_mtd_device.)
 */
int add_vol_device(struct ubi_volume *vol)
{
int i;
mutex_lock(&vol_table_mutex);
if (!vol_table[vol->vol_id]) {
struct list_head *this;
vol_table[vol->vol_id] = vol;
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each(this, &vol_notifiers) {
struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
not->add(vol);
}
mutex_unlock(&vol_table_mutex);
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
return 0;
}
mutex_unlock(&vol_table_mutex);
return 1;
}
/**
* del_mtd_device - unregister an MTD device
* @mtd: pointer to MTD device info structure
*
* Remove a device from the list of MTD devices present in the system,
* and notify each currently active MTD 'user' of its departure.
* Returns zero on success or 1 on failure, which currently will happen
* if the requested device does not appear to be present in the list.
*/
/*
 * Remove a UBI volume from vol_table and notify every registered volume
 * user ('remove' callback).  Returns 0 on success, -ENODEV if the volume
 * is not in the table, -EBUSY if it still has openers.
 *
 * Fix over the original: the -EBUSY printk claimed "use count 0" even
 * though this path is taken precisely because the volume is still in
 * use; the message now states what actually happened.
 */
int del_vol_device (struct ubi_volume *vol)
{
	int ret;

	mutex_lock(&vol_table_mutex);
	if (vol_table[vol->vol_id] != vol) {
		ret = -ENODEV;
	} else if (vol->readers || vol->writers || vol->exclusive) {
		printk(KERN_NOTICE "UBI volume #%d (%s) is still in use, not removing\n",
		       vol->vol_id, vol->name);
		ret = -EBUSY;
	} else {
		struct list_head *this;

		/* No need to get a refcount on the module containing
		   the notifier, since we hold the vol_table_mutex */
		list_for_each(this, &vol_notifiers) {
			struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
			not->remove(vol);
		}
		vol_table[vol->vol_id] = NULL;
		module_put(THIS_MODULE);
		ret = 0;
	}
	mutex_unlock(&vol_table_mutex);
	return ret;
}
/**
* register_mtd_user - register a 'user' of MTD devices.
* @new: pointer to notifier info structure
*
* Registers a pair of callbacks function to be called upon addition
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
/*
 * Register a volume 'user' (add/remove callback pair) and immediately
 * invoke its 'add' callback for every volume currently present.
 */
void register_vol_user(struct vol_notifier *new)
{
int i;
mutex_lock(&vol_table_mutex);
list_add(&new->list, &vol_notifiers);
__module_get(THIS_MODULE);
/* Replay all existing volumes to the new user. */
for (i=0; i< UBI_MAX_VOLUMES; i++)
if (vol_table[i])
new->add(vol_table[i]);
mutex_unlock(&vol_table_mutex);
}
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
* @old: pointer to notifier info structure
*
* Removes a callback function pair from the list of 'users' to be
* notified upon addition or removal of MTD devices. Causes the
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
/*
 * Unregister a volume 'user', invoking its 'remove' callback for every
 * volume still present before unlinking it.  Always returns 0.
 */
int unregister_vol_user(struct vol_notifier *old)
{
int i;
mutex_lock(&vol_table_mutex);
module_put(THIS_MODULE);
/* Tell the departing user about every volume it still knows. */
for (i=0; i< UBI_MAX_VOLUMES; i++)
if (vol_table[i])
old->remove(vol_table[i]);
list_del(&old->list);
mutex_unlock(&vol_table_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(add_vol_device);
EXPORT_SYMBOL_GPL(del_vol_device);
EXPORT_SYMBOL_GPL(register_vol_user);
EXPORT_SYMBOL_GPL(unregister_vol_user);
/* add by Nancy end*/
a86 1
EXPORT_SYMBOL_GPL(ubi_devices_lock);
d207 1
a207 2
if ( (ubi && MAJOR(ubi->cdev.dev) == major) ||
(ubi && ubi->bdev_major == major)) {
a215 1
EXPORT_SYMBOL_GPL(ubi_major2num);
a674 9
/* add by Nancy */
/* Publish every existing volume of `ubi` via add_vol_device(). */
static int bdev_init(struct ubi_device *ubi){
int i;
for(i=0; i<ubi->vtbl_slots; i++)
if(ubi->volumes[i])
add_vol_device(ubi->volumes[i]);
return 0;
}
a795 4
err = bdev_init(ubi);
if(err)
goto out_detach;
diff -nprBN ubi.orig/cdev.c ubi.new/cdev.c
a115 4
const struct ubi_device *ubi = ubi_get_by_major(imajor(inode));
if(ubi->ubi_num != ubi_num)
printk("ubi_num not equal!\n");
d121 1
a121 1
file->private_data = desc;
diff -nprBN ubi.orig/Kconfig ubi.new/Kconfig
a57 14
config MTD_UBI_BLKDEVS
tristate "Common interface to block layer for UBI 'translation layers'"
depends on BLOCK
default n
config MTD_UBI_BLOCK
tristate "Emulate block devices"
default n
depends on MTD_UBI_BLKDEVS
help
This option enables block layer emulation on top of UBI volumes: for
each UBI volume a block device is created. This is handy to make
traditional filesystems (like ext2, VFAT) work on top of UBI.
diff -nprBN ubi.orig/Makefile ubi.new/Makefile
a7 3
obj-$(CONFIG_MTD_UBI_BLKDEVS) += bdev.o
obj-$(CONFIG_MTD_UBI_BLOCK) += block-jz.o
diff -nprBN ubi.orig/ubi.h ubi.new/ubi.h
a135 6
struct vol_notifier {
void (*add)(struct ubi_volume *vol);
void (*remove)(struct ubi_volume *vol);
struct list_head list;
};
a234 1
int bdev_mode; //add by Nancy
a338 1
int bdev_major; //add by Nancy
d507 1
a507 1
[-- Attachment #3: ubi_blktrans.h --]
[-- Type: text/plain, Size: 1973 bytes --]
/*
* $Id: ubi_blktrans.h
*
* (C) 2003 David Woodhouse <dwmw2@infradead.org>
* (C) 2008 Yurong Tan <nancydreaming@gmail.com> : borrow from MTD blktrans.h for UBI used
* Interface to Linux block layer for UBI 'translation layers'.
*
*/
#ifndef __UBI_TRANS_H__
#define __UBI_TRANS_H__
#include <linux/mutex.h>
struct hd_geometry;
struct ubi_volume_desc;
struct ubi_blktrans_ops;
struct file;
struct inode;
/* One emulated block device backed by a single UBI volume. */
struct ubi_blktrans_dev {
struct ubi_blktrans_ops *tr;	/* owning translation layer */
struct list_head list;		/* entry in tr->devs, sorted by devnum */
struct ubi_volume_desc *uv;	/* open descriptor of the backing volume */
struct mutex lock;		/* serializes request processing */
int devnum;			/* device number (-1 = pick first free) */
unsigned long size;		/* capacity in tr->blksize units */
int readonly;			/* non-zero: disk registered read-only */
void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
};
struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */
/* A 'translation layer': maps block-device requests onto UBI volumes. */
struct ubi_blktrans_ops {
char *name;		/* disk name prefix, e.g. "ubiblock" */
int major;		/* block major (0 = allocate dynamically) */
int part_bits;		/* minor bits reserved for partitions */
int blksize;		/* sector size presented to the block layer */
int blkshift;		/* log2(blksize), filled in at registration */
/* Access functions */
int (*readsect)(struct ubi_blktrans_dev *dev,
unsigned long block, char *buffer);
int (*writesect)(struct ubi_blktrans_dev *dev,
unsigned long block, char *buffer);
/* Block layer ioctls */
int (*getgeo)(struct ubi_blktrans_dev *dev, struct hd_geometry *geo);
int (*flush)(struct ubi_blktrans_dev *dev);
/* Called with mtd_table_mutex held; no race with add/remove */
int (*open)(struct ubi_blktrans_dev *dev);
int (*release)(struct ubi_blktrans_dev *dev);
/* Called on {de,}registration and on subsequent addition/removal
of devices, with mtd_table_mutex held. */
void (*add_vol)(struct ubi_blktrans_ops *tr, struct ubi_volume *vol);
void (*remove_vol)(struct ubi_blktrans_dev *dev);
struct list_head devs;	/* devices registered under this layer */
struct list_head list;	/* entry in the global blktrans_majors list */
struct module *owner;	/* module providing this layer */
struct ubi_blkcore_priv *blkcore_priv;	/* core thread/queue state */
};
extern int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new);
extern int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old);
extern int register_ubi_blktrans(struct ubi_blktrans_ops *tr);
extern int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr);
#endif /* __UBI_TRANS_H__ */
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-04 9:40 ` Nancy
@ 2008-03-04 9:40 ` Artem Bityutskiy
2008-03-04 9:54 ` Nancy
0 siblings, 1 reply; 14+ messages in thread
From: Artem Bityutskiy @ 2008-03-04 9:40 UTC (permalink / raw)
To: Nancy; +Cc: linux-mtd
On Tue, 2008-03-04 at 17:40 +0800, Nancy wrote:
> Hi,
> UBI block device layer's basic function finally OK now.
>
> Please, replace the "ubi_new.diff" with "ubi_blk_ok.diff"
>
> Next to do:
> 1, fix ubi->ref_count
> 2, when writing a LEB, do not read the whole old mapped LEB into
> the write_cache; only read the needed pages, to enhance speed.
Hi,
could you please send patches inline? It is what people normally do
and it is easier to reply and comment the patches this way.
Also, I do not think we need this common "blktrans" module for UBI at
all, it just does not seem to make much sense.
--
Best regards,
Artem Bityutskiy (Битюцкий Артём)
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-03 11:17 [PATCH] [MTD] [UBI] add block device layer on top of UBI Nancy
@ 2008-03-04 9:40 ` Nancy
2008-03-04 9:40 ` Artem Bityutskiy
0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-03-04 9:40 UTC (permalink / raw)
To: linux-mtd
[-- Attachment #1: Type: text/plain, Size: 319 bytes --]
Hi,
UBI block device layer's basic function finally OK now.
Please, replace the "ubi_new.diff" with "ubi_blk_ok.diff"
Next to do:
1, fix ubi->ref_count
2, when writing a LEB, do not read the whole old mapped LEB into
the write_cache; only read the needed pages, to enhance speed.
--
Best wishes,
Nancy
[-- Attachment #2: ubi_blk_ok.diff --]
[-- Type: text/plain, Size: 29521 bytes --]
diff -uprBN ubi.orig/bdev.c ubi.new/bdev.c
--- ubi.orig/bdev.c 1970-01-01 08:00:00.000000000 +0800
+++ ubi.new/bdev.c 2008-03-04 17:25:27.000000000 +0800
@@ -0,0 +1,464 @@
+/*
+ * $Id: bdev.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
+ *
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Interface to Linux 2.5 block layer for UBI 'translation layers'.
+ *
+ * 2008 Yurong Tan <nancydreaming@gmail.com>:
+ * borrow from mtd_blkdevs.c for building block device layer on top of UBI
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mtd/ubi_blktrans.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/freezer.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/uaccess.h>
+#include "ubi.h"
+
+
+static LIST_HEAD(blktrans_majors);
+extern struct mutex vol_table_mutex;
+extern struct ubi_volume *vol_table[];
+
+extern void register_vol_user (struct vol_notifier *new);
+extern int unregister_vol_user (struct vol_notifier *old);
+extern int ubi_major2num(int major);
+
+/*
+ * Per-translation-layer core state: the block request queue, its lock,
+ * and the kernel thread that drains the queue (ubi_blktrans_thread()).
+ */
+struct ubi_blkcore_priv {
+ struct task_struct *thread;	/* queue-draining kthread */
+ struct request_queue *rq;	/* block request queue */
+ spinlock_t queue_lock;	/* lock handed to blk_init_queue() for @rq */
+};
+
+/*
+ * Service a single block-layer request by splitting it into
+ * tr->blksize-sized sector transfers handed to the translation layer's
+ * readsect()/writesect() hooks.
+ *
+ * Returns 1 on success and 0 on failure, matching the old end_request()
+ * "uptodate" convention: non-fs requests, out-of-range requests and
+ * sector-level I/O errors all report 0.
+ */
+static int do_blktrans_request(struct ubi_blktrans_ops *tr,
+ struct ubi_blktrans_dev *dev,
+ struct request *req)
+{
+ unsigned long block, nsect;
+ char *buf;
+
+ /* Convert 512-byte units into the layer's block size (tr->blkshift). */
+ block = req->sector << 9 >> tr->blkshift;
+ nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+
+ buf = req->buffer;
+
+ if (!blk_fs_request(req))
+ return 0;
+
+ /* Reject requests that run past the advertised capacity. */
+ if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
+ return 0;
+
+ switch(rq_data_dir(req)) {
+ case READ:
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ if (tr->readsect(dev, block, buf))
+ return 0;
+ return 1;
+
+ case WRITE:
+ /* writesect is optional; its absence means a read-only device. */
+ if (!tr->writesect)
+ return 0;
+
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ if (tr->writesect(dev, block, buf))
+ return 0;
+ return 1;
+
+ default:
+ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+ return 0;
+ }
+}
+
+/*
+ * Kernel thread that drains the shared request queue for a translation
+ * layer.  Sleeps (TASK_INTERRUPTIBLE) when the queue is empty and is
+ * woken by ubi_blktrans_request(); exits when kthread_stop() is called.
+ * The queue lock is dropped around the actual I/O, which is serialized
+ * per-device by dev->lock instead.
+ */
+static int ubi_blktrans_thread(void *arg)
+{
+ struct ubi_blktrans_ops *tr = arg;
+ struct request_queue *rq = tr->blkcore_priv->rq;
+
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC;
+
+ spin_lock_irq(rq->queue_lock);
+ while (!kthread_should_stop()) {
+ struct request *req;
+ struct ubi_blktrans_dev *dev;
+ int res = 0;
+
+ req = elv_next_request(rq);
+
+ if (!req) {
+ /* Queue empty: sleep until ubi_blktrans_request() wakes us. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irq(rq->queue_lock);
+ schedule();
+ spin_lock_irq(rq->queue_lock);
+ continue;
+ }
+
+ dev = req->rq_disk->private_data;
+ tr = dev->tr;
+
+ spin_unlock_irq(rq->queue_lock);
+
+ /* Serialize I/O on this device; queue lock must not be held here. */
+ mutex_lock(&dev->lock);
+ res = do_blktrans_request(tr, dev, req);
+ mutex_unlock(&dev->lock);
+
+ spin_lock_irq(rq->queue_lock);
+
+ end_request(req, res);
+ }
+ spin_unlock_irq(rq->queue_lock);
+
+ return 0;
+}
+
+/* Block-layer request_fn: just kick the per-layer worker thread. */
+static void ubi_blktrans_request(struct request_queue *rq)
+{
+ struct ubi_blktrans_ops *tr = rq->queuedata;
+ wake_up_process(tr->blkcore_priv->thread);
+}
+
+/*
+ * block_device_operations.open: map (major, minor) to a UBI volume,
+ * open it read-write or read-only depending on the file mode, attach
+ * the descriptor to the translation device and call the layer's own
+ * open() hook.
+ *
+ * FIXME: if try_module_get() fails we jump to out_tr without calling
+ * ubi_close_volume(), leaking the volume reference taken above.
+ * NOTE: the 'out' label below is never jumped to (dead label).
+ */
+static int blktrans_open(struct inode *i, struct file *f)
+{
+ struct ubi_volume_desc *desc;
+ int ubi_num = ubi_major2num(imajor(i));
+ int vol_id = iminor(i);
+
+ int mode;
+ struct ubi_blktrans_dev *dev;
+ struct ubi_blktrans_ops *tr;
+ int ret = -ENODEV;
+
+ if (f->f_mode & FMODE_WRITE)
+ mode = UBI_READWRITE;
+ else
+ mode = UBI_READONLY;
+
+ desc = ubi_open_volume(ubi_num, vol_id, mode);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ desc->vol->bdev_mode = mode;
+ dev = i->i_bdev->bd_disk->private_data;
+ dev->uv = desc; // add by Nancy
+ tr = dev->tr;
+
+ if (!try_module_get(tr->owner))
+ goto out_tr;
+
+ /* FIXME: Locking. A hot pluggable device can go away
+ (del_mtd_device can be called for it) without its module
+ being unloaded. */
+
+ ret = 0;
+ if (tr->open && (ret = tr->open(dev))) {
+ ubi_close_volume(desc);
+ out_tr:
+ module_put(tr->owner);
+ }
+ out:
+ return ret;
+}
+
+/*
+ * block_device_operations.release: call the layer's release() hook and,
+ * on success, drop the volume reference and module reference taken in
+ * blktrans_open().
+ */
+static int blktrans_release(struct inode *i, struct file *f)
+{
+ struct ubi_blktrans_dev *dev;
+ struct ubi_blktrans_ops *tr;
+ struct ubi_volume_desc *desc;
+ int ret = 0;
+
+ dev = i->i_bdev->bd_disk->private_data;
+ tr = dev->tr;
+ desc = dev->uv;
+
+ if (tr->release)
+ ret = tr->release(dev);
+
+ if (!ret) {
+ ubi_close_volume(desc);
+ module_put(tr->owner);
+ }
+ return ret;
+}
+
+/* Forward HDIO_GETGEO to the translation layer's optional getgeo() hook. */
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ubi_blktrans_dev *dev = bdev->bd_disk->private_data;
+
+ if (dev->tr->getgeo)
+ return dev->tr->getgeo(dev, geo);
+ return -ENOTTY;
+}
+
+/*
+ * block_device_operations.ioctl: only BLKFLSBUF is handled, by forwarding
+ * to the layer's flush() hook; everything else returns -ENOTTY.
+ */
+static int blktrans_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ubi_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+ struct ubi_blktrans_ops *tr = dev->tr;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ if (tr->flush)
+ return tr->flush(dev);
+ /* The core code did the work, we had nothing to do. */
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
+
+/* Shared block_device_operations for all UBI translation-layer disks. */
+struct block_device_operations ubi_blktrans_ops = {
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .ioctl = blktrans_ioctl,
+ .getgeo = blktrans_getgeo,
+};
+
+/*
+ * Register one translation device: assign a device number (first free
+ * slot when new->devnum == -1, otherwise the requested one), allocate
+ * and populate a gendisk, and add it to the block layer.
+ *
+ * Caller must hold vol_table_mutex (enforced by the trylock BUG()
+ * check below).  Returns 0 on success, -EBUSY if the requested devnum
+ * is taken or the minor space is exhausted, -ENOMEM on allocation
+ * failure.
+ */
+int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new)
+{
+ struct ubi_blktrans_ops *tr = new->tr;
+ struct list_head *this;
+ int last_devnum = -1;
+ struct gendisk *gd;
+
+ /* If we can take the mutex, the caller was not holding it: bug. */
+ if (mutex_trylock(&vol_table_mutex)) {
+ mutex_unlock(&vol_table_mutex);
+ BUG();
+ }
+
+ /* Keep tr->devs sorted by devnum while searching for a slot. */
+ list_for_each(this, &tr->devs) {
+ struct ubi_blktrans_dev *d = list_entry(this, struct ubi_blktrans_dev, list);
+ if (new->devnum == -1) {
+ /* Use first free number */
+ if (d->devnum != last_devnum+1) {
+ /* Found a free devnum. Plug it in here */
+ new->devnum = last_devnum+1;
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ } else if (d->devnum == new->devnum) {
+ /* Required number taken */
+ return -EBUSY;
+ } else if (d->devnum > new->devnum) {
+ /* Required number was free */
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ last_devnum = d->devnum;
+ }
+ if (new->devnum == -1)
+ new->devnum = last_devnum+1;
+
+ /* 256 minors per major in this scheme. */
+ if ((new->devnum << tr->part_bits) > 256) {
+ return -EBUSY;
+ }
+
+ mutex_init(&new->lock);
+ list_add_tail(&new->list, &tr->devs);
+ added:
+ if (!tr->writesect)
+ new->readonly = 1;
+
+ gd = alloc_disk(1 << tr->part_bits);
+ if (!gd) {
+ list_del(&new->list);
+ return -ENOMEM;
+ }
+ gd->major = tr->major;
+ gd->first_minor = (new->devnum) << tr->part_bits;
+ gd->fops = &ubi_blktrans_ops;
+
+ /* Partitionable devices get sda-style names, others name0-style. */
+ if (tr->part_bits)
+ if (new->devnum < 26)
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c", tr->name, 'a' + new->devnum);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c%c", tr->name,
+ 'a' - 1 + new->devnum / 26,
+ 'a' + new->devnum % 26);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%d", tr->name, new->devnum);
+
+ /* 2.5 has capacity in units of 512 bytes while still
+ having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
+ set_capacity(gd, (new->size * tr->blksize) >> 9);
+
+ gd->private_data = new;
+ new->blkcore_priv = gd;
+ gd->queue = tr->blkcore_priv->rq;
+
+ if (new->readonly)
+ set_disk_ro(gd, 1);
+
+ add_disk(gd);
+
+ return 0;
+}
+
+/*
+ * Unregister a translation device: unlink it from its layer's list and
+ * tear down its gendisk.  Caller must hold vol_table_mutex (checked via
+ * the trylock BUG() idiom).  Always returns 0.
+ */
+int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old)
+{
+ if (mutex_trylock(&vol_table_mutex)) {
+ mutex_unlock(&vol_table_mutex);
+ BUG();
+ }
+
+ list_del(&old->list);
+
+ del_gendisk(old->blkcore_priv);
+ put_disk(old->blkcore_priv);
+
+ return 0;
+}
+
+/*
+ * Volume-removal notifier: for every registered translation layer, tear
+ * down any device that is backed by the departing volume.
+ */
+static void blktrans_notify_remove(struct ubi_volume *vol)
+{
+ struct list_head *this, *this2, *next;
+
+ list_for_each(this, &blktrans_majors) {
+ struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);
+
+ /* _safe variant: remove_vol() unlinks entries as we walk. */
+ list_for_each_safe(this2, next, &tr->devs) {
+ struct ubi_blktrans_dev *dev = list_entry(this2, struct ubi_blktrans_dev, list);
+
+ if (dev->uv->vol == vol)
+ tr->remove_vol(dev);
+ }
+ }
+}
+
+/*
+ * Volume-addition notifier: offer the new volume to every registered
+ * translation layer via its add_vol() hook.  The #if 0 block is a
+ * leftover from mtd_blkdevs.c (MTD_ABSENT has no UBI equivalent).
+ */
+static void blktrans_notify_add(struct ubi_volume *vol)
+{
+ struct list_head *this;
+ #if 0
+ if (mtd->type == MTD_ABSENT)
+ return;
+#endif
+
+ list_for_each(this, &blktrans_majors) {
+ struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);
+
+ tr->add_vol(tr,vol);
+ }
+
+}
+
+/* Hooks invoked by build.c whenever a UBI volume appears or disappears. */
+static struct vol_notifier blktrans_notifier = {
+ .add = blktrans_notify_add,
+ .remove = blktrans_notify_remove,
+};
+
+
+/*
+ * Register a UBI block translation layer: allocate its core state,
+ * grab a dynamic block major, set up the request queue and worker
+ * thread, then offer every existing volume to the layer's add_vol().
+ *
+ * FIXME: the return value of register_blkdev() is not checked; a
+ * negative error would be stored in tr->major and silently misused.
+ */
+int register_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+ int i;
+
+ /* Register the notifier if/when the first device type is
+ registered, to prevent the link/init ordering from biting
+ us. */
+ if (!blktrans_notifier.list.next)
+ register_vol_user(&blktrans_notifier);
+
+ tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
+ if (!tr->blkcore_priv)
+ return -ENOMEM;
+
+ mutex_lock(&vol_table_mutex);
+ tr->major = register_blkdev(0, tr->name);
+ spin_lock_init(&tr->blkcore_priv->queue_lock);
+
+ tr->blkcore_priv->rq = blk_init_queue(ubi_blktrans_request, &tr->blkcore_priv->queue_lock);
+ if (!tr->blkcore_priv->rq) {
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&vol_table_mutex);
+ return -ENOMEM;
+ }
+
+ tr->blkcore_priv->rq->queuedata = tr;
+ blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+ /* blksize is assumed to be a power of two; blkshift = log2(blksize). */
+ tr->blkshift = ffs(tr->blksize) - 1;
+
+ tr->blkcore_priv->thread = kthread_run(ubi_blktrans_thread, tr,
+ "%sd", tr->name);
+ if (IS_ERR(tr->blkcore_priv->thread)) {
+ blk_cleanup_queue(tr->blkcore_priv->rq);
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&vol_table_mutex);
+ return PTR_ERR(tr->blkcore_priv->thread);
+ }
+
+ INIT_LIST_HEAD(&tr->devs);
+ list_add(&tr->list, &blktrans_majors);
+
+ /* Create a device for every volume already present in the table. */
+ for (i=0; i<UBI_MAX_VOLUMES; i++) {
+ if (vol_table[i] )
+ tr->add_vol(tr, vol_table[i]);
+ }
+
+ mutex_unlock(&vol_table_mutex);
+ return 0;
+}
+
+/*
+ * Unregister a translation layer: stop its worker thread, remove all of
+ * its devices, and release the queue, major number and core state.
+ * Always returns 0.
+ */
+int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+ struct list_head *this, *next;
+
+ mutex_lock(&vol_table_mutex);
+
+ /* Clean up the kernel thread */
+ kthread_stop(tr->blkcore_priv->thread);
+
+ /* Remove it from the list of active majors */
+ list_del(&tr->list);
+
+ list_for_each_safe(this, next, &tr->devs) {
+ struct ubi_blktrans_dev *dev = list_entry(this, struct ubi_blktrans_dev, list);
+ tr->remove_vol(dev);
+ }
+
+ blk_cleanup_queue(tr->blkcore_priv->rq);
+ unregister_blkdev(tr->major, tr->name);
+
+ mutex_unlock(&vol_table_mutex);
+
+ kfree(tr->blkcore_priv);
+
+ /* Every device must have been removed by the loop above. */
+ BUG_ON(!list_empty(&tr->devs));
+ return 0;
+}
+
+/* Module exit: drop the volume notifier if it was ever registered. */
+static void __exit ubi_blktrans_exit(void)
+{
+ /* No race here -- if someone's currently in register_ubi_blktrans
+ we're screwed anyway. */
+ if (blktrans_notifier.list.next)
+ unregister_vol_user(&blktrans_notifier);
+}
+
+
+module_exit(ubi_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_ubi_blktrans);
+EXPORT_SYMBOL_GPL(deregister_ubi_blktrans);
+EXPORT_SYMBOL_GPL(add_ubi_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_ubi_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Yurong Tan <nancydreaming@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for UBI 'translation layers'");
+
diff -uprBN ubi.orig/block-jz.c ubi.new/block-jz.c
--- ubi.orig/block-jz.c 1970-01-01 08:00:00.000000000 +0800
+++ ubi.new/block-jz.c 2008-03-04 16:56:10.000000000 +0800
@@ -0,0 +1,314 @@
+/*
+ * Direct UBI block device access
+ *
+ * $Id: ubiblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
+ *
+ * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
+ * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
+ * (C) 2008 Yurong Tan <nancydreaming@gmail.com> :
+ * borrow mtdblock.c to work on top of UBI
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/ubi_blktrans.h>
+#include <linux/mutex.h>
+#include "ubi.h"
+
+/* ubi_is_mapped() result meaning "LEB has no physical eraseblock". */
+#define UNMAPPED 0
+
+/*
+ * Per-volume cache state for the ubiblock device: one LEB-sized write
+ * cache and one LEB-sized read cache, indexed by volume id in ubiblks[].
+ */
+static struct ubiblk_dev {
+ struct ubi_volume_desc *uv;	/* open descriptor for the backing volume */
+ int count;	/* open reference count */
+ struct mutex cache_mutex;	/* serializes write-cache flushes */
+ unsigned short vbw; //virt block number of write cache
+ unsigned short vbr; //virt block number of read cache
+ unsigned char *write_cache;	/* one usable_leb_size buffer */
+ unsigned char *read_cache;	/* one usable_leb_size buffer */
+ enum { STATE_UNUSED, STATE_USED } read_cache_state, write_cache_state;
+} *ubiblks[UBI_MAX_VOLUMES];
+
+void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block);
+int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk);
+
+/*
+ * Write the cached LEB back to the volume (no-op when the cache is
+ * unused).  Caller holds cache_mutex.  Always returns 0; the
+ * ubi_leb_write() result is not checked (NOTE: errors are silently lost).
+ */
+int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk)
+{
+ if (STATE_UNUSED == ubiblk->write_cache_state)
+ return 0;
+
+ ubi_leb_write(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, 0,
+ ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
+ ubiblk->write_cache_state = STATE_UNUSED;
+ return 0;
+}
+
+/*
+ * Point the write cache at @virt_block and pre-load it with the LEB's
+ * current contents, so partial-LEB writes can be merged in place.
+ * (The read-modify-write here is the "read only needed pages" TODO
+ * mentioned in the cover mail.)
+ */
+void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block)
+{
+ ubiblk->vbw = virt_block;
+ ubiblk->write_cache_state = STATE_USED;
+
+ ubi_leb_read(ubiblk->uv, ubiblk->vbw, ubiblk->write_cache, 0,
+ ubiblk->uv->vol->usable_leb_size, UBI_UNKNOWN);
+}
+
+/*
+ * Write @len bytes for 512-byte @sector through the LEB-sized write
+ * cache.  The sector is mapped to (virt_block, page, page_offset) via
+ * the volume's min_io_size; a write to a different LEB than the one
+ * currently cached flushes the cache first.  Unmapping before re-mapping
+ * lets UBI pick a fresh physical eraseblock for the rewrite.
+ * Always returns 0 (NOTE: UBI call results are not checked).
+ */
+static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector,
+ int len, const char *buf)
+{
+ struct ubi_volume_desc *uv = ubiblk->uv;
+ int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
+ unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9;
+ unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1;
+ unsigned short virt_block, page, page_offset;
+ unsigned long virt_page;
+
+ /* Decompose the 512-byte sector address into LEB/page coordinates. */
+ virt_page = sector / sectors_per_page;
+ page_offset = sector % sectors_per_page;
+ virt_block = virt_page / ppb;
+ page = virt_page % ppb;
+
+ if(ubi_is_mapped(uv, virt_block ) == UNMAPPED ){
+ /* Target LEB not mapped yet: flush old cache, start a new one. */
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ ubiblk_setup_writecache(ubiblk, virt_block);
+ ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
+
+ } else {
+ if ( STATE_USED == ubiblk->write_cache_state ) {
+ if ( ubiblk->vbw != virt_block) {
+ // Commit before we start a new cache.
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ ubiblk_setup_writecache(ubiblk, virt_block);
+ ubi_leb_unmap(uv, virt_block);
+ ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
+ } else {
+ //dprintk("cache hit: 0x%x\n", virt_page);
+ }
+ } else {
+// printk("with existing mapping\n");
+ ubiblk_setup_writecache(ubiblk, virt_block);
+ ubi_leb_unmap(uv, virt_block);
+ ubi_leb_map(uv, virt_block, UBI_UNKNOWN);
+ }
+ }
+ /* Merge the sector into the cached LEB image. */
+ memcpy(&ubiblk->write_cache[(page<<page_shift) +(page_offset<<9)],
+ buf,len);
+ return 0;
+}
+
+/*
+ * Read @len bytes for 512-byte @sector through the LEB-sized read
+ * cache.  If the sector's LEB is currently dirty in the write cache it
+ * is flushed first so the read sees the latest data.  Unmapped LEBs
+ * read back as 0xFF, like erased flash.  Always returns 0.
+ */
+static int do_cached_read (struct ubiblk_dev *ubiblk, unsigned long sector,
+ int len, char *buf)
+{
+ struct ubi_volume_desc *uv = ubiblk->uv;
+ int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
+ unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9;
+ unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1;
+ unsigned short virt_block, page, page_offset;
+ unsigned long virt_page;
+
+ /* Decompose the 512-byte sector address into LEB/page coordinates. */
+ virt_page = sector / sectors_per_page;
+ page_offset = sector % sectors_per_page;
+ virt_block = virt_page / ppb;
+ page = virt_page % ppb;
+
+ /* Reading the LEB we are writing: commit the write cache first. */
+ if(ubiblk->vbw == virt_block){
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+ }
+
+ if ( ubi_is_mapped( uv, virt_block) == UNMAPPED){
+ // In a Flash Memory device, there might be a logical block that is
+ // not allocated to a physical block due to the block not being used.
+ // All data returned should be set to 0xFF when accessing this logical
+ // block.
+ // dprintk("address translate fail\n");
+ memset(buf, 0xFF, 512);
+ } else {
+
+ /* Refill the read cache when it holds a different LEB. */
+ if( ubiblk->vbr != virt_block ||ubiblk->read_cache_state == STATE_UNUSED ){
+ ubiblk->vbr = virt_block;
+ ubi_leb_read(uv, virt_block, ubiblk->read_cache, 0, uv->vol->usable_leb_size, 0);
+ ubiblk->read_cache_state = STATE_USED;
+ }
+ memcpy(buf, &ubiblk->read_cache[(page<<page_shift)+(page_offset<<9)], len);
+ }
+ return 0;
+}
+
+/* Translation-layer readsect hook: one 512-byte sector via the read cache. */
+static int ubiblk_readsect(struct ubi_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+ return do_cached_read(ubiblk, block, 512, buf);
+}
+
+/* Translation-layer writesect hook: one 512-byte sector via the write cache. */
+static int ubiblk_writesect(struct ubi_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+ return do_cached_write(ubiblk, block, 512, buf);
+}
+
+/*
+ * First open of volume @dev: allocate the ubiblk_dev and its two
+ * LEB-sized caches and publish it in ubiblks[].
+ *
+ * FIXME: on vmalloc failure this leaks @ubiblk and whichever cache was
+ * allocated, and the local 'ret' variable is never used.
+ */
+static int ubiblk_init_vol(int dev, struct ubi_volume_desc *uv)
+{
+ struct ubiblk_dev *ubiblk;
+ int ret;
+
+ ubiblk = kmalloc(sizeof(struct ubiblk_dev), GFP_KERNEL);
+ if (!ubiblk)
+ return -ENOMEM;
+
+ memset(ubiblk, 0, sizeof(*ubiblk));
+
+ ubiblk->count = 1;
+ ubiblk->uv = uv;
+ mutex_init (&ubiblk->cache_mutex);
+
+ ubiblk->write_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
+ ubiblk->read_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
+
+ if(!ubiblk->write_cache ||
+ !ubiblk->read_cache )
+ return -ENOMEM;
+
+ ubiblk->write_cache_state = STATE_UNUSED;
+ ubiblk->read_cache_state = STATE_UNUSED;
+
+ ubiblks[dev] = ubiblk;
+ DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+ return 0;
+}
+
+/*
+ * Layer open() hook: bump the refcount if the volume's cache state
+ * already exists, otherwise create it via ubiblk_init_vol().
+ */
+static int ubiblk_open(struct ubi_blktrans_dev *ubd)
+{
+ int dev = ubd->devnum;
+ int res = 0;
+
+ DEBUG(MTD_DEBUG_LEVEL1,"ubiblock_open\n");
+
+ if (ubiblks[dev]) {
+ ubiblks[dev]->count++;
+ printk("%s: increase use count\n",__FUNCTION__);
+ return 0;
+ }
+
+ /* OK, it's not open. Create cache info for it */
+ res = ubiblk_init_vol(dev, ubd->uv);
+ return res;
+}
+
+/*
+ * Layer release() hook: flush the write cache, then on last close sync
+ * the underlying MTD and free the caches and per-volume state.
+ * Always returns 0.
+ */
+static int ubiblk_release(struct ubi_blktrans_dev *ubd)
+{
+ int dev = ubd->devnum;
+ struct ubiblk_dev *ubiblk = ubiblks[dev];
+ struct ubi_device *ubi = ubiblk->uv->vol->ubi;
+
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ if (!--ubiblk->count) {
+ /* It was the last usage. Free the device */
+ ubiblks[dev] = NULL;
+
+ /* sync is optional on the MTD backing device. */
+ if (ubi->mtd->sync)
+ ubi->mtd->sync(ubi->mtd);
+
+ vfree(ubiblk->write_cache);
+ vfree(ubiblk->read_cache);
+ kfree(ubiblk);
+ }
+ return 0;
+}
+
+/*
+ * Layer flush() hook (BLKFLSBUF): commit the write cache and sync the
+ * backing MTD device.  Always returns 0.
+ */
+static int ubiblk_flush(struct ubi_blktrans_dev *dev)
+{
+ struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+ struct ubi_device *ubi = ubiblk->uv->vol->ubi;
+
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ if (ubi->mtd->sync)
+ ubi->mtd->sync(ubi->mtd);
+ return 0;
+}
+
+/*
+ * add_vol hook: create a translation device for a newly seen volume,
+ * sized from vol->used_bytes in 512-byte sectors.
+ * NOTE: the add_ubi_blktrans_dev() result is ignored, so @dev leaks on
+ * failure; allocation failure is also silently swallowed (void hook).
+ */
+static void ubiblk_add_vol_dev(struct ubi_blktrans_ops *tr, struct ubi_volume *vol)
+{
+ struct ubi_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ dev->devnum = vol->vol_id;
+ dev->size = vol->used_bytes >> 9;
+ dev->tr = tr;
+
+ if (vol->bdev_mode == UBI_READONLY)
+ dev->readonly = 1;
+
+ /* Remember the block major so ubi_major2num() can resolve it. */
+ vol->ubi->bdev_major = tr->major;
+
+ add_ubi_blktrans_dev(dev);
+}
+
+/* remove_vol hook: unregister the device and free its descriptor. */
+static void ubiblk_remove_vol_dev(struct ubi_blktrans_dev *dev)
+{
+ del_ubi_blktrans_dev(dev);
+ kfree(dev);
+}
+
+/* Fake a 4-head, 16-sector geometry for tools that require HDIO_GETGEO. */
+static int ubiblk_getgeo(struct ubi_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ memset(geo, 0, sizeof(*geo));
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->cylinders = dev->size/(4*16);
+ return 0;
+}
+
+/* The "ubiblock" translation layer: 512-byte sectors, dynamic major. */
+static struct ubi_blktrans_ops ubiblk_tr = {
+ .name = "ubiblock",
+ .major = 0,	/* 0 = let register_blkdev() pick a major */
+ .part_bits = 0,	/* no partitions */
+ .blksize = 512,
+ .open = ubiblk_open,
+ .release = ubiblk_release,
+ .readsect = ubiblk_readsect,
+ .writesect = ubiblk_writesect,
+ .getgeo = ubiblk_getgeo,
+ .flush = ubiblk_flush,
+ .add_vol = ubiblk_add_vol_dev,
+ .remove_vol = ubiblk_remove_vol_dev,
+ .owner = THIS_MODULE,
+};
+
+/* Module init: register the ubiblock translation layer with the core. */
+static int __init init_ubiblock(void)
+{
+ return register_ubi_blktrans(&ubiblk_tr);
+}
+
+/* Module exit: tear down the ubiblock translation layer. */
+static void __exit cleanup_ubiblock(void)
+{
+ deregister_ubi_blktrans(&ubiblk_tr);
+}
+
+module_init(init_ubiblock);
+module_exit(cleanup_ubiblock);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> , Yurong Tan <nancydreaming@gmail.com>");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to UBI volumes");
diff -uprBN ubi.orig/build.c ubi.new/build.c
--- ubi.orig/build.c 2008-02-23 12:57:47.000000000 +0800
+++ ubi.new/build.c 2008-03-03 17:51:27.000000000 +0800
@@ -46,6 +46,158 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* add by Nancy begin */
+
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+ should not use them for _anything_ else */
+DEFINE_MUTEX(vol_table_mutex);
+struct ubi_volume *vol_table[UBI_MAX_VOLUMES];
+
+EXPORT_SYMBOL_GPL(vol_table_mutex);
+EXPORT_SYMBOL_GPL(vol_table);
+
+static LIST_HEAD(vol_notifiers);
+
+/**
+ * add_vol_device - register a UBI volume with the notifier machinery
+ * @vol: pointer to the new UBI volume
+ *
+ * Add the volume to vol_table[] (indexed by vol_id) and notify each
+ * currently registered 'user' of its arrival.  Returns zero on success
+ * or 1 if the slot for this vol_id is already occupied.
+ * FIXME: the local variable 'i' is unused.
+ */
+
+int add_vol_device(struct ubi_volume *vol)
+{
+ int i;
+
+ mutex_lock(&vol_table_mutex);
+ if (!vol_table[vol->vol_id]) {
+ struct list_head *this;
+
+ vol_table[vol->vol_id] = vol;
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the vol_table_mutex */
+ list_for_each(this, &vol_notifiers) {
+ struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+ not->add(vol);
+ }
+
+ mutex_unlock(&vol_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no complaints about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+ }
+ mutex_unlock(&vol_table_mutex);
+ return 1;
+}
+
+/**
+ * del_vol_device - unregister a UBI volume
+ * @vol: pointer to the UBI volume
+ *
+ * Remove the volume from vol_table[] and notify each registered 'user'
+ * of its departure.  Returns zero on success, %-ENODEV if the volume is
+ * not in the table, or %-EBUSY if it still has readers/writers.
+ */
+
+int del_vol_device (struct ubi_volume *vol)
+{
+ int ret;
+
+ mutex_lock(&vol_table_mutex);
+ if (vol_table[vol->vol_id] != vol) {
+ ret = -ENODEV;
+ } else if (vol->readers ||vol->writers || vol->exclusive) {
+ /* FIXME: the message text is misleading -- the volume is busy,
+ its use count is NOT 0. */
+ printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count 0\n",
+ vol->vol_id, vol->name);
+ ret = -EBUSY;
+ } else {
+ struct list_head *this;
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the vol_table_mutex */
+ list_for_each(this, &vol_notifiers) {
+ struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+ not->remove(vol);
+ }
+
+ vol_table[vol->vol_id] = NULL;
+
+ module_put(THIS_MODULE);
+ ret = 0;
+ }
+ mutex_unlock(&vol_table_mutex);
+ return ret;
+}
+
+/**
+ * register_vol_user - register a 'user' of UBI volumes
+ * @new: pointer to notifier info structure
+ *
+ * Registers a pair of callbacks to be called upon addition or removal
+ * of UBI volumes.  Causes the 'add' callback to be immediately invoked
+ * for each volume currently present in vol_table[].
+ */
+
+void register_vol_user(struct vol_notifier *new)
+{
+ int i;
+
+ mutex_lock(&vol_table_mutex);
+
+ list_add(&new->list, &vol_notifiers);
+
+ __module_get(THIS_MODULE);
+
+ /* Replay existing volumes to the new user. */
+ for (i=0; i< UBI_MAX_VOLUMES; i++)
+ if (vol_table[i])
+ new->add(vol_table[i]);
+
+ mutex_unlock(&vol_table_mutex);
+}
+
+/**
+ * unregister_vol_user - unregister a 'user' of UBI volumes
+ * @old: pointer to notifier info structure
+ *
+ * Removes a callback pair from the list of 'users' notified upon
+ * addition or removal of UBI volumes.  Causes the 'remove' callback to
+ * be immediately invoked for each volume currently present.
+ * Always returns 0.
+ */
+
+int unregister_vol_user(struct vol_notifier *old)
+{
+ int i;
+
+ mutex_lock(&vol_table_mutex);
+
+ module_put(THIS_MODULE);
+
+ /* Tell the departing user to forget every known volume. */
+ for (i=0; i< UBI_MAX_VOLUMES; i++)
+ if (vol_table[i])
+ old->remove(vol_table[i]);
+
+ list_del(&old->list);
+ mutex_unlock(&vol_table_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(add_vol_device);
+EXPORT_SYMBOL_GPL(del_vol_device);
+EXPORT_SYMBOL_GPL(register_vol_user);
+EXPORT_SYMBOL_GPL(unregister_vol_user);
+
+/* add by Nancy end*/
+
+
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD device name or number string
@@ -84,6 +236,7 @@ DEFINE_MUTEX(ubi_devices_mutex);
/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);
+EXPORT_SYMBOL_GPL(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -204,7 +356,8 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if ( (ubi && MAJOR(ubi->cdev.dev) == major) ||
+ (ubi && ubi->bdev_major == major)) {
ubi_num = ubi->ubi_num;
break;
}
@@ -213,6 +366,7 @@ int ubi_major2num(int major)
return ubi_num;
}
+EXPORT_SYMBOL_GPL(ubi_major2num);
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
@@ -672,6 +826,15 @@ static int autoresize(struct ubi_device
return 0;
}
+/* add by Nancy: publish every volume of a newly attached UBI device to
+ the block notifier machinery.  Always returns 0 (add_vol_device()
+ failures are ignored). */
+static int bdev_init(struct ubi_device *ubi){
+ int i;
+ for(i=0; i<ubi->vtbl_slots; i++)
+ if(ubi->volumes[i])
+ add_vol_device(ubi->volumes[i]);
+ return 0;
+}
+
/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd_dev: MTD device description object
@@ -793,6 +956,10 @@ int ubi_attach_mtd_dev(struct mtd_info *
if (err)
goto out_detach;
+ err = bdev_init(ubi);
+ if(err)
+ goto out_detach;
+
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
diff -uprBN ubi.orig/cdev.c ubi.new/cdev.c
--- ubi.orig/cdev.c 2008-02-23 12:57:47.000000000 +0800
+++ ubi.new/cdev.c 2008-03-03 17:51:27.000000000 +0800
@@ -113,12 +113,16 @@ static int vol_cdev_open(struct inode *i
mode = UBI_READONLY;
dbg_msg("open volume %d, mode %d", vol_id, mode);
+
+ const struct ubi_device *ubi = ubi_get_by_major(imajor(inode));
+ if(ubi->ubi_num != ubi_num)
+ printk("ubi_num not equal!\n");
desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
return PTR_ERR(desc);
- file->private_data = desc;
+ file->private_data = desc;
return 0;
}
diff -uprBN ubi.orig/Kconfig ubi.new/Kconfig
--- ubi.orig/Kconfig 2008-02-23 12:57:47.000000000 +0800
+++ ubi.new/Kconfig 2008-03-03 17:51:27.000000000 +0800
@@ -55,4 +55,18 @@ config MTD_UBI_GLUEBI
this if no legacy software will be used.
source "drivers/mtd/ubi/Kconfig.debug"
+
+config MTD_UBI_BLKDEVS
+ tristate "Common interface to block layer for UBI 'translation layers'"
+ depends on BLOCK
+ default n
+
+config MTD_UBI_BLOCK
+ tristate "Emulate block devices"
+ default n
+ depends on MTD_UBI_BLKDEVS
+ help
+ This option enables Block layer emulation on top of UBI volumes: for
+ each UBI volumes an block device is created. This is handy to make
+ traditional filesystem (like ext2, VFAT) work on top of UBI.
endmenu
diff -uprBN ubi.orig/Makefile ubi.new/Makefile
--- ubi.orig/Makefile 2008-02-23 12:57:47.000000000 +0800
+++ ubi.new/Makefile 2008-03-03 17:51:27.000000000 +0800
@@ -5,3 +5,6 @@ ubi-y += misc.o
ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+
+obj-$(CONFIG_MTD_UBI_BLKDEVS) += bdev.o
+obj-$(CONFIG_MTD_UBI_BLOCK) += block-jz.o
diff -uprBN ubi.orig/ubi.h ubi.new/ubi.h
--- ubi.orig/ubi.h 2008-02-23 12:57:47.000000000 +0800
+++ ubi.new/ubi.h 2008-03-03 17:51:27.000000000 +0800
@@ -133,6 +133,12 @@ struct ubi_ltree_entry {
struct ubi_volume_desc;
+struct vol_notifier {
+ void (*add)(struct ubi_volume *vol);
+ void (*remove)(struct ubi_volume *vol);
+ struct list_head list;
+};
+
/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the the Linux device model
@@ -232,6 +238,7 @@ struct ubi_volume {
int gluebi_refcount;
struct mtd_info gluebi_mtd;
#endif
+ int bdev_mode; //add by Nancy
};
/**
@@ -336,6 +343,7 @@ struct ubi_wl_entry;
*/
struct ubi_device {
struct cdev cdev;
+ int bdev_major; //add by Nancy
struct device dev;
int ubi_num;
char ubi_name[sizeof(UBI_NAME_STR)+5];
@@ -504,7 +512,7 @@ int ubi_io_read_vid_hdr(struct ubi_devic
struct ubi_vid_hdr *vid_hdr, int verbose);
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
-
+
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-04 9:54 ` Nancy
@ 2008-03-04 9:53 ` Artem Bityutskiy
2008-03-04 10:07 ` Nancy
0 siblings, 1 reply; 14+ messages in thread
From: Artem Bityutskiy @ 2008-03-04 9:53 UTC (permalink / raw)
To: Nancy; +Cc: linux-mtd
On Tue, 2008-03-04 at 17:54 +0800, Nancy wrote:
> How are you going to support VFAT? In the embedded systems world, this
> functionality is very important.
I do not mean the block translation layer is unneeded. It is ok and cool
to create it. I mean we do not need any generic layer where you can
register "block translation drivers", like mtd_blktrans.
--
Best regards,
Artem Bityutskiy (Битюцкий Артём)
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-04 9:40 ` Artem Bityutskiy
@ 2008-03-04 9:54 ` Nancy
2008-03-04 9:53 ` Artem Bityutskiy
0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-03-04 9:54 UTC (permalink / raw)
To: dedekind; +Cc: linux-mtd
> could you please send patches inline? It is what people normally do
> and it is easier to reply and comment the patches this way.
OK, next time I will try.
> Also, I do not think we need this common "blktrans" module for UBI at
> all, it just does not seem to make much sense.
How are you going to support VFAT? In the embedded systems world, this
functionality is very important.
--
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-04 9:53 ` Artem Bityutskiy
@ 2008-03-04 10:07 ` Nancy
2008-03-04 13:55 ` Nancy
0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-03-04 10:07 UTC (permalink / raw)
To: dedekind; +Cc: linux-mtd
> I do not mean the block translation layer is unneeded. It is ok and cool
> to create it. I mean we do not need any generic layer where you can
> register "block translation drivers", like mtd_blktrans.
Sounds like you want to create a special layer. In fact, I don't quite
understand what that is. Are you creating it now? Would you mind if I
took part in it?
It's time to go home. See you later!
--
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-04 10:07 ` Nancy
@ 2008-03-04 13:55 ` Nancy
2008-05-13 4:16 ` Nancy
0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-03-04 13:55 UTC (permalink / raw)
To: dedekind; +Cc: linux-mtd
Hi Artem,
If I said something that made you feel bad, I'm so sorry. English is
not my native language. Please try to understand!
In fact, I just want you( linux hacker) to help me to finish
the job, never dreamed to make it become a part of UBI code. I'm just
an ordinary device driver programmer, lacking much knowledge about
the Linux kernel. If there were an NFTL layer for NAND in open source which could
be used in production products, I would feel so relaxed. If it were like that, I
won't spend much time on this. You never know how hard it was for me,
and I do all these things alone ( I am in a small company, there only
7 people manage a whole SOC drivers). All other comrades needs only
focus on the hardware layer things. But I have to work more, and no
one help.
Job is job, if no things out, you will be fired. Under that
huge pressure, I have to learn linux kernel in my spare time. Thank
God, during those painfully hard learning and working times, I gained much!
And now I am truly interested in the Linux kernel, though I'm still a beginner.
--
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-03-04 13:55 ` Nancy
@ 2008-05-13 4:16 ` Nancy
2008-05-13 9:53 ` Artem Bityutskiy
0 siblings, 1 reply; 14+ messages in thread
From: Nancy @ 2008-05-13 4:16 UTC (permalink / raw)
To: dedekind; +Cc: linux-mtd
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain; charset=GB2312, Size: 40340 bytes --]
Hi all,
I think it is my final edition of ubi block device layer code
based on UBI commit e442c48f84982d0fa10c6b292018241dafca4d65
Finally, it can support any filesystem based on block device layer.
eg: FAT, ext2....
#modprobe ubiblk
#mkfs.vfat /dev/ubiblock1
#mount -t vfat /dev/ubiblock1 /mnt/fat
I notice that many people need this feature, especially
in China. Hope this is helpful :-)
Here's my implementation:
diff -uprBN ../ubi/bdev.c ubi/bdev.c
--- ../ubi/bdev.c 1970-01-01 08:00:00.000000000 +0800
+++ ubi/bdev.c 2008-05-13 11:26:28.000000000 +0800
@@ -0,0 +1,432 @@
+/*
+ *
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Interface to Linux 2.5 block layer for UBI 'translation layers'.
+ *
+ * 2008 Yurong Tan <nancydreaming@gmail.com>:
+ * borrow from mtd_blkdevs.c for building block device layer on
top of UBI
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/freezer.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/uaccess.h>
+#include "ubi.h"
+#include "ubiblk.h"
+
+static LIST_HEAD(blktrans_majors);
+extern struct mutex vol_table_mutex;
+extern struct ubi_volume *vol_table[];
+
+extern void register_vol_user (struct vol_notifier *new);
+extern int unregister_vol_user (struct vol_notifier *old);
+extern int ubi_major2num(int major);
+
+struct ubi_blkcore_priv {
+ struct task_struct *thread;
+ struct request_queue *rq;
+ spinlock_t queue_lock;
+};
+
+/*
+ * do_blktrans_request - serve a single block-layer request by splitting it
+ * into translation-layer blocks and delegating each one to the registered
+ * readsect()/writesect() hooks.
+ *
+ * Returns 1 on success and 0 on failure, which is the convention expected
+ * by end_request() in the pre-2.6.31 block layer used here.
+ */
+static int do_blktrans_request(struct ubi_blktrans_ops *tr,
+ struct ubi_blktrans_dev *dev,
+ struct request *req)
+{
+ unsigned long block, nsect;
+ char *buf;
+
+ /* Convert the request's 512-byte sector units into the translation
+ layer's block size via tr->blkshift. */
+ block = req->sector << 9 >> tr->blkshift;
+ nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+ buf = req->buffer;
+
+ /* Only regular filesystem requests are handled; everything else fails. */
+ if (!blk_fs_request(req))
+ return 0;
+
+ /* Reject requests running past the advertised disk capacity. */
+ if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
+ return 0;
+
+ switch(rq_data_dir(req)) {
+ case READ:
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ if (tr->readsect(dev, block, buf))
+ return 0;
+ return 1;
+
+ case WRITE:
+ /* A missing writesect() hook marks the device read-only. */
+ if (!tr->writesect)
+ return 0;
+
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ if (tr->writesect(dev, block, buf))
+ return 0;
+ return 1;
+
+ default:
+ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+ return 0;
+ }
+}
+
+/*
+ * ubi_blktrans_thread - per-translation-layer kernel thread that drains the
+ * request queue.  It sleeps (TASK_INTERRUPTIBLE) while the queue is empty
+ * and is woken by ubi_blktrans_request().  The queue spinlock is held while
+ * manipulating the queue and dropped around the actual I/O, which is
+ * serialized per device by dev->lock.
+ */
+static int ubi_blktrans_thread(void *arg)
+{
+ struct ubi_blktrans_ops *tr = arg;
+ struct request_queue *rq = tr->blkcore_priv->rq;
+
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC;
+
+ spin_lock_irq(rq->queue_lock);
+ while (!kthread_should_stop()) {
+ struct request *req;
+ struct ubi_blktrans_dev *dev;
+ int res = 0;
+
+ req = elv_next_request(rq);
+
+ if (!req) {
+ /* Queue empty: go to sleep until woken by a new request. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irq(rq->queue_lock);
+ schedule();
+ spin_lock_irq(rq->queue_lock);
+ continue;
+ }
+ dev = req->rq_disk->private_data;
+ tr = dev->tr;
+
+ /* Drop the queue lock for the (sleeping) I/O itself. */
+ spin_unlock_irq(rq->queue_lock);
+ mutex_lock(&dev->lock);
+ res = do_blktrans_request(tr, dev, req);
+ mutex_unlock(&dev->lock);
+ spin_lock_irq(rq->queue_lock);
+
+ /* end_request() must be called with the queue lock held. */
+ end_request(req, res);
+ }
+ spin_unlock_irq(rq->queue_lock);
+
+ return 0;
+}
+
+/* Block-layer request_fn: just kick the worker thread; all real work is
+ done in ubi_blktrans_thread(). */
+static void ubi_blktrans_request(struct request_queue *rq)
+{
+ struct ubi_blktrans_ops *tr = rq->queuedata;
+ wake_up_process(tr->blkcore_priv->thread);
+}
+
+/*
+ * blktrans_open - block_device_operations .open handler.  Pins the
+ * translation-layer module and delegates to its open() hook.
+ *
+ * NOTE(review): if tr->open is NULL the function returns -1 WITHOUT
+ * dropping the module reference taken by try_module_get() above -- a
+ * module refcount leak.  Also, bare "-1" is returned to the VFS where a
+ * proper negative errno (e.g. -ENODEV) is expected; confirm and fix.
+ */
+static int blktrans_open(struct inode *i, struct file *f)
+{
+ struct ubi_blktrans_dev *dev;
+ struct ubi_blktrans_ops *tr;
+ int ret =0;
+
+ dev = i->i_bdev->bd_disk->private_data;
+ tr = dev->tr;
+
+ if (!try_module_get(tr->owner))
+ goto out_tr;
+
+ if(!tr->open)
+ return -1;
+ else
+ ret = tr->open(i,f);
+
+ return ret;
+out_tr:
+ module_put(tr->owner);
+ return -1;
+}
+
+/*
+ * blktrans_release - block_device_operations .release handler.  Calls the
+ * translation layer's release() hook and drops the module reference taken
+ * in blktrans_open().
+ *
+ * NOTE(review): 'desc' is assigned from dev->uv but never used afterwards;
+ * it looks like dead code left over from an earlier revision.
+ */
+static int blktrans_release(struct inode *i, struct file *f)
+{
+ struct ubi_blktrans_dev *dev;
+ struct ubi_blktrans_ops *tr;
+ struct ubi_volume_desc *desc;
+ int ret = 0;
+
+ dev = i->i_bdev->bd_disk->private_data;
+ tr = dev->tr;
+ desc = dev->uv;
+
+ if (tr->release)
+ ret = tr->release(dev);
+
+ module_put(tr->owner);
+ return ret;
+}
+
+/* HDIO_GETGEO: forward to the translation layer's getgeo() hook if it
+ exists, otherwise report the ioctl as unsupported. */
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ubi_blktrans_dev *dev = bdev->bd_disk->private_data;
+
+ if (dev->tr->getgeo)
+ return dev->tr->getgeo(dev, geo);
+ return -ENOTTY;
+}
+
+/*
+ * blktrans_ioctl - only BLKFLSBUF is handled here: it flushes the
+ * translation layer's write cache via the flush() hook.  All other ioctls
+ * are refused with -ENOTTY.
+ */
+static int blktrans_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ubi_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+ struct ubi_blktrans_ops *tr = dev->tr;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ if (tr->flush)
+ return tr->flush(dev);
+ /* The core code did the work, we had nothing to do. */
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
+
+struct block_device_operations ubi_blktrans_ops = {
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .ioctl = blktrans_ioctl,
+ .getgeo = blktrans_getgeo,
+};
+
+/*
+ * add_ubi_blktrans_dev - register one emulated block device for a UBI
+ * volume: pick a device number, allocate and fill a gendisk, and add it to
+ * the system.  Must be called with vol_table_mutex held (enforced by the
+ * trylock/BUG check below).
+ *
+ * Returns 0 on success, -EBUSY if the requested devnum is taken or the
+ * minor space is exhausted, -ENOMEM if the gendisk allocation fails.
+ */
+int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new)
+{
+ struct ubi_blktrans_ops *tr = new->tr;
+ struct list_head *this;
+ int last_devnum = -1;
+ struct gendisk *gd;
+
+ /* Caller must already hold vol_table_mutex; if we can take it here
+ the locking contract was violated. */
+ if (mutex_trylock(&vol_table_mutex)) {
+ mutex_unlock(&vol_table_mutex);
+ BUG();
+ }
+
+ /* Walk the sorted device list to find/validate a device number. */
+ list_for_each(this, &tr->devs) {
+ struct ubi_blktrans_dev *d = list_entry(this, struct ubi_blktrans_dev, list);
+ if (new->devnum == -1) {
+ /* Use first free number */
+ if (d->devnum != last_devnum+1) {
+ /* Found a free devnum. Plug it in here */
+ new->devnum = last_devnum+1;
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ } else if (d->devnum == new->devnum) {
+ /* Required number taken */
+ return -EBUSY;
+ } else if (d->devnum > new->devnum) {
+ /* Required number was free */
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ last_devnum = d->devnum;
+ }
+ if (new->devnum == -1)
+ new->devnum = last_devnum+1;
+
+ /* 8-bit minor space: devnum shifted by the partition bits must fit.
+ NOTE(review): the comparison uses '> 256' -- confirm whether
+ '>= 256' was intended, since minor 256 itself would not fit. */
+ if ((new->devnum << tr->part_bits) > 256) {
+ return -EBUSY;
+ }
+
+ mutex_init(&new->lock);
+ list_add_tail(&new->list, &tr->devs);
+ added:
+ /* No writesect() hook means the device is inherently read-only. */
+ if (!tr->writesect)
+ new->readonly = 1;
+
+ gd = alloc_disk(1 << tr->part_bits);
+ if (!gd) {
+ list_del(&new->list);
+ return -ENOMEM;
+ }
+ gd->major = tr->major;
+ gd->first_minor = (new->devnum) << tr->part_bits;
+ gd->fops = &ubi_blktrans_ops;
+
+ /* Partitionable devices get hda/hdb-style letter names, otherwise
+ plain "<name><devnum>". */
+ if (tr->part_bits)
+ if (new->devnum < 26)
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c", tr->name, 'a' + new->devnum);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c%c", tr->name,
+ 'a' - 1 + new->devnum / 26,
+ 'a' + new->devnum % 26);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%d", tr->name, new->devnum);
+
+ /* 2.5 has capacity in units of 512 bytes while still
+ having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
+ set_capacity(gd, (new->size * tr->blksize) >> 9);
+
+ gd->private_data = new;
+ new->blkcore_priv = gd;
+ gd->queue = tr->blkcore_priv->rq;
+
+ if (new->readonly)
+ set_disk_ro(gd, 1);
+
+ add_disk(gd);
+
+ return 0;
+}
+
+/*
+ * del_ubi_blktrans_dev - unlink a device from its translation layer's list
+ * and tear down its gendisk.  Must be called with vol_table_mutex held
+ * (same trylock/BUG contract as add_ubi_blktrans_dev()).
+ */
+int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old)
+{
+ if (mutex_trylock(&vol_table_mutex)) {
+ mutex_unlock(&vol_table_mutex);
+ BUG();
+ }
+
+ list_del(&old->list);
+
+ del_gendisk(old->blkcore_priv);
+ put_disk(old->blkcore_priv);
+
+ return 0;
+}
+
+static void blktrans_notify_remove(struct ubi_volume *vol)
+{
+ struct list_head *this, *this2, *next;
+
+ list_for_each(this, &blktrans_majors) {
+ struct ubi_blktrans_ops *tr = list_entry(this, struct
ubi_blktrans_ops, list);
+
+ list_for_each_safe(this2, next, &tr->devs) {
+ struct ubi_blktrans_dev *dev = list_entry(this2, struct
ubi_blktrans_dev, list);
+
+ if (dev->uv->vol == vol)
+ tr->remove_vol(dev);
+ }
+ }
+}
+
+static void blktrans_notify_add(struct ubi_volume *vol)
+{
+ struct list_head *this;
+
+ list_for_each(this, &blktrans_majors) {
+ struct ubi_blktrans_ops *tr = list_entry(this, struct
ubi_blktrans_ops, list);
+
+ tr->add_vol(tr,vol);
+ }
+
+}
+
+static struct vol_notifier blktrans_notifier = {
+ .add = blktrans_notify_add,
+ .remove = blktrans_notify_remove,
+};
+
+
+/*
+ * register_ubi_blktrans - register a UBI block translation layer: allocate
+ * its request queue and worker thread, register a block major, and create
+ * one device per already-present UBI volume.
+ *
+ * NOTE(review): the return value of register_blkdev() is stored into
+ * tr->major but never checked for a negative error; a registration
+ * failure would go unnoticed here -- confirm and add a check.
+ */
+int register_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+ int i;
+
+ /* Register the notifier if/when the first device type is
+ registered, to prevent the link/init ordering from fucking
+ us over. */
+ if (!blktrans_notifier.list.next)
+ register_vol_user(&blktrans_notifier);
+
+ tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
+ if (!tr->blkcore_priv)
+ return -ENOMEM;
+
+ mutex_lock(&vol_table_mutex);
+ /* major 0 => let the kernel pick a free dynamic major. */
+ tr->major = register_blkdev(0, tr->name);
+ spin_lock_init(&tr->blkcore_priv->queue_lock);
+
+ tr->blkcore_priv->rq = blk_init_queue(ubi_blktrans_request,
&tr->blkcore_priv->queue_lock);
+ if (!tr->blkcore_priv->rq) {
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&vol_table_mutex);
+ return -ENOMEM;
+ }
+
+ tr->blkcore_priv->rq->queuedata = tr;
+ blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+ /* blksize is assumed to be a power of two: blkshift = log2(blksize). */
+ tr->blkshift = ffs(tr->blksize) - 1;
+
+ tr->blkcore_priv->thread = kthread_run(ubi_blktrans_thread, tr,
+ "%sd", tr->name);
+ if (IS_ERR(tr->blkcore_priv->thread)) {
+ blk_cleanup_queue(tr->blkcore_priv->rq);
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&vol_table_mutex);
+ return PTR_ERR(tr->blkcore_priv->thread);
+ }
+
+ INIT_LIST_HEAD(&tr->devs);
+ list_add(&tr->list, &blktrans_majors);
+
+ /* Create a block device for every volume that already exists. */
+ for (i=0; i<UBI_MAX_VOLUMES; i++) {
+ if (vol_table[i] )
+ tr->add_vol(tr, vol_table[i]);
+ }
+
+ mutex_unlock(&vol_table_mutex);
+ return 0;
+}
+
+/*
+ * deregister_ubi_blktrans - undo register_ubi_blktrans(): stop the worker
+ * thread, remove every device the layer created, then release the queue
+ * and the block major.
+ *
+ * NOTE(review): the BUG_ON(!list_empty(&tr->devs)) sanity check runs only
+ * after blkcore_priv has already been freed; consider checking before the
+ * teardown so a failure leaves more state to inspect.
+ */
+int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+ struct list_head *this, *next;
+
+ mutex_lock(&vol_table_mutex);
+
+ /* Clean up the kernel thread */
+ kthread_stop(tr->blkcore_priv->thread);
+
+ /* Remove it from the list of active majors */
+ list_del(&tr->list);
+
+ list_for_each_safe(this, next, &tr->devs) {
+ struct ubi_blktrans_dev *dev = list_entry(this, struct
ubi_blktrans_dev, list);
+ tr->remove_vol(dev);
+ }
+
+ blk_cleanup_queue(tr->blkcore_priv->rq);
+ unregister_blkdev(tr->major, tr->name);
+
+ mutex_unlock(&vol_table_mutex);
+
+ kfree(tr->blkcore_priv);
+
+ BUG_ON(!list_empty(&tr->devs));
+ return 0;
+}
+
+static void __exit ubi_blktrans_exit(void)
+{
+ /* No race here -- if someone's currently in register_ubi_blktrans
+ we're screwed anyway. */
+ if (blktrans_notifier.list.next)
+ unregister_vol_user(&blktrans_notifier);
+}
+
+
+module_exit(ubi_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_ubi_blktrans);
+EXPORT_SYMBOL_GPL(deregister_ubi_blktrans);
+EXPORT_SYMBOL_GPL(add_ubi_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_ubi_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Yurong Tan
<nancydreaming@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for UBI
'translation layers'");
+
diff -uprBN ../ubi/build.c ubi/build.c
--- ../ubi/build.c 2008-05-09 07:27:46.000000000 +0800
+++ ubi/build.c 2008-05-13 11:29:55.000000000 +0800
@@ -18,6 +18,7 @@
*
* Author: Artem Bityutskiy (Битюцкий Артём),
* Frank Haverkamp
+ * Yurong tan(Nancy)
*/
/*
@@ -46,6 +47,113 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* add by Nancy begin */
+DEFINE_MUTEX(vol_table_mutex);
+struct ubi_volume *vol_table[UBI_MAX_VOLUMES];
+
+EXPORT_SYMBOL_GPL(vol_table_mutex);
+EXPORT_SYMBOL_GPL(vol_table);
+
+static LIST_HEAD(vol_notifiers);
+
+int add_vol_device(struct ubi_volume *vol)
+{
+ mutex_lock(&vol_table_mutex);
+ if (!vol_table[vol->vol_id]) {
+
+ struct list_head *this;
+ vol_table[vol->vol_id] = vol;
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the vol_table_mutex */
+ list_for_each(this, &vol_notifiers) {
+ struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+ not->add(vol);
+ }
+ mutex_unlock(&vol_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ return 0;
+ }
+ mutex_unlock(&vol_table_mutex);
+ return 1;
+}
+
+/*
+ * del_vol_device - remove a volume from vol_table and tell every
+ * registered notifier about the removal.  Fails with -ENODEV if the table
+ * entry does not match, or -EBUSY while the volume still has users.
+ *
+ * NOTE(review): the printk text says "with use count 0" but this branch is
+ * taken precisely because the use count is NOT zero -- the message text is
+ * misleading (and says "MTD device" for a UBI volume); should be fixed.
+ */
+int del_vol_device (struct ubi_volume *vol)
+{
+ int ret;
+ struct list_head *this;
+
+ mutex_lock(&vol_table_mutex);
+ if (vol_table[vol->vol_id] != vol) {
+ ret = -ENODEV;
+ } else if (vol->readers ||vol->writers || vol->exclusive) {
+ printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count 0\n",
+ vol->vol_id, vol->name);
+ ret = -EBUSY;
+ } else {
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the vol_table_mutex */
+ list_for_each(this, &vol_notifiers) {
+ struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+ not->remove(vol);
+ }
+
+ vol_table[vol->vol_id] = NULL;
+ module_put(THIS_MODULE);
+ ret = 0;
+ }
+ mutex_unlock(&vol_table_mutex);
+ return ret;
+}
+
+void register_vol_user(struct vol_notifier *new)
+{
+ int i;
+
+ mutex_lock(&vol_table_mutex);
+ list_add(&new->list, &vol_notifiers);
+ __module_get(THIS_MODULE);
+
+ for (i=0; i< UBI_MAX_VOLUMES; i++)
+ if (vol_table[i])
+ new->add(vol_table[i]);
+
+ mutex_unlock(&vol_table_mutex);
+}
+
+int unregister_vol_user(struct vol_notifier *old)
+{
+ int i;
+
+ mutex_lock(&vol_table_mutex);
+ module_put(THIS_MODULE);
+
+ for (i=0; i< UBI_MAX_VOLUMES; i++)
+ if (vol_table[i])
+ old->remove(vol_table[i]);
+
+ list_del(&old->list);
+ mutex_unlock(&vol_table_mutex);
+ return 0;
+}
+
+static int bdev_init(struct ubi_device *ubi){
+ int i;
+ for(i=0; i<ubi->vtbl_slots; i++)
+ if(ubi->volumes[i])
+ add_vol_device(ubi->volumes[i]);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(add_vol_device);
+EXPORT_SYMBOL_GPL(del_vol_device);
+EXPORT_SYMBOL_GPL(register_vol_user);
+EXPORT_SYMBOL_GPL(unregister_vol_user);
+/* add by Nancy end*/
+
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD device name or number string
@@ -84,6 +192,7 @@ DEFINE_MUTEX(ubi_devices_mutex);
/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);
+EXPORT_SYMBOL_GPL(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -204,7 +313,8 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if ( (ubi && MAJOR(ubi->cdev.dev) == major) ||
+ (ubi && ubi->bdev_major == major)) {
ubi_num = ubi->ubi_num;
break;
}
@@ -213,6 +323,7 @@ int ubi_major2num(int major)
return ubi_num;
}
+EXPORT_SYMBOL_GPL(ubi_major2num);
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
@@ -763,7 +874,8 @@ int ubi_attach_mtd_dev(struct mtd_info *
mutex_init(&ubi->volumes_mutex);
spin_lock_init(&ubi->volumes_lock);
- ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+ dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
+ mtd->index, ubi_num, vid_hdr_offset);
err = io_init(ubi);
if (err)
@@ -800,6 +912,10 @@ int ubi_attach_mtd_dev(struct mtd_info *
if (err)
goto out_detach;
+ err = bdev_init(ubi);
+ if(err)
+ goto out_detach;
+
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
diff -uprBN ../ubi/cdev.c ubi/cdev.c
--- ../ubi/cdev.c 2008-05-09 07:27:46.000000000 +0800
+++ ubi/cdev.c 2008-05-13 11:51:29.000000000 +0800
@@ -411,6 +411,47 @@ static int vol_cdev_ioctl(struct inode *
void __user *argp = (void __user *)arg;
switch (cmd) {
+ /* Volume dump command */
+ case UBI_IOCLEBDP:
+ {
+ struct ubi_leb_dump dp;
+ int pnum;
+ char *lebbuf;
+
+ if (copy_from_user(&dp, argp, sizeof(struct ubi_leb_dump))){
+ err = -EFAULT;
+ break;
+ }
+
+ pnum = vol->eba_tbl[dp.lnum];
+ if (pnum < 0) {
+ //the LEB is clean, no need dump
+ err = 1;
+ break;
+ }
+
+ lebbuf = kmalloc(vol->ubi->leb_size, GFP_KERNEL);
+ if (!lebbuf){
+ err = -ENOMEM;
+ break;
+ }
+
+ err= ubi_eba_read_leb(ubi, vol, dp.lnum, lebbuf, 0, vol->ubi->leb_size, 0);
+ if (err){
+ kfree(lebbuf);
+ break;
+ }
+
+ err = copy_to_user(dp.lebbuf, lebbuf, vol->ubi->leb_size);
+ if (err) {
+ kfree(lebbuf);
+ err = -EFAULT;
+ break;
+ }
+ kfree(lebbuf);
+ break;
+ }
+
/* Volume update command */
case UBI_IOCVOLUP:
{
diff -uprBN ../ubi/eba.c ubi/eba.c
--- ../ubi/eba.c 2008-05-09 07:27:47.000000000 +0800
+++ ubi/eba.c 2008-05-13 11:18:36.000000000 +0800
@@ -45,7 +45,7 @@
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"
-
+#include "ubiblk.h"
/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1
@@ -1250,3 +1250,140 @@ void ubi_eba_close(const struct ubi_devi
kfree(ubi->volumes[i]->eba_tbl);
}
}
+
+/* add by Nancy begin */
+
+/*
+ * ubiblk_fill_writecache - before the write cache is committed to a new
+ * LEB, back-fill every page (and every untouched subpage of partially
+ * dirty pages) from the existing LEB contents, so the atomic LEB change
+ * writes a complete, consistent eraseblock image.
+ *
+ * NOTE(review): page_buf is a variable-length array on the kernel stack --
+ * "if (!page_buf)" can never be true, so the -ENOMEM path is dead code,
+ * and a large min_io_size risks kernel stack overflow; a kmalloc'd buffer
+ * would be safer.  Also the ubi_leb_read() return values are ignored.
+ */
+static int ubiblk_fill_writecache(struct ubiblk_dev *ubiblk)
+{
+ struct ubi_volume_desc *uv = ubiblk->uv;
+ struct ubi_device *ubi = uv->vol->ubi;
+ int ppb = ubi->leb_size / ubi->min_io_size;
+ unsigned short subpage_shift = 9;
+ unsigned short spp = ubi->min_io_size >> subpage_shift;
+ unsigned short page_shift = ffs(ubi->min_io_size) - 1;
+ unsigned short sectors_in_page_shift = ffs(ubi->min_io_size / 512) - 1;
+ unsigned short page, sector;
+ char page_buf[ubi->min_io_size];
+
+ if (!page_buf)
+ return -ENOMEM;
+
+ for (page = 0; page < ppb; page++) {
+ if ( !ubiblk->page_sts[page]) {
+ /* Page untouched by the cache: copy it verbatim from flash. */
+ ubi_leb_read(uv, ubiblk->vbw,
+ &ubiblk->write_cache[page<<page_shift],
+ page<<page_shift, ubi->min_io_size, 0);
+ }else{
+ /* Page partially dirty: find whether any subpage is clean ... */
+ for(sector = 0; sector < spp; sector++)
+ if( !ubiblk->subpage_sts[(page<<sectors_in_page_shift)+sector] )
+ break;
+ if(sector != spp){
+ /* ... and merge only the clean subpages from flash. */
+ ubi_leb_read(uv, ubiblk->vbw,
+ page_buf,
+ page<<page_shift, ubi->min_io_size, 0);
+ for(sector = 0; sector < spp; sector++)
+ if(!ubiblk->subpage_sts[(page<<sectors_in_page_shift) + sector])
+ memcpy(&ubiblk->write_cache[ \
+ (page<<page_shift)+(sector<<subpage_shift)],
+ &page_buf[sector<<subpage_shift],
+ 512);
+ }
+ }
+ }
+ return 0;
+}
+
+int ubiblk_eba_atomic_leb_change(struct ubi_device *ubi, struct
ubi_volume *vol,
+ int lnum, void *buf, int len, int dtype, struct ubiblk_dev *ubiblk)
+{
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ ubiblk_fill_writecache(ubiblk);
+ mutex_lock(&ubi->alc_mutex);
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ goto out_mutex;
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi, dtype);
+ if (pnum < 0) {
+ err = pnum;
+ goto out_leb_unlock;
+ }
+
+ dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
+ vol_id, lnum, vol->eba_tbl[lnum], pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn("failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ goto write_error;
+ }
+ if (vol->eba_tbl[lnum] >= 0) {
+ err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+ if (err)
+ goto out_leb_unlock;
+ }
+
+ vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+ mutex_unlock(&ubi->alc_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ err = ubi_wl_put_peb(ubi, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ ubi_msg("try another PEB");
+ goto retry;
+}
+
+/* add by Nancy end*/
+
diff -uprBN ../ubi/kapi.c ubi/kapi.c
--- ../ubi/kapi.c 2008-05-09 07:27:47.000000000 +0800
+++ ubi/kapi.c 2008-05-13 11:18:49.000000000 +0800
@@ -24,7 +24,10 @@
#include <linux/err.h>
#include <asm/div64.h>
#include "ubi.h"
+#include "ubiblk.h"
+extern int ubiblk_eba_atomic_leb_change(struct ubi_device *ubi,
struct ubi_volume *vol,
+ int lnum, void *buf, int len, int dtype, struct ubiblk_dev *ubiblk);
/**
* ubi_get_device_info - get information about UBI device.
* @ubi_num: UBI device number
@@ -632,3 +635,138 @@ int ubi_is_mapped(struct ubi_volume_desc
return vol->eba_tbl[lnum] >= 0;
}
EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/* add by Nancy start */
+
+int ubiblk_leb_change(struct ubiblk_dev *ubiblk)
+{
+ struct ubi_volume *vol = ubiblk->uv->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ struct ubi_volume_desc *desc = ubiblk->uv;
+ int lnum = ubiblk->vbw;
+ int len = ubi->leb_size;
+ int dtype = UBI_UNKNOWN;
+ void *buf = ubiblk->write_cache;
+
+ dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+ len > vol->usable_leb_size || len % ubi->min_io_size)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubiblk_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype, ubiblk);
+}
+EXPORT_SYMBOL_GPL(ubiblk_leb_change);
+
+
+void ubi_open_blkdev(int ubi_num, int vol_id, int mode)
+{
+ int err;
+ struct ubi_device *ubi;
+ struct ubi_volume *vol;
+
+ dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return;
+
+ if (mode != UBI_READONLY && mode != UBI_READWRITE &&
+ mode != UBI_EXCLUSIVE)
+ return;
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return;
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
+
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_put_ubi;
+
+ spin_lock(&ubi->volumes_lock);
+ vol = ubi->volumes[vol_id];
+ if (!vol)
+ goto out_unlock;
+
+ err = -EBUSY;
+ switch (mode) {
+ case UBI_READONLY:
+ if (vol->exclusive)
+ goto out_unlock;
+ vol->readers += 1;
+ break;
+
+ case UBI_READWRITE:
+ if (vol->exclusive || vol->writers > 0)
+ goto out_unlock;
+ vol->writers += 1;
+ break;
+
+ case UBI_EXCLUSIVE:
+ if (vol->exclusive || vol->writers || vol->readers)
+ goto out_unlock;
+ vol->exclusive = 1;
+ break;
+ }
+ get_device(&vol->dev);
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+ return;
+
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ module_put(THIS_MODULE);
+out_put_ubi:
+ ubi_put_device(ubi);
+ return;
+}
+EXPORT_SYMBOL_GPL(ubi_open_blkdev);
+
+
+void ubi_close_blkdev(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
+
+ spin_lock(&ubi->volumes_lock);
+ switch (desc->mode) {
+ case UBI_READONLY:
+ vol->readers -= 1;
+ break;
+ case UBI_READWRITE:
+ vol->writers -= 1;
+ break;
+ case UBI_EXCLUSIVE:
+ vol->exclusive = 0;
+ }
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_close_blkdev);
+/* add by Nancy end */
+
diff -uprBN ../ubi/Kconfig ubi/Kconfig
--- ../ubi/Kconfig 2008-05-09 07:27:46.000000000 +0800
+++ ubi/Kconfig 2008-04-02 15:14:11.000000000 +0800
@@ -24,13 +24,8 @@ config MTD_UBI_WL_THRESHOLD
erase counter value and the lowest erase counter value of eraseblocks
of UBI devices. When this threshold is exceeded, UBI starts performing
wear leveling by means of moving data from eraseblock with low erase
- counter to eraseblocks with high erase counter.
-
- The default value should be OK for SLC NAND flashes, NOR flashes and
- other flashes which have eraseblock life-cycle 100000 or more.
- However, in case of MLC NAND flashes which typically have eraseblock
- life-cycle less then 10000, the threshold should be lessened (e.g.,
- to 128 or 256, although it does not have to be power of 2).
+ counter to eraseblocks with high erase counter. Leave the default
+ value if unsure.
config MTD_UBI_BEB_RESERVE
int "Percentage of reserved eraseblocks for bad eraseblocks handling"
@@ -60,4 +55,18 @@ config MTD_UBI_GLUEBI
this if no legacy software will be used.
source "drivers/mtd/ubi/Kconfig.debug"
+
+config MTD_UBI_BLKDEVS
+ tristate "Common interface to block layer for UBI 'translation layers'"
+ depends on BLOCK
+ default n
+
+config MTD_UBI_BLOCK
+ tristate "Emulate block devices"
+ default n
+ depends on MTD_UBI_BLKDEVS
+ help
+ This option enables Block layer emulation on top of UBI volumes: for
+ each UBI volumes an block device is created. This is handy to make
+ traditional filesystem (like ext2, VFAT) work on top of UBI.
endmenu
diff -uprBN ../ubi/Makefile ubi/Makefile
--- ../ubi/Makefile 2008-05-09 07:27:46.000000000 +0800
+++ ubi/Makefile 2008-05-13 11:19:30.000000000 +0800
@@ -5,3 +5,6 @@ ubi-y += misc.o
ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+
+obj-$(CONFIG_MTD_UBI_BLKDEVS) += bdev.o
+obj-$(CONFIG_MTD_UBI_BLOCK) += ubiblk.o
diff -uprBN ../ubi/ubiblk.c ubi/ubiblk.c
--- ../ubi/ubiblk.c 1970-01-01 08:00:00.000000000 +0800
+++ ubi/ubiblk.c 2008-05-13 11:34:21.000000000 +0800
@@ -0,0 +1,347 @@
+/*
+ * Direct UBI block device access
+ *
+ * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
+ * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
+ * (C) 2008 Yurong Tan <nancydreaming@gmail.com> :
+ * borrow mtdblock.c to work on top of UBI
+ */
+
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/freezer.h>
+#include <asm/uaccess.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/hdreg.h>
+#include <linux/mutex.h>
+#include "ubi.h"
+#include "ubiblk.h"
+
+#define UBIBLK_UNMAPPED 0
+#define UBIBLK_SECTOR_SIZE 512
+
+extern void ubi_open_blkdev(int ubi_num, int vol_id, int mode);
+extern void ubi_close_blkdev(struct ubi_volume_desc *desc);
+static void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int
virt_block);
+static int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk);
+extern int ubiblk_leb_change(struct ubiblk_dev *ubiblk);
+
+struct ubiblk_dev *ubiblks[UBI_MAX_VOLUMES];
+static unsigned short subpage_shift;
+
+static int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk)
+{
+ if (STATE_UNUSED == ubiblk->write_cache_state)
+ return 0;
+ ubiblk_leb_change(ubiblk);
+ ubiblk->write_cache_state = STATE_UNUSED;
+
+ return 0;
+}
+
+static void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block)
+{
+ struct ubi_volume_desc *uv = ubiblk->uv;
+ struct ubi_device *ubi = uv->vol->ubi;
+ int ppb = ubi->leb_size / ubi->min_io_size;
+ unsigned short spp = ubi->min_io_size >> subpage_shift;
+
+ ubiblk->vbw = virt_block;
+ ubiblk->write_cache_state = STATE_USED;
+
+ memset(ubiblk->page_sts, 0, ppb);
+ memset(ubiblk->subpage_sts, 0, ppb*spp);
+}
+
+static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector,
+ int len, const char *buf)
+{
+ struct ubi_volume_desc *uv = ubiblk->uv;
+ struct ubi_device *ubi = uv->vol->ubi;
+ int ppb = ubi->leb_size / ubi->min_io_size;
+ unsigned short sectors_per_page = ubi->min_io_size / len;
+ unsigned short sectors_in_page_shift = ffs(sectors_per_page) - 1;
+ unsigned short page_shift = ffs(ubi->min_io_size) - 1;
+ unsigned short virt_block, page, subpage;
+ unsigned long virt_page;
+
+ virt_page = sector / sectors_per_page;
+ subpage = sector % sectors_per_page;
+ virt_block = virt_page / ppb;
+ page = virt_page % ppb;
+
+ if(ubi_is_mapped(uv, virt_block) == UBIBLK_UNMAPPED ){
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ ubiblk_setup_writecache(ubiblk, virt_block);
+ } else {
+ if ( STATE_USED == ubiblk->write_cache_state ) {
+ if ( ubiblk->vbw != virt_block) {
+ // Commit before we start a new cache.
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ ubiblk_setup_writecache(ubiblk, virt_block);
+ } else {
+ //printk("cache hit: 0x%x\n", virt_page);
+ }
+ } else {
+// printk("with existing mapping\n");
+ ubiblk_setup_writecache(ubiblk, virt_block);
+ }
+ }
+ ubiblk->page_sts[page] = 1;
+ ubiblk->subpage_sts[(page<<sectors_in_page_shift) + subpage] = 1;
+ memcpy(&ubiblk->write_cache[(page<<page_shift) +(subpage<<subpage_shift)],
+ buf,len);
+ return 0;
+}
+
+static int do_cached_read (struct ubiblk_dev *ubiblk, unsigned long sector,
+ int len, char *buf)
+{
+ struct ubi_volume_desc *uv = ubiblk->uv;
+ int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
+ unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9;
+ unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1;
+ unsigned short virt_block, page, page_offset;
+ unsigned long virt_page;
+
+ virt_page = sector / sectors_per_page;
+ page_offset = sector % sectors_per_page;
+ virt_block = virt_page / ppb;
+ page = virt_page % ppb;
+
+ if(ubiblk->vbw == virt_block){
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+ }
+
+ if ( ubi_is_mapped( uv, virt_block) == UBIBLK_UNMAPPED){
+ /* In a Flash Memory device, there might be a logical block that is
+ * not allcated to a physical block due to the block not being used.
+ * All data returned should be set to 0xFF when accessing this logical
+ * block.
+ */
+
+ //printk("address translate fail\n");
+ memset(buf, 0xFF, UBIBLK_SECTOR_SIZE);
+ } else {
+
+ if( ubiblk->vbr != virt_block ||ubiblk->read_cache_state == STATE_UNUSED ){
+ ubiblk->vbr = virt_block;
+ ubi_leb_read(uv, virt_block, ubiblk->read_cache, 0,
uv->vol->usable_leb_size, 0);
+ ubiblk->read_cache_state = STATE_USED;
+ }
+ memcpy(buf, &ubiblk->read_cache[(page<<page_shift)+(page_offset<<9)], len);
+ }
+ return 0;
+}
+
+static int ubiblk_readsect(struct ubi_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+ return do_cached_read(ubiblk, block, UBIBLK_SECTOR_SIZE, buf);
+}
+
+static int ubiblk_writesect(struct ubi_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+ return do_cached_write(ubiblk, block, UBIBLK_SECTOR_SIZE, buf);
+}
+
+static int ubiblk_init_vol(int dev, struct ubi_volume_desc *uv)
+{
+ struct ubiblk_dev *ubiblk;
+ struct ubi_device *ubi = uv->vol->ubi;
+ int ppb = ubi->leb_size / ubi->min_io_size;
+ unsigned short spp = ubi->min_io_size >> subpage_shift;
+
+ ubiblk = kmalloc(sizeof(struct ubiblk_dev), GFP_KERNEL);
+ if (!ubiblk)
+ return -ENOMEM;
+
+ memset(ubiblk, 0, sizeof(*ubiblk));
+
+ ubiblk->count = 1;
+ ubiblk->uv = uv;
+ mutex_init (&ubiblk->cache_mutex);
+
+ ubiblk->write_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
+ ubiblk->read_cache = vmalloc(ubiblk->uv->vol->usable_leb_size);
+ ubiblk->page_sts = vmalloc(ppb);
+ ubiblk->subpage_sts = vmalloc(ppb*spp);
+
+ if(!ubiblk->write_cache ||
+ !ubiblk->read_cache ||
+ !ubiblk->page_sts ||
+ !ubiblk->subpage_sts)
+ return -ENOMEM;
+
+ ubiblk->write_cache_state = STATE_UNUSED;
+ ubiblk->read_cache_state = STATE_UNUSED;
+
+ ubiblks[dev] = ubiblk;
+ DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+ return 0;
+}
+
+static int ubiblk_open(struct inode *i, struct file *f )
+{
+ struct ubi_volume_desc *desc;
+ struct ubi_blktrans_dev *dev;
+ int ubi_num = ubi_major2num(imajor(i));
+ int vol_id = iminor(i);
+ int mode;
+ int ret = 0;
+
+ if (f->f_mode & FMODE_WRITE)
+ mode = UBI_READWRITE;
+ else
+ mode = UBI_READONLY;
+
+ dev = i->i_bdev->bd_disk->private_data;
+ if (ubiblks[dev->devnum]) {
+ ubiblks[dev->devnum]->count++;
+ ubi_open_blkdev(ubi_num, vol_id, mode);
+ printk("%s: increase use count\n",__FUNCTION__);
+ return 0;
+ }
+
+ desc = ubi_open_volume(ubi_num, vol_id, mode);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ desc->vol->bdev_mode = mode;
+ dev->uv = desc;
+
+ subpage_shift = ffs(UBIBLK_SECTOR_SIZE)-1;
+ ret = ubiblk_init_vol(dev->devnum, desc);
+ return ret;
+}
+
+/*
+ * ubiblk_release - drop one reference on the per-volume ubiblk device;
+ * on the last close, flush the write cache, free the caches and close the
+ * underlying UBI volume.
+ *
+ * NOTE(review): ubi_close_volume(ubiblk->uv) is called AFTER
+ * kfree(ubiblk) -- a use-after-free of the just-released structure; the
+ * close must happen before the kfree.  Also page_sts/subpage_sts were
+ * vmalloc'd in ubiblk_init_vol() but are never vfree'd here (leak), and
+ * the trailing "return 1" is unreachable.
+ */
+static int ubiblk_release(struct ubi_blktrans_dev *ubd)
+{
+ int dev = ubd->devnum;
+ struct ubiblk_dev *ubiblk = ubiblks[dev];
+ struct ubi_device *ubi = ubiblk->uv->vol->ubi;
+
+ /* Commit any pending cached writes before tearing anything down. */
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ ubiblk->count --;
+ if (!ubiblk->count) {
+ /* It was the last usage. Free the device */
+ ubiblks[dev] = NULL;
+
+ if (ubi->mtd->sync)
+ ubi->mtd->sync(ubi->mtd);
+
+ vfree(ubiblk->write_cache);
+ vfree(ubiblk->read_cache);
+ kfree(ubiblk);
+
+ ubi_close_volume(ubiblk->uv);
+ return 0;
+ }
+ else{
+ printk("%s: decrease use count\n",__FUNCTION__);
+ ubi_close_blkdev(ubiblk->uv);
+ return 0;
+ }
+ return 1;
+}
+static int ubiblk_flush(struct ubi_blktrans_dev *dev)
+{
+ struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+ struct ubi_device *ubi = ubiblk->uv->vol->ubi;
+
+ mutex_lock(&ubiblk->cache_mutex);
+ ubiblk_flush_writecache(ubiblk);
+ mutex_unlock(&ubiblk->cache_mutex);
+
+ if (ubi->mtd->sync)
+ ubi->mtd->sync(ubi->mtd);
+ return 0;
+}
+
+void ubiblk_add_vol_dev(struct ubi_blktrans_ops *tr, struct ubi_volume *vol)
+{
+ struct ubi_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ dev->devnum = vol->vol_id;
+ dev->size = vol->used_bytes >> 9;
+ dev->tr = tr;
+
+ if (vol->bdev_mode == UBI_READONLY)
+ dev->readonly = 1;
+
+ vol->ubi->bdev_major = tr->major;
+
+ add_ubi_blktrans_dev(dev);
+}
+
+void ubiblk_remove_vol_dev(struct ubi_blktrans_dev *dev)
+{
+ del_ubi_blktrans_dev(dev);
+ kfree(dev);
+}
+
+static int ubiblk_getgeo(struct ubi_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ memset(geo, 0, sizeof(*geo));
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->cylinders = dev->size/(4*16);
+ return 0;
+}
+
+static struct ubi_blktrans_ops ubiblk_tr = {
+ .name = "ubiblock",
+ .major = 0,
+ .part_bits = 0,
+ .blksize = UBIBLK_SECTOR_SIZE,
+ .open = ubiblk_open,
+ .release = ubiblk_release,
+ .readsect = ubiblk_readsect,
+ .writesect = ubiblk_writesect,
+ .getgeo = ubiblk_getgeo,
+ .flush = ubiblk_flush,
+ .add_vol = ubiblk_add_vol_dev,
+ .remove_vol = ubiblk_remove_vol_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_ubiblock(void)
+{
+ return register_ubi_blktrans(&ubiblk_tr);
+}
+
+static void __exit cleanup_ubiblock(void)
+{
+ deregister_ubi_blktrans(&ubiblk_tr);
+}
+
+module_init(init_ubiblock);
+module_exit(cleanup_ubiblock);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> , Yurong Tan
<nancydreaming@gmail.com>");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device
emulation access to UBI volumes");
diff -uprBN ../ubi/ubiblk.h ubi/ubiblk.h
--- ../ubi/ubiblk.h 1970-01-01 08:00:00.000000000 +0800
+++ ubi/ubiblk.h 2008-05-13 11:35:44.000000000 +0800
@@ -0,0 +1,85 @@
+/*
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ * (C) 2008 Yurong Tan <nancydreaming@gmail.com>: borrowed from MTD
+ *          blktrans.h for UBI use
+ * Interface to the Linux block layer for UBI 'translation layers'.
+ */
+
+#ifndef __UBI_TRANS_H__
+#define __UBI_TRANS_H__
+
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include "ubi.h"
+
+struct hd_geometry;
+struct ubi_volume_desc;
+struct ubi_blktrans_ops;
+struct file;
+struct inode;
+
+/*
+ * struct ubiblk_dev - per-volume caching state for the ubiblock layer.
+ * One read cache and one write cache, each holding a single virtual block.
+ */
+struct ubiblk_dev {
+ struct ubi_volume_desc *uv; /* opened UBI volume backing this device */
+ int count; /* open reference count */
+ struct mutex cache_mutex; /* protects the caches and state below */
+ unsigned short vbw; /* virtual block number held in the write cache */
+ unsigned short vbr; /* virtual block number held in the read cache */
+
+ unsigned char *write_cache;
+ unsigned char *page_sts; /* per-page status bitmap -- TODO confirm semantics */
+ unsigned char *subpage_sts; /* per-subpage status bitmap -- TODO confirm semantics */
+
+ unsigned char *read_cache;
+ /* whether each cache currently holds valid data */
+ enum { STATE_UNUSED, STATE_USED } read_cache_state, write_cache_state;
+};
+
+/*
+ * struct ubi_blktrans_dev - one block device instance on top of a UBI volume.
+ */
+struct ubi_blktrans_dev {
+ struct ubi_blktrans_ops *tr; /* owning translation layer */
+ struct list_head list; /* link in the layer's device list */
+ struct ubi_volume_desc *uv; /* the opened backing volume */
+ struct mutex lock;
+ int devnum; /* device number (== volume id at creation) */
+ unsigned long size; /* device size in 512-byte sectors */
+ int readonly; /* non-zero if volume opened UBI_READONLY */
+ void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
+};
+
+struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */
+
+/*
+ * struct ubi_blktrans_ops - a block 'translation layer' on top of UBI.
+ * Ported from the MTD blktrans API; one instance per translation layer,
+ * managing a list of ubi_blktrans_dev devices.
+ */
+struct ubi_blktrans_ops {
+ char *name; /* block device base name, e.g. "ubiblock" */
+ int major; /* block major; 0 = allocate dynamically */
+ int part_bits; /* minor bits reserved for partitions */
+ int blksize; /* sector size in bytes */
+ int blkshift; /* log2(blksize) */
+
+ /* Access functions */
+ int (*readsect)(struct ubi_blktrans_dev *dev,
+ unsigned long block, char *buffer);
+ int (*writesect)(struct ubi_blktrans_dev *dev,
+ unsigned long block, char *buffer);
+
+ /* Block layer ioctls */
+ int (*getgeo)(struct ubi_blktrans_dev *dev, struct hd_geometry *geo);
+ int (*flush)(struct ubi_blktrans_dev *dev);
+
+ /* Called with the volume table mutex held (presumably vol_table_mutex
+ in this UBI port; the original MTD comment said mtd_table_mutex --
+ TODO confirm); no race with add/remove */
+ int (*open)(struct inode *i, struct file *f);
+ int (*release)(struct ubi_blktrans_dev *dev);
+
+ /* Called on {de,}registration and on subsequent addition/removal
+ of devices, with the same volume table mutex held. */
+ void (*add_vol)(struct ubi_blktrans_ops *tr, struct ubi_volume *vol);
+ void (*remove_vol)(struct ubi_blktrans_dev *dev);
+
+ struct list_head devs; /* devices belonging to this layer */
+ struct list_head list; /* link in the list of registered layers */
+ struct module *owner;
+
+ struct ubi_blkcore_priv *blkcore_priv; /* core-private per-layer state */
+};
+
+extern int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new);
+extern int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old);
+extern int register_ubi_blktrans(struct ubi_blktrans_ops *tr);
+extern int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr);
+#endif /* __UBI_TRANS_H__ */
diff -uprBN ../ubi/ubi.h ubi/ubi.h
--- ../ubi/ubi.h 2008-05-09 07:27:47.000000000 +0800
+++ ubi/ubi.h 2008-05-13 11:17:47.000000000 +0800
@@ -232,8 +232,16 @@ struct ubi_volume {
int gluebi_refcount;
struct mtd_info gluebi_mtd;
#endif
+ int bdev_mode; //add by Nancy
};
+/*
+ * struct vol_notifier - callbacks invoked when UBI volumes come and go.
+ * Registered/unregistered via register_vol_user()/unregister_vol_user();
+ * @list links the notifier into UBI's internal list of volume users.
+ */
+struct vol_notifier {
+ void (*add)(struct ubi_volume *vol);
+ void (*remove)(struct ubi_volume *vol);
+ struct list_head list;
+};
+
+
/**
* struct ubi_volume_desc - descriptor of the UBI volume returned when it is
* opened.
@@ -336,6 +344,7 @@ struct ubi_wl_entry;
*/
struct ubi_device {
struct cdev cdev;
+ int bdev_major; //add by Nancy
struct device dev;
int ubi_num;
char ubi_name[sizeof(UBI_NAME_STR)+5];
@@ -504,7 +513,7 @@ int ubi_io_read_vid_hdr(struct ubi_devic
struct ubi_vid_hdr *vid_hdr, int verbose);
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
-
+
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
Signed-off-by: Yurong Tan (Nancy)<nancydreaming@gmail.com>
--
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-05-13 4:16 ` Nancy
@ 2008-05-13 9:53 ` Artem Bityutskiy
2008-05-13 13:31 ` Nancy
0 siblings, 1 reply; 14+ messages in thread
From: Artem Bityutskiy @ 2008-05-13 9:53 UTC (permalink / raw)
To: Nancy; +Cc: linux-mtd
On Tue, 2008-05-13 at 12:16 +0800, Nancy wrote:
> Hi all,
> I think it is my final edition of ubi block device layer code
> based on UBI commit e442c48f84982d0fa10c6b292018241dafca4d65
> Finally, it can support any filesystem based on block device layer.
> eg: FAT, ext2....
> #modprobe ubiblk
> #mkfs.vfat /dev/ubiblock1
> #mount -t vfat /dev/ubiblock1 /mnt/fat
>
> I notice there many people need this function especially our
> Chinese. Hope this helpful :-)
> Here's my implementation:
What is the reason you added this "generic" ubi mtd block stuff? What
for? Also, UBI should not have any explicit block device support - it
should be isolated. It is to ok to add some general support for devices
on top of UBI, like notification about volume size change and so on,
but not explicit block layer support. Gluebi should also get a separate
module, ideally.
Also, the patch suffers from too direct copy-pastes which is not
acceptable.
--
Best regards,
Artem Bityutskiy (Битюцкий Артём)
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-05-13 9:53 ` Artem Bityutskiy
@ 2008-05-13 13:31 ` Nancy
2008-05-14 13:50 ` Artem Bityutskiy
2008-05-14 18:15 ` N Cheung
0 siblings, 2 replies; 14+ messages in thread
From: Nancy @ 2008-05-13 13:31 UTC (permalink / raw)
To: dedekind; +Cc: linux-mtd
On Tue, May 13, 2008 at 5:53 PM, Artem Bityutskiy
<dedekind@infradead.org> wrote:
> On Tue, 2008-05-13 at 12:16 +0800, Nancy wrote:
> > Hi all,
> > I think it is my final edition of ubi block device layer code
> > based on UBI commit e442c48f84982d0fa10c6b292018241dafca4d65
> > Finally, it can support any filesystem based on block device layer.
> > eg: FAT, ext2....
> > #modprobe ubiblk
> > #mkfs.vfat /dev/ubiblock1
> > #mount -t vfat /dev/ubiblock1 /mnt/fat
> >
> > I notice there many people need this function especially our
> > Chinese. Hope this helpful :-)
> > Here's my implementation:
>
> What is the reason you added this "generic" ubi mtd block stuff? What
> for?
I though you must know the answer better than any others. Here's
your Big red note
"People are often confused and treat UBI as a block device emulation
layer (also known as FTL - flash translation layer). But this is not
true - UBI is not an FTL."
I used to be one of those people. There are many people like me
find an good NFTL layer code in OpenSource. But none! UBI complete
the main part of the NFTL job and done well. In fact I want to ask you
why do not go further to make it a good NFTL, not just for UBIFS use,
but other filesystem based on block device layer.
In the embedded system world, this function is very important. mp3,
mp4, mobile phone, study machine...... many many devices need connect
with PC with USB line. There are huge numbers of people using
Windows. FAT should be supported.
Also, some customer like to use Reiserfs. As a BSP Nand flash
driver supporter, I have to meet there need. That's why I add this
"generic" ubi block layer.
> Also, UBI should not have any explicit block device support - it
> should be isolated. It is to ok to add some general support for devices
> on top of UBI, like notification about volume size change and so on,
Good!
> but not explicit block layer support.
Why not? Why you so hate block layer?
Gluebi should also get a separate
> module, ideally.
In fact, we do not need Gluebi, What Glubi for? for jffs2 on top of
UBI? I'd rather use Jffs2 on top of MTD directly. It really not
neccessary to use 2 or more Nand awared filesystems. Just pick the
best one, let's say UBIFS, enough!
> Also, the patch suffers from too direct copy-pastes which is not
> acceptable.
Oh, Is that a rule? Too direct copy-pastes is what I called code
reused. Complete the task is more important! Too direct copy-pastes
also shows my high respect to the original code author.
MTD and UBI should be family, Why the developers here are so ....?
Hope my patch make them family again! Please cherrish the peace and
signed off this patch to rescue the people who are looking for NFTL.
If you do mind the copyright, you can wipe out my name, that's OK, I
do not care much.
Or you do those job all by yourself again. But please, do not ignore
NFTL requirement!
---
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-05-13 13:31 ` Nancy
@ 2008-05-14 13:50 ` Artem Bityutskiy
2008-05-15 4:59 ` Nancy
2008-05-14 18:15 ` N Cheung
1 sibling, 1 reply; 14+ messages in thread
From: Artem Bityutskiy @ 2008-05-14 13:50 UTC (permalink / raw)
To: Nancy; +Cc: linux-mtd
On Tue, 2008-05-13 at 21:31 +0800, Nancy wrote:
> I though you must know the answer better than any others. Here's
> your Big red note
... (snip) ...
> In embended system world, this function is very important. mp3,
Please, no need to send lengthy explanations like this. I have nothing
against a good FTL. On the contrary, I think it would be nice to develop one.
I just think the patches you send are not good enough and need more work
if you want them to be merged upstream.
> MTD and UBI should be family, Why the developers here are so ....?
... (snip) ...
Please, no need to flame. Please, make technical arguments instead.
Thank you!
--
Best regards,
Artem Bityutskiy (Битюцкий Артём)
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-05-13 13:31 ` Nancy
2008-05-14 13:50 ` Artem Bityutskiy
@ 2008-05-14 18:15 ` N Cheung
2008-05-15 5:54 ` Nancy
1 sibling, 1 reply; 14+ messages in thread
From: N Cheung @ 2008-05-14 18:15 UTC (permalink / raw)
To: Nancy; +Cc: linux-mtd
On Tue, May 13, 2008 at 6:31 AM, Nancy <nancydreaming@gmail.com> wrote:
> .. But none! UBI complete
> the main part of the NFTL job and done well. In fact I want to ask you
> why do not go further to make it a good NFTL, not just for UBIFS use,
> but other filesystem based on block device layer.
..
> Also, some customer like to use Reiserfs. As a BSP Nand flash
> driver supporter, I have to meet there need. That's why I add this
> "generic" ubi block layer.
>
Nancy and Artem,
I want to run cramfs or squashfs on NAND. It looks like this ubiblock
may be the solution to my search. Or perhaps there is a NFTL layer
that manages bad blocks. Any suggestions?
Thanks,
Norman
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-05-14 13:50 ` Artem Bityutskiy
@ 2008-05-15 4:59 ` Nancy
0 siblings, 0 replies; 14+ messages in thread
From: Nancy @ 2008-05-15 4:59 UTC (permalink / raw)
To: dedekind; +Cc: linux-mtd
On 5/14/08, Artem Bityutskiy <dedekind@infradead.org> wrote:
> Please, no need to send lengthy explanations like this. I have nothing
> agains a good FTL. In opposite, I think it would be nice to develop one.
> I just think the patches you send are not good enough and need more work
> if you want them to be merged upstream.
OK. please show your opinion about the patches, where need to
improve. I know the code style is too different and lacks code comments,
........
You can teach me to meet your requriement. If you think it's more
quick to do it by yourself, just need me to explain some place you do
not clear. OK. All I am asking for is to merge this new feature
upstream as soon as possible. I do not care copyright much. In fact I
will be so happy to see you turn those code to your style. I mean I am
lack of professional program skill. I need someone like you guide me
to be professional, to improve my skill. Nothing better than implement
the same idea but different code formulate to see the ..... sorry, i
don't know how to express, I think you knows : )
> Please, no need to flame. Please, make technical arguments instead.
>
> Thank you!
In fact, the patches should be more easier done by UBI awared
and Block device layer awared developer. As you can see in my patches,
90% was directly copy-pasted from mtdblock.c, mtd_blktrans.c, and some
sections of UBI code. I was wondering why Artem Bityutskiy, David
Woodhouse, Nicolas Pitre do not unite together to finish a good NFTL
which is so in need by lots of people. To make linux better and
better. Then I see it may be imply a competition in you. Oh, come on,
you are all so greate! all the best!
--
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH] [MTD] [UBI] add block device layer on top of UBI
2008-05-14 18:15 ` N Cheung
@ 2008-05-15 5:54 ` Nancy
0 siblings, 0 replies; 14+ messages in thread
From: Nancy @ 2008-05-15 5:54 UTC (permalink / raw)
To: N Cheung; +Cc: linux-mtd
Hi, Norman
On 5/15/08, N Cheung <brjerome.1@gmail.com> wrote:
> Nancy and Artem,
>
> I want to run cramfs or squashfs on NAND. It looks like this ubiblock
> may be the solution to my search. Or perhaps there is a NFTL layer
> that manages bad blocks. Any suggestions?
In my opinion, the Nand awared filesystem is the best choice
for you. JFFS2, YAFFS2, UBIFS are all Nand awared filesystem. They are
all run on Character device layer.
The UBI block device layer was created for traditional
filesystems based on the block device layer, such as FAT. Although UBI
tolerates power failure, that does not mean FAT is completely safe.
Sometimes, FAT will turn to readonly if it detect an file EOF missing.
In that case, you should use tool "dosfsck" to fix this problem.
Maybe cramfs or squashfs may have the same problem like FAT has
or other problem. To be safe, please use Nand awared filesystem
instead. Use ubi block device layer when you have no other choices.
--
Best wishes,
Nancy
^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2008-05-15 5:54 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-03-03 11:17 [PATCH] [MTD] [UBI] add block device layer on top of UBI Nancy
2008-03-04 9:40 ` Nancy
2008-03-04 9:40 ` Artem Bityutskiy
2008-03-04 9:54 ` Nancy
2008-03-04 9:53 ` Artem Bityutskiy
2008-03-04 10:07 ` Nancy
2008-03-04 13:55 ` Nancy
2008-05-13 4:16 ` Nancy
2008-05-13 9:53 ` Artem Bityutskiy
2008-05-13 13:31 ` Nancy
2008-05-14 13:50 ` Artem Bityutskiy
2008-05-15 4:59 ` Nancy
2008-05-14 18:15 ` N Cheung
2008-05-15 5:54 ` Nancy
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox