linux-scsi.vger.kernel.org archive mirror
* [PATCH] add bsg queue resize
@ 2007-01-20 14:25 FUJITA Tomonori
  2007-01-21  4:09 ` Jens Axboe
  2007-01-23  8:34 ` Jens Axboe
  0 siblings, 2 replies; 6+ messages in thread
From: FUJITA Tomonori @ 2007-01-20 14:25 UTC (permalink / raw)
  To: linux-scsi; +Cc: jens.axboe

This enables bsg to resize the queue depth via
SG_SET_COMMAND_Q. bsg_command structures are allocated via mempool
because the previous way to use contiguous memory makes it difficult
to resize the queue depth when a bsg_device has outstanding commands.
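
For illustration, a minimal userspace sketch of driving this interface
could look like the following (a sketch only: the bsg device node path
is a placeholder and error handling is kept to a bare minimum):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	int main(void)
	{
		int fd, depth;

		/* placeholder node name; the real name depends on udev setup */
		fd = open("/dev/bsg/0:0:0:0", O_RDWR);
		if (fd < 0)
			return 1;

		/* query the current per-device queue depth */
		if (ioctl(fd, SG_GET_COMMAND_Q, &depth) == 0)
			printf("queue depth: %d\n", depth);

		/* request a deeper queue; values below 1 are rejected */
		depth = 128;
		if (ioctl(fd, SG_SET_COMMAND_Q, &depth) < 0)
			perror("SG_SET_COMMAND_Q");

		close(fd);
		return 0;
	}

With the mempool approach, SG_SET_COMMAND_Q resizes the pool first and
only then updates bd->max_queue under the lock, so the new depth can
take effect while commands are still outstanding.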

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
---
 block/bsg.c |  112 +++++++++++++++++++++++-----------------------------------
 1 files changed, 45 insertions(+), 67 deletions(-)

diff --git a/block/bsg.c b/block/bsg.c
index 9d77a0c..af46f54 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -33,8 +33,6 @@ #include <scsi/sg.h>
 
 static char bsg_version[] = "block layer sg (bsg) 0.4";
 
-struct bsg_command;
-
 struct bsg_device {
 	struct gendisk *disk;
 	request_queue_t *queue;
@@ -46,8 +44,7 @@ struct bsg_device {
 	int minor;
 	int queued_cmds;
 	int done_cmds;
-	unsigned long *cmd_bitmap;
-	struct bsg_command *cmd_map;
+	mempool_t *bsg_cmd_q;
 	wait_queue_head_t wq_done;
 	wait_queue_head_t wq_free;
 	char name[BDEVNAME_SIZE];
@@ -55,19 +52,26 @@ struct bsg_device {
 	unsigned long flags;
 };
 
+/*
+ * our internal command type
+ */
+struct bsg_command {
+	struct bsg_device *bd;
+	struct list_head list;
+	struct request *rq;
+	struct bio *bio;
+	int err;
+	struct sg_io_v4 hdr;
+	struct sg_io_v4 __user *uhdr;
+	char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
 enum {
 	BSG_F_BLOCK		= 1,
 	BSG_F_WRITE_PERM	= 2,
 };
 
-/*
- * command allocation bitmap defines
- */
-#define BSG_CMDS_PAGE_ORDER	(1)
-#define BSG_CMDS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BSG_CMDS_MASK		(BSG_CMDS_PER_LONG - 1)
-#define BSG_CMDS_BYTES		(PAGE_SIZE * (1 << BSG_CMDS_PAGE_ORDER))
-#define BSG_CMDS		(BSG_CMDS_BYTES / sizeof(struct bsg_command))
+#define BSG_DEFAULT_CMDS	(64)
 
 #undef BSG_DEBUG
 
@@ -94,31 +98,18 @@ static struct hlist_head bsg_device_list
 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
 
-/*
- * our internal command type
- */
-struct bsg_command {
-	struct bsg_device *bd;
-	struct list_head list;
-	struct request *rq;
-	struct bio *bio;
-	int err;
-	struct sg_io_v4 hdr;
-	struct sg_io_v4 __user *uhdr;
-	char sense[SCSI_SENSE_BUFFERSIZE];
-};
+static struct kmem_cache *bsg_cmd_cachep;
 
 static void bsg_free_command(struct bsg_command *bc)
 {
 	struct bsg_device *bd = bc->bd;
-	unsigned long bitnr = bc - bd->cmd_map;
 	unsigned long flags;
 
-	dprintk("%s: command bit offset %lu\n", bd->name, bitnr);
+	dprintk("%s: command free %p\n", bd->name, bc);
 
+	mempool_free(bc, bd->bsg_cmd_q);
 	spin_lock_irqsave(&bd->lock, flags);
 	bd->queued_cmds--;
-	__clear_bit(bitnr, bd->cmd_bitmap);
 	spin_unlock_irqrestore(&bd->lock, flags);
 
 	wake_up(&bd->wq_free);
@@ -127,29 +118,18 @@ static void bsg_free_command(struct bsg_
 static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;
-	unsigned long *map;
-	int free_nr;
 
 	spin_lock_irq(&bd->lock);
-
 	if (bd->queued_cmds >= bd->max_queue)
 		goto out;
-
-	for (free_nr = 0, map = bd->cmd_bitmap; *map == ~0UL; map++)
-		free_nr += BSG_CMDS_PER_LONG;
-
-	BUG_ON(*map == ~0UL);
-
 	bd->queued_cmds++;
-	free_nr += ffz(*map);
-	__set_bit(free_nr, bd->cmd_bitmap);
 	spin_unlock_irq(&bd->lock);
 
-	bc = bd->cmd_map + free_nr;
+	bc = mempool_alloc(bd->bsg_cmd_q, GFP_NOWAIT);
 	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
-	dprintk("%s: returning free cmd %p (bit %d)\n", bd->name, bc, free_nr);
+	dprintk("%s: returning free cmd %p\n", bd->name, bc);
 	return bc;
 out:
 	dprintk("%s: failed (depth %d)\n", bd->name, bd->queued_cmds);
@@ -356,8 +336,8 @@ static void bsg_rq_end_io(struct request
 	struct bsg_device *bd = bc->bd;
 	unsigned long flags;
 
-	dprintk("%s: finished rq %p bc %p, bio %p offset %Zd stat %d\n",
-		bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate);
+	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+		bd->name, rq, bc, bc->bio, uptodate);
 
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
@@ -705,19 +685,13 @@ bsg_write(struct file *file, const char
 
 static void bsg_free_device(struct bsg_device *bd)
 {
-	if (bd->cmd_map)
-		free_pages((unsigned long) bd->cmd_map, BSG_CMDS_PAGE_ORDER);
-
-	kfree(bd->cmd_bitmap);
+	mempool_destroy(bd->bsg_cmd_q);
 	kfree(bd);
 }
 
 static struct bsg_device *bsg_alloc_device(void)
 {
-	struct bsg_command *cmd_map;
-	unsigned long *cmd_bitmap;
 	struct bsg_device *bd;
-	int bits;
 
 	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
 	if (unlikely(!bd))
@@ -725,19 +699,10 @@ static struct bsg_device *bsg_alloc_devi
 
 	spin_lock_init(&bd->lock);
 
-	bd->max_queue = BSG_CMDS;
-
-	bits = (BSG_CMDS / BSG_CMDS_PER_LONG) + 1;
-	cmd_bitmap = kzalloc(bits * sizeof(unsigned long), GFP_KERNEL);
-	if (!cmd_bitmap)
+	bd->max_queue = BSG_DEFAULT_CMDS;
+	bd->bsg_cmd_q = mempool_create_slab_pool(bd->max_queue, bsg_cmd_cachep);
+	if (unlikely(!bd->bsg_cmd_q))
 		goto out_free_bd;
-	bd->cmd_bitmap = cmd_bitmap;
-
-	cmd_map = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						BSG_CMDS_PAGE_ORDER);
-	if (!cmd_map)
-		goto out_free_bitmap;
-	bd->cmd_map = cmd_map;
 
 	INIT_LIST_HEAD(&bd->busy_list);
 	INIT_LIST_HEAD(&bd->done_list);
@@ -747,8 +712,6 @@ static struct bsg_device *bsg_alloc_devi
 	init_waitqueue_head(&bd->wq_done);
 	return bd;
 
-out_free_bitmap:
-	kfree(cmd_bitmap);
 out_free_bd:
 	kfree(bd);
 	return NULL;
@@ -918,15 +881,22 @@ bsg_ioctl(struct inode *inode, struct fi
 		 */
 	case SG_GET_COMMAND_Q:
 		return put_user(bd->max_queue, uarg);
-		case SG_SET_COMMAND_Q: {
-		int queue;
+	case SG_SET_COMMAND_Q: {
+		int queue, ret;
 
 		if (get_user(queue, uarg))
 			return -EFAULT;
-		if (queue > BSG_CMDS || queue < 1)
+
+		if (queue < 1)
 			return -EINVAL;
 
+		ret = mempool_resize(bd->bsg_cmd_q, queue, GFP_KERNEL);
+		if (ret)
+			return ret;
+
+		spin_lock_irq(&bd->lock);
 		bd->max_queue = queue;
+		spin_unlock_irq(&bd->lock);
 		return 0;
 	}
 
@@ -1048,6 +1018,14 @@ static int __init bsg_init(void)
 		return ret;
 	}
 
+	bsg_cmd_cachep = kmem_cache_create("bsg_cmd", sizeof(struct bsg_command),
+					   0, 0, NULL, NULL);
+	if (!bsg_cmd_cachep) {
+		class_destroy(bsg_class);
+		unregister_chrdev(BSG_MAJOR, "bsg");
+		return -ENOMEM;
+	}
+
 	printk(KERN_INFO "%s loaded\n", bsg_version);
 	return 0;
 }
-- 
1.4.3.2


* Re: [PATCH] add bsg queue resize
  2007-01-20 14:25 [PATCH] add bsg queue resize FUJITA Tomonori
@ 2007-01-21  4:09 ` Jens Axboe
  2007-01-23  8:34 ` Jens Axboe
  1 sibling, 0 replies; 6+ messages in thread
From: Jens Axboe @ 2007-01-21  4:09 UTC (permalink / raw)
  To: FUJITA Tomonori; +Cc: linux-scsi

On Sat, Jan 20 2007, FUJITA Tomonori wrote:
> This enables bsg to resize the queue depth via
> SG_SET_COMMAND_Q. bsg_command structures are allocated via mempool
> because the previous way to use contiguous memory makes it difficult
> to resize the queue depth when a bsg_device has outstanding commands.

You work quickly! I'll give this a review during the flight back; if
everything is alright, I'll merge it up. Thanks!

-- 
Jens Axboe


* Re: [PATCH] add bsg queue resize
  2007-01-20 14:25 [PATCH] add bsg queue resize FUJITA Tomonori
  2007-01-21  4:09 ` Jens Axboe
@ 2007-01-23  8:34 ` Jens Axboe
  2007-01-23 15:23   ` Jens Axboe
  1 sibling, 1 reply; 6+ messages in thread
From: Jens Axboe @ 2007-01-23  8:34 UTC (permalink / raw)
  To: FUJITA Tomonori; +Cc: linux-scsi

On Sat, Jan 20 2007, FUJITA Tomonori wrote:
> This enables bsg to resize the queue depth via
> SG_SET_COMMAND_Q. bsg_command structures are allocated via mempool
> because the previous way to use contiguous memory makes it difficult
> to resize the queue depth when a bsg_device has outstanding commands.

Overall the patch looks fine. I don't think we need a mempool though;
allocations could just use GFP_USER from the user-invoked queuing
paths. Just make it GFP_USER; we can always extend
bsg_alloc_command() to take a gfp_t argument as well. If you get rid
of the mempool, then resizing is simply a matter of adjusting
bd->max_queue.
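
For reference, a rough sketch of what that boils down to (the complete
patch in the follow-up message below is the authoritative version):

	/* allocation path: plain slab allocation, no mempool */
	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		goto out;
	}

	/* SG_SET_COMMAND_Q then only has to adjust the soft limit */
	spin_lock_irq(&bd->lock);
	bd->max_queue = queue;
	spin_unlock_irq(&bd->lock);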

-- 
Jens Axboe


* Re: [PATCH] add bsg queue resize
  2007-01-23  8:34 ` Jens Axboe
@ 2007-01-23 15:23   ` Jens Axboe
  2007-01-24  2:25     ` FUJITA Tomonori
  0 siblings, 1 reply; 6+ messages in thread
From: Jens Axboe @ 2007-01-23 15:23 UTC (permalink / raw)
  To: FUJITA Tomonori; +Cc: linux-scsi

On Tue, Jan 23 2007, Jens Axboe wrote:
> On Sat, Jan 20 2007, FUJITA Tomonori wrote:
> > This enables bsg to resize the queue depth via
> > SG_SET_COMMAND_Q. bsg_command structures are allocated via mempool
> > because the previous way to use contiguous memory makes it difficult
> > to resize the queue depth when a bsg_device has outstanding commands.
> 
> Overall the patch looks fine. I don't think we need a mempool though;
> allocations could just use GFP_USER from the user-invoked queuing
> paths. Just make it GFP_USER; we can always extend
> bsg_alloc_command() to take a gfp_t argument as well. If you get rid
> of the mempool, then resizing is simply a matter of adjusting
> bd->max_queue.

Like so.

diff --git a/block/bsg.c b/block/bsg.c
index 9d77a0c..c56618a 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -33,8 +33,6 @@
 
 static char bsg_version[] = "block layer sg (bsg) 0.4";
 
-struct bsg_command;
-
 struct bsg_device {
 	struct gendisk *disk;
 	request_queue_t *queue;
@@ -46,8 +44,6 @@ struct bsg_device {
 	int minor;
 	int queued_cmds;
 	int done_cmds;
-	unsigned long *cmd_bitmap;
-	struct bsg_command *cmd_map;
 	wait_queue_head_t wq_done;
 	wait_queue_head_t wq_free;
 	char name[BDEVNAME_SIZE];
@@ -60,14 +56,7 @@ enum {
 	BSG_F_WRITE_PERM	= 2,
 };
 
-/*
- * command allocation bitmap defines
- */
-#define BSG_CMDS_PAGE_ORDER	(1)
-#define BSG_CMDS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BSG_CMDS_MASK		(BSG_CMDS_PER_LONG - 1)
-#define BSG_CMDS_BYTES		(PAGE_SIZE * (1 << BSG_CMDS_PAGE_ORDER))
-#define BSG_CMDS		(BSG_CMDS_BYTES / sizeof(struct bsg_command))
+#define BSG_DEFAULT_CMDS	64
 
 #undef BSG_DEBUG
 
@@ -94,6 +83,8 @@ static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
 
+static struct kmem_cache *bsg_cmd_cachep;
+
 /*
  * our internal command type
  */
@@ -111,14 +102,12 @@ struct bsg_command {
 static void bsg_free_command(struct bsg_command *bc)
 {
 	struct bsg_device *bd = bc->bd;
-	unsigned long bitnr = bc - bd->cmd_map;
 	unsigned long flags;
 
-	dprintk("%s: command bit offset %lu\n", bd->name, bitnr);
+	kmem_cache_free(bsg_cmd_cachep, bc);
 
 	spin_lock_irqsave(&bd->lock, flags);
 	bd->queued_cmds--;
-	__clear_bit(bitnr, bd->cmd_bitmap);
 	spin_unlock_irqrestore(&bd->lock, flags);
 
 	wake_up(&bd->wq_free);
@@ -127,32 +116,29 @@ static void bsg_free_command(struct bsg_command *bc)
 static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;
-	unsigned long *map;
-	int free_nr;
 
 	spin_lock_irq(&bd->lock);
 
 	if (bd->queued_cmds >= bd->max_queue)
 		goto out;
 
-	for (free_nr = 0, map = bd->cmd_bitmap; *map == ~0UL; map++)
-		free_nr += BSG_CMDS_PER_LONG;
-
-	BUG_ON(*map == ~0UL);
-
 	bd->queued_cmds++;
-	free_nr += ffz(*map);
-	__set_bit(free_nr, bd->cmd_bitmap);
 	spin_unlock_irq(&bd->lock);
 
-	bc = bd->cmd_map + free_nr;
+	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	if (unlikely(!bc)) {
+		spin_lock_irq(&bd->lock);
+		goto alloc_fail;
+	}
+
 	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
-	dprintk("%s: returning free cmd %p (bit %d)\n", bd->name, bc, free_nr);
+	dprintk("%s: returning free cmd %p\n", bd->name, bc);
 	return bc;
+alloc_fail:
+	bd->queued_cmds--;
 out:
-	dprintk("%s: failed (depth %d)\n", bd->name, bd->queued_cmds);
 	spin_unlock_irq(&bd->lock);
 	return bc;
 }
@@ -356,8 +342,8 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 	struct bsg_device *bd = bc->bd;
 	unsigned long flags;
 
-	dprintk("%s: finished rq %p bc %p, bio %p offset %Zd stat %d\n",
-		bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate);
+	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+		bd->name, rq, bc, bc->bio, uptodate);
 
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
@@ -703,21 +689,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 	return bytes_read;
 }
 
-static void bsg_free_device(struct bsg_device *bd)
-{
-	if (bd->cmd_map)
-		free_pages((unsigned long) bd->cmd_map, BSG_CMDS_PAGE_ORDER);
-
-	kfree(bd->cmd_bitmap);
-	kfree(bd);
-}
-
 static struct bsg_device *bsg_alloc_device(void)
 {
-	struct bsg_command *cmd_map;
-	unsigned long *cmd_bitmap;
 	struct bsg_device *bd;
-	int bits;
 
 	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
 	if (unlikely(!bd))
@@ -725,19 +699,7 @@ static struct bsg_device *bsg_alloc_device(void)
 
 	spin_lock_init(&bd->lock);
 
-	bd->max_queue = BSG_CMDS;
-
-	bits = (BSG_CMDS / BSG_CMDS_PER_LONG) + 1;
-	cmd_bitmap = kzalloc(bits * sizeof(unsigned long), GFP_KERNEL);
-	if (!cmd_bitmap)
-		goto out_free_bd;
-	bd->cmd_bitmap = cmd_bitmap;
-
-	cmd_map = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						BSG_CMDS_PAGE_ORDER);
-	if (!cmd_map)
-		goto out_free_bitmap;
-	bd->cmd_map = cmd_map;
+	bd->max_queue = BSG_DEFAULT_CMDS;
 
 	INIT_LIST_HEAD(&bd->busy_list);
 	INIT_LIST_HEAD(&bd->done_list);
@@ -746,12 +708,6 @@ static struct bsg_device *bsg_alloc_device(void)
 	init_waitqueue_head(&bd->wq_free);
 	init_waitqueue_head(&bd->wq_done);
 	return bd;
-
-out_free_bitmap:
-	kfree(cmd_bitmap);
-out_free_bd:
-	kfree(bd);
-	return NULL;
 }
 
 static int bsg_put_device(struct bsg_device *bd)
@@ -779,7 +735,7 @@ static int bsg_put_device(struct bsg_device *bd)
 
 	blk_put_queue(bd->queue);
 	hlist_del(&bd->dev_list);
-	bsg_free_device(bd);
+	kfree(bd);
 out:
 	mutex_unlock(&bsg_mutex);
 	return ret;
@@ -918,15 +874,17 @@ bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 		 */
 	case SG_GET_COMMAND_Q:
 		return put_user(bd->max_queue, uarg);
-		case SG_SET_COMMAND_Q: {
+	case SG_SET_COMMAND_Q: {
 		int queue;
 
 		if (get_user(queue, uarg))
 			return -EFAULT;
-		if (queue > BSG_CMDS || queue < 1)
+		if (queue < 1)
 			return -EINVAL;
 
+		spin_lock_irq(&bd->lock);
 		bd->max_queue = queue;
+		spin_unlock_irq(&bd->lock);
 		return 0;
 	}
 
@@ -1035,15 +993,25 @@ static int __init bsg_init(void)
 {
 	int ret, i;
 
+	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
+				sizeof(struct bsg_command), 0, 0, NULL, NULL);
+	if (!bsg_cmd_cachep) {
+		printk(KERN_ERR "bsg: failed creating slab cache\n");
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < BSG_LIST_SIZE; i++)
 		INIT_HLIST_HEAD(&bsg_device_list[i]);
 
 	bsg_class = class_create(THIS_MODULE, "bsg");
-	if (IS_ERR(bsg_class))
+	if (IS_ERR(bsg_class)) {
+		kmem_cache_destroy(bsg_cmd_cachep);
 		return PTR_ERR(bsg_class);
+	}
 
 	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
 	if (ret) {
+		kmem_cache_destroy(bsg_cmd_cachep);
 		class_destroy(bsg_class);
 		return ret;
 	}


-- 
Jens Axboe


* Re: [PATCH] add bsg queue resize
  2007-01-23 15:23   ` Jens Axboe
@ 2007-01-24  2:25     ` FUJITA Tomonori
  2007-01-24  8:04       ` Jens Axboe
  0 siblings, 1 reply; 6+ messages in thread
From: FUJITA Tomonori @ 2007-01-24  2:25 UTC (permalink / raw)
  To: jens.axboe; +Cc: fujita.tomonori, linux-scsi

From: Jens Axboe <jens.axboe@oracle.com>
Subject: Re: [PATCH] add bsg queue resize
Date: Tue, 23 Jan 2007 16:23:49 +0100

> On Tue, Jan 23 2007, Jens Axboe wrote:
> > On Sat, Jan 20 2007, FUJITA Tomonori wrote:
> > > This enables bsg to resize the queue depth via
> > > SG_SET_COMMAND_Q. bsg_command structures are allocated via mempool
> > > because the previous way to use contiguous memory makes it difficult
> > > to resize the queue depth when a bsg_device has outstanding commands.
> > 
> > Overall the patch looks fine. I don't think we need a mempool though;
> > allocations could just use GFP_USER from the user-invoked queuing
> > paths. Just make it GFP_USER; we can always extend
> > bsg_alloc_command() to take a gfp_t argument as well. If you get rid
> > of the mempool, then resizing is simply a matter of adjusting
> > bd->max_queue.
> 
> Like so.

Thanks. I thought that pre-allocating bsg_command structures would be
nice, but it doesn't matter much to me.

One minor comment: could we simplify the __bsg_alloc_command failpath
a bit?

---
From 6cebe0e87adc90ba50067f017c52a233fb9262d6 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Date: Wed, 24 Jan 2007 11:08:47 +0900
Subject: [PATCH] bsg: simplify __bsg_alloc_command failpath

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
---
 block/bsg.c |    5 ++---
 1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/block/bsg.c b/block/bsg.c
index e97e3ec..c85d961 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -128,7 +128,8 @@ static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
 	if (unlikely(!bc)) {
 		spin_lock_irq(&bd->lock);
-		goto alloc_fail;
+		bd->queued_cmds--;
+		goto out;
 	}
 
 	memset(bc, 0, sizeof(*bc));
@@ -136,8 +137,6 @@ static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 	INIT_LIST_HEAD(&bc->list);
 	dprintk("%s: returning free cmd %p\n", bd->name, bc);
 	return bc;
-alloc_fail:
-	bd->queued_cmds--;
 out:
 	spin_unlock_irq(&bd->lock);
 	return bc;
-- 
1.4.4.3




* Re: [PATCH] add bsg queue resize
  2007-01-24  2:25     ` FUJITA Tomonori
@ 2007-01-24  8:04       ` Jens Axboe
  0 siblings, 0 replies; 6+ messages in thread
From: Jens Axboe @ 2007-01-24  8:04 UTC (permalink / raw)
  To: FUJITA Tomonori; +Cc: linux-scsi

On Wed, Jan 24 2007, FUJITA Tomonori wrote:
> From: Jens Axboe <jens.axboe@oracle.com>
> Subject: Re: [PATCH] add bsg queue resize
> Date: Tue, 23 Jan 2007 16:23:49 +0100
> 
> > On Tue, Jan 23 2007, Jens Axboe wrote:
> > > On Sat, Jan 20 2007, FUJITA Tomonori wrote:
> > > > This enables bsg to resize the queue depth via
> > > > SG_SET_COMMAND_Q. bsg_command structures are allocated via mempool
> > > > because the previous way to use contiguous memory makes it difficult
> > > > to resize the queue depth when a bsg_device has outstanding commands.
> > > 
> > > Overall the patch looks fine. I don't think we need a mempool though;
> > > allocations could just use GFP_USER from the user-invoked queuing
> > > paths. Just make it GFP_USER; we can always extend
> > > bsg_alloc_command() to take a gfp_t argument as well. If you get rid
> > > of the mempool, then resizing is simply a matter of adjusting
> > > bd->max_queue.
> > 
> > Like so.
> 
> Thanks. I thought that pre-allocating bsg_command structures would be
> nice, but it doesn't matter much to me.

Probably not very useful in the end; we need to allocate some other
structures for IO anyway.

> One minor comment: could we simplify the __bsg_alloc_command failpath
> a bit?

Yep, applied. Another thing that needs cleaning up for the allocation is
the whole bsg_io_schedule() stuff.

-- 
Jens Axboe

