Message-ID: <49E35345.1090004@eu.citrix.com>
Date: Mon, 13 Apr 2009 15:59:17 +0100
From: Stefano Stabellini
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 7bit
Subject: [Qemu-devel] [PATCH] implement qemu_blockalign
Reply-To: qemu-devel@nongnu.org
List-Id: qemu-devel.nongnu.org
To: "qemu-devel@nongnu.org"

Hi all,
this patch adds a buffer_alignment field to BlockDriverState and implements
a qemu_blockalign function that uses that field to allocate memory-aligned
buffers for the block driver.
buffer_alignment is initialized to 512, but each block driver can set a
different value (at the moment none of them do).

The patch modifies ide.c, block-qcow.c, block-qcow2.c, block-raw-posix.c and
block.c to use qemu_blockalign instead of qemu_memalign.

The only place left that still uses qemu_memalign to allocate buffers for
block drivers is handle_aiocb_rw in posix-aio-compat.c, because the
BlockDriverState cannot be reached from that function. I do not think this
is a problem, though: posix-aio-compat already contains driver-specific
code, so it is expected to know its own alignment requirements.
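For illustration only (not part of the patch itself): a driver that needs
stricter alignment could override the default from its open routine. The
function name and the 4096-byte value below are hypothetical; only
bs->buffer_alignment, BDRV_O_NOCACHE and qemu_blockalign come from this patch.

/* Hypothetical driver open routine: request page-sized alignment so that
 * buffers obtained later through qemu_blockalign(bs, ...) are also usable
 * for O_DIRECT I/O. bdrv_open2() has already set the 512 default before
 * the driver's open routine runs. */
static int example_driver_open(BlockDriverState *bs, const char *filename, int flags)
{
    if (flags & BDRV_O_NOCACHE)
        bs->buffer_alignment = 4096;

    /* ... remainder of the driver's open code ... */
    return 0;
}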
Signed-off-by: Stefano Stabellini
---

diff --git a/block-qcow.c b/block-qcow.c
index b60f4c1..b1174cb 100644
--- a/block-qcow.c
+++ b/block-qcow.c
@@ -641,7 +641,7 @@ static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs,
     acb->sector_num = sector_num;
     acb->qiov = qiov;
     if (qiov->niov > 1)
-        acb->buf = acb->orig_buf = qemu_memalign(512, qiov->size);
+        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
     else
         acb->buf = qiov->iov->iov_base;
     acb->nb_sectors = nb_sectors;
@@ -736,7 +736,7 @@ static BlockDriverAIOCB *qcow_aio_writev(BlockDriverState *bs,
     acb->sector_num = sector_num;
     acb->qiov = qiov;
     if (qiov->niov > 1) {
-        acb->buf = acb->orig_buf = qemu_memalign(512, qiov->size);
+        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
         qemu_iovec_to_buffer(qiov, acb->buf);
     } else
         acb->buf = qiov->iov->iov_base;
diff --git a/block-qcow2.c b/block-qcow2.c
index 3bd38b0..795bcdf 100644
--- a/block-qcow2.c
+++ b/block-qcow2.c
@@ -1414,7 +1414,7 @@ static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
     acb->sector_num = sector_num;
     acb->qiov = qiov;
     if (qiov->niov > 1) {
-        acb->buf = acb->orig_buf = qemu_memalign(512, qiov->size);
+        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
         if (is_write)
             qemu_iovec_to_buffer(qiov, acb->buf);
     } else
diff --git a/block-raw-posix.c b/block-raw-posix.c
index 822839f..0663c06 100644
--- a/block-raw-posix.c
+++ b/block-raw-posix.c
@@ -165,7 +165,7 @@ static int raw_open(BlockDriverState *bs, const char *filename, int flags)
     s->fd = fd;
     s->aligned_buf = NULL;
     if ((flags & BDRV_O_NOCACHE)) {
-        s->aligned_buf = qemu_memalign(512, ALIGNED_BUFFER_SIZE);
+        s->aligned_buf = qemu_blockalign(bs, ALIGNED_BUFFER_SIZE);
         if (s->aligned_buf == NULL) {
             ret = -errno;
             close(fd);
diff --git a/block.c b/block.c
index 74d19ad..7c9c304 100644
--- a/block.c
+++ b/block.c
@@ -362,6 +362,8 @@ int bdrv_open2(BlockDriverState *bs, const char *filename, int flags,
     bs->is_temporary = 0;
     bs->encrypted = 0;
     bs->valid_key = 0;
+    /* buffer_alignment defaulted to 512, drivers can change this value */
+    bs->buffer_alignment = 512;
 
     if (flags & BDRV_O_SNAPSHOT) {
         BlockDriverState *bs1;
@@ -1376,7 +1378,7 @@ static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
     acb = qemu_aio_get(bs, cb, opaque);
     acb->is_write = is_write;
     acb->qiov = qiov;
-    acb->bounce = qemu_memalign(512, qiov->size);
+    acb->bounce = qemu_blockalign(bs, qiov->size);
 
     if (!acb->bh)
         acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
@@ -1626,3 +1628,8 @@ BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
         return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
     return NULL;
 }
+
+void *qemu_blockalign(BlockDriverState *bs, size_t size)
+{
+    return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
+}
diff --git a/block_int.h b/block_int.h
index 3e78997..756436f 100644
--- a/block_int.h
+++ b/block_int.h
@@ -142,6 +142,9 @@ struct BlockDriverState {
     /* Whether the disk can expand beyond total_sectors */
     int growable;
 
+    /* the memory alignment required for the buffers handled by this driver */
+    int buffer_alignment;
+
     /* NOTE: the following infos are only hints for real hardware
        drivers. They are not used by the block driver */
     int cyls, heads, secs, translation;
@@ -170,6 +173,8 @@ void *qemu_aio_get_pool(AIOPool *pool, BlockDriverState *bs,
                         BlockDriverCompletionFunc *cb, void *opaque);
 void qemu_aio_release(void *p);
 
+void *qemu_blockalign(BlockDriverState *bs, size_t size);
+
 extern BlockDriverState *bdrv_first;
 
 #endif /* BLOCK_INT_H */
diff --git a/hw/ide.c b/hw/ide.c
index f187546..345ee61 100644
--- a/hw/ide.c
+++ b/hw/ide.c
@@ -2788,11 +2788,11 @@ static void ide_init2(IDEState *ide_state,
 
     for(i = 0; i < 2; i++) {
         s = ide_state + i;
-        s->io_buffer = qemu_memalign(512, IDE_DMA_BUF_SECTORS*512 + 4);
         if (i == 0)
             s->bs = hd0;
        else
             s->bs = hd1;
+        s->io_buffer = qemu_blockalign(s->bs, IDE_DMA_BUF_SECTORS*512 + 4);
         if (s->bs) {
             bdrv_get_geometry(s->bs, &nb_sectors);
             bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
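For illustration, the reordering in the ide.c hunk above matters because
qemu_blockalign() consults bs->buffer_alignment: allocating io_buffer after
s->bs has been assigned lets the buffer pick up the drive's alignment, while
a NULL bs simply falls back to 512 bytes. A minimal sketch, not from the
patch itself:

/* Sketch only: fallback behaviour of the helper added to block.c above. */
uint8_t *a = qemu_blockalign(NULL, 4096);   /* bs unknown: 512-byte alignment */
uint8_t *b = qemu_blockalign(s->bs, 4096);  /* honours s->bs->buffer_alignment */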