* [Qemu-devel] [PATCH v2 0/2] fix max_discard for NBD/gluster block drivers
@ 2015-02-02 18:29 Denis V. Lunev
2015-02-02 18:29 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
2015-02-02 18:29 ` [Qemu-devel] [PATCH 2/2] nbd: " Denis V. Lunev
0 siblings, 2 replies; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 18:29 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, Peter Lieven, qemu-devel
Changes from v1:
- switched to UINT32_MAX for NBD
- limited gluster max_discard to SIZE_MAX >> BDRV_SECTOR_BITS on i686 only
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Kevin Wolf <kwolf@redhat.com>
CC: Peter Lieven <pl@kamp.de>
* [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard
2015-02-02 18:29 [Qemu-devel] [PATCH v2 0/2] fix max_discard for NBD/gluster block drivers Denis V. Lunev
@ 2015-02-02 18:29 ` Denis V. Lunev
2015-02-02 19:58 ` Peter Lieven
2015-02-02 18:29 ` [Qemu-devel] [PATCH 2/2] nbd: " Denis V. Lunev
1 sibling, 1 reply; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 18:29 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, Peter Lieven, qemu-devel
qemu_gluster_co_discard calculates the size to discard as follows:
size_t size = nb_sectors * BDRV_SECTOR_SIZE;
ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
glfs_discard_async is declared as follows:
int glfs_discard_async (glfs_fd_t *fd, off_t length, size_t lent,
glfs_io_cbk fn, void *data) __THROW
This is problematic on i686 as sizeof(size_t) == 4.
Set bs->bl.max_discard to SIZE_MAX >> BDRV_SECTOR_BITS to avoid overflow
on i686.
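For illustration only (not part of the patch): a minimal standalone sketch of
the truncation, assuming a 32-bit build where sizeof(size_t) == 4.

#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE 512   /* as in QEMU */

int main(void)
{
    int64_t nb_sectors = 1LL << 23;                /* 4 GiB worth of sectors */
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;   /* wraps to 0 when size_t is 32 bits */
    printf("requested %lld bytes, passed %zu bytes\n",
           (long long)(nb_sectors * BDRV_SECTOR_SIZE), size);
    return 0;
}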
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Kevin Wolf <kwolf@redhat.com>
CC: Peter Lieven <pl@kamp.de>
---
block/gluster.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..47bf92d 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -622,6 +622,13 @@ out:
return ret;
}
+static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+#if SIZE_MAX == UINT_MAX
+ bs->bl.max_discard = SIZE_MAX >> BDRV_SECTOR_BITS;
+#endif
+}
+
#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
int64_t sector_num, int nb_sectors)
@@ -735,6 +742,7 @@ static BlockDriver bdrv_gluster = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
@@ -762,6 +770,7 @@ static BlockDriver bdrv_gluster_tcp = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
@@ -789,6 +798,7 @@ static BlockDriver bdrv_gluster_unix = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
@@ -816,6 +826,7 @@ static BlockDriver bdrv_gluster_rdma = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
--
1.9.1
* Re: [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard
2015-02-02 18:29 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
@ 2015-02-02 19:58 ` Peter Lieven
2015-02-02 20:09 ` Denis V. Lunev
0 siblings, 1 reply; 8+ messages in thread
From: Peter Lieven @ 2015-02-02 19:58 UTC (permalink / raw)
To: Denis V. Lunev; +Cc: Kevin Wolf, qemu-devel
On 02.02.2015 19:29, Denis V. Lunev wrote:
> qemu_gluster_co_discard calculates size to discard as follows
> size_t size = nb_sectors * BDRV_SECTOR_SIZE;
> ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
>
> glfs_discard_async is declared as follows:
> int glfs_discard_async (glfs_fd_t *fd, off_t length, size_t lent,
> glfs_io_cbk fn, void *data) __THROW
> This is problematic on i686 as sizeof(size_t) == 4.
>
> Set bl_max_discard to SIZE_MAX >> BDRV_SECTOR_BITS to avoid overflow
> on i386.
>
> Signed-off-by: Denis V. Lunev <den@openvz.org>
> CC: Kevin Wolf <kwolf@redhat.com>
> CC: Peter Lieven <pl@kamp.de>
> ---
> block/gluster.c | 11 +++++++++++
> 1 file changed, 11 insertions(+)
>
> diff --git a/block/gluster.c b/block/gluster.c
> index 1eb3a8c..47bf92d 100644
> --- a/block/gluster.c
> +++ b/block/gluster.c
> @@ -622,6 +622,13 @@ out:
> return ret;
> }
>
> +static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
> +{
> +#if SIZE_MAX == UINT_MAX
> + bs->bl.max_discard = SIZE_MAX >> BDRV_SECTOR_BITS;
> +#endif
I would write:
bs->bl.max_discard = MIN(SIZE_MAX >> BDRV_SECTOR_BITS, INT_MAX);
without the condition.
Peter
> +}
> +
> #ifdef CONFIG_GLUSTERFS_DISCARD
> static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
> int64_t sector_num, int nb_sectors)
> @@ -735,6 +742,7 @@ static BlockDriver bdrv_gluster = {
> #ifdef CONFIG_GLUSTERFS_DISCARD
> .bdrv_co_discard = qemu_gluster_co_discard,
> #endif
> + .bdrv_refresh_limits = qemu_gluster_refresh_limits,
> #ifdef CONFIG_GLUSTERFS_ZEROFILL
> .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
> #endif
> @@ -762,6 +770,7 @@ static BlockDriver bdrv_gluster_tcp = {
> #ifdef CONFIG_GLUSTERFS_DISCARD
> .bdrv_co_discard = qemu_gluster_co_discard,
> #endif
> + .bdrv_refresh_limits = qemu_gluster_refresh_limits,
> #ifdef CONFIG_GLUSTERFS_ZEROFILL
> .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
> #endif
> @@ -789,6 +798,7 @@ static BlockDriver bdrv_gluster_unix = {
> #ifdef CONFIG_GLUSTERFS_DISCARD
> .bdrv_co_discard = qemu_gluster_co_discard,
> #endif
> + .bdrv_refresh_limits = qemu_gluster_refresh_limits,
> #ifdef CONFIG_GLUSTERFS_ZEROFILL
> .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
> #endif
> @@ -816,6 +826,7 @@ static BlockDriver bdrv_gluster_rdma = {
> #ifdef CONFIG_GLUSTERFS_DISCARD
> .bdrv_co_discard = qemu_gluster_co_discard,
> #endif
> + .bdrv_refresh_limits = qemu_gluster_refresh_limits,
> #ifdef CONFIG_GLUSTERFS_ZEROFILL
> .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
> #endif
* Re: [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard
2015-02-02 19:58 ` Peter Lieven
@ 2015-02-02 20:09 ` Denis V. Lunev
0 siblings, 0 replies; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 20:09 UTC (permalink / raw)
To: Peter Lieven; +Cc: Kevin Wolf, qemu-devel
On 02/02/15 22:58, Peter Lieven wrote:
> On 02.02.2015 19:29, Denis V. Lunev wrote:
>> qemu_gluster_co_discard calculates size to discard as follows
>> size_t size = nb_sectors * BDRV_SECTOR_SIZE;
>> ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
>>
>> glfs_discard_async is declared as follows:
>> int glfs_discard_async (glfs_fd_t *fd, off_t length, size_t lent,
>> glfs_io_cbk fn, void *data) __THROW
>> This is problematic on i686 as sizeof(size_t) == 4.
>>
>> Set bl_max_discard to SIZE_MAX >> BDRV_SECTOR_BITS to avoid overflow
>> on i386.
>>
>> Signed-off-by: Denis V. Lunev <den@openvz.org>
>> CC: Kevin Wolf <kwolf@redhat.com>
>> CC: Peter Lieven <pl@kamp.de>
>> ---
>> block/gluster.c | 11 +++++++++++
>> 1 file changed, 11 insertions(+)
>>
>> diff --git a/block/gluster.c b/block/gluster.c
>> index 1eb3a8c..47bf92d 100644
>> --- a/block/gluster.c
>> +++ b/block/gluster.c
>> @@ -622,6 +622,13 @@ out:
>> return ret;
>> }
>>
>> +static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
>> +{
>> +#if SIZE_MAX == UINT_MAX
>> + bs->bl.max_discard = SIZE_MAX >> BDRV_SECTOR_BITS;
>> +#endif
> I would write:
>
> bs->bl.max_discard = MIN(SIZE_MAX >> BDRV_SECTOR_BITS, INT_MAX);
>
> without the condition.
>
>
> Peter
>
ok, respun
Den
* [Qemu-devel] [PATCH 2/2] nbd: fix max_discard
2015-02-02 18:29 [Qemu-devel] [PATCH v2 0/2] fix max_discard for NBD/gluster block drivers Denis V. Lunev
2015-02-02 18:29 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
@ 2015-02-02 18:29 ` Denis V. Lunev
2015-02-02 19:55 ` Peter Lieven
1 sibling, 1 reply; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 18:29 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, Peter Lieven, qemu-devel
nbd_co_discard calls nbd_client_session_co_discard, which uses a uint32_t
as the length in bytes of the data to discard, due to the following
definition:
struct nbd_request {
uint32_t magic;
uint32_t type;
uint64_t handle;
uint64_t from;
uint32_t len; <-- the length of data to be discarded, in bytes
} QEMU_PACKED;
Thus we should limit bs->bl.max_discard to UINT32_MAX >> BDRV_SECTOR_BITS to
avoid overflow.
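As an illustrative check (not from the patch), the resulting cap in 512-byte
sectors works out as follows:

#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_BITS 9   /* 512-byte sectors, as in QEMU */

int main(void)
{
    /* largest byte count the request's uint32_t len field can carry */
    uint32_t max_bytes   = UINT32_MAX;
    uint32_t max_sectors = max_bytes >> BDRV_SECTOR_BITS;   /* 8388607 sectors */
    printf("max_discard = %u sectors = %u bytes\n",
           max_sectors, max_sectors << BDRV_SECTOR_BITS);
    return 0;
}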
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Kevin Wolf <kwolf@redhat.com>
CC: Peter Lieven <pl@kamp.de>
---
block/nbd.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/block/nbd.c b/block/nbd.c
index 04cc845..99af713 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -301,6 +301,11 @@ static int nbd_co_flush(BlockDriverState *bs)
return nbd_client_session_co_flush(&s->client);
}
+static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+ bs->bl.max_discard = UINT32_MAX >> BDRV_SECTOR_BITS;
+}
+
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
@@ -396,6 +401,7 @@ static BlockDriver bdrv_nbd = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -413,6 +419,7 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -430,6 +437,7 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
--
1.9.1
* Re: [Qemu-devel] [PATCH 2/2] nbd: fix max_discard
2015-02-02 18:29 ` [Qemu-devel] [PATCH 2/2] nbd: " Denis V. Lunev
@ 2015-02-02 19:55 ` Peter Lieven
0 siblings, 0 replies; 8+ messages in thread
From: Peter Lieven @ 2015-02-02 19:55 UTC (permalink / raw)
To: Denis V. Lunev; +Cc: Kevin Wolf, qemu-devel
On 02.02.2015 19:29, Denis V. Lunev wrote:
> nbd_co_discard calls nbd_client_session_co_discard which uses uint32_t
> as the length in bytes of the data to discard due to the following
> definition:
>
> struct nbd_request {
> uint32_t magic;
> uint32_t type;
> uint64_t handle;
> uint64_t from;
> uint32_t len; <-- the length of data to be discarded, in bytes
> } QEMU_PACKED;
>
> Thus we should limit bl_max_discard to UINT32_MAX >> BDRV_SECTOR_BITS to
> avoid overflow.
>
> Signed-off-by: Denis V. Lunev <den@openvz.org>
> CC: Kevin Wolf <kwolf@redhat.com>
> CC: Peter Lieven <pl@kamp.de>
> ---
> block/nbd.c | 8 ++++++++
> 1 file changed, 8 insertions(+)
>
> diff --git a/block/nbd.c b/block/nbd.c
> index 04cc845..99af713 100644
> --- a/block/nbd.c
> +++ b/block/nbd.c
> @@ -301,6 +301,11 @@ static int nbd_co_flush(BlockDriverState *bs)
> return nbd_client_session_co_flush(&s->client);
> }
>
> +static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
> +{
> + bs->bl.max_discard = UINT32_MAX >> BDRV_SECTOR_BITS;
> +}
> +
> static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
> int nb_sectors)
> {
> @@ -396,6 +401,7 @@ static BlockDriver bdrv_nbd = {
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> .bdrv_co_discard = nbd_co_discard,
> + .bdrv_refresh_limits = nbd_refresh_limits,
> .bdrv_getlength = nbd_getlength,
> .bdrv_detach_aio_context = nbd_detach_aio_context,
> .bdrv_attach_aio_context = nbd_attach_aio_context,
> @@ -413,6 +419,7 @@ static BlockDriver bdrv_nbd_tcp = {
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> .bdrv_co_discard = nbd_co_discard,
> + .bdrv_refresh_limits = nbd_refresh_limits,
> .bdrv_getlength = nbd_getlength,
> .bdrv_detach_aio_context = nbd_detach_aio_context,
> .bdrv_attach_aio_context = nbd_attach_aio_context,
> @@ -430,6 +437,7 @@ static BlockDriver bdrv_nbd_unix = {
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> .bdrv_co_discard = nbd_co_discard,
> + .bdrv_refresh_limits = nbd_refresh_limits,
> .bdrv_getlength = nbd_getlength,
> .bdrv_detach_aio_context = nbd_detach_aio_context,
> .bdrv_attach_aio_context = nbd_attach_aio_context,
Reviewed-by: Peter Lieven <pl@kamp.de>
* [Qemu-devel] [PATCH v3 0/2] fix max_discard for NBD/gluster block drivers
@ 2015-02-02 20:09 Denis V. Lunev
2015-02-02 20:09 ` [Qemu-devel] [PATCH 2/2] nbd: fix max_discard Denis V. Lunev
0 siblings, 1 reply; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 20:09 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, Peter Lieven, qemu-devel
Changes from v2:
- dropped the conditional in the gluster patch
Changes from v1:
- switched to UINT32_MAX for NBD
- limited gluster max_discard to SIZE_MAX >> BDRV_SECTOR_BITS on i686 only
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Kevin Wolf <kwolf@redhat.com>
CC: Peter Lieven <pl@kamp.de>
* [Qemu-devel] [PATCH 2/2] nbd: fix max_discard
2015-02-02 20:09 [Qemu-devel] [PATCH v3 0/2] fix max_discard for NBD/gluster block drivers Denis V. Lunev
@ 2015-02-02 20:09 ` Denis V. Lunev
0 siblings, 0 replies; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 20:09 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, qemu-devel
nbd_co_discard calls nbd_client_session_co_discard, which uses a uint32_t
as the length in bytes of the data to discard, due to the following
definition:
struct nbd_request {
uint32_t magic;
uint32_t type;
uint64_t handle;
uint64_t from;
uint32_t len; <-- the length of data to be discarded, in bytes
} QEMU_PACKED;
Thus we should limit bs->bl.max_discard to UINT32_MAX >> BDRV_SECTOR_BITS to
avoid overflow.
Signed-off-by: Denis V. Lunev <den@openvz.org>
Reviewed-by: Peter Lieven <pl@kamp.de>
CC: Kevin Wolf <kwolf@redhat.com>
---
block/nbd.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/block/nbd.c b/block/nbd.c
index 04cc845..99af713 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -301,6 +301,11 @@ static int nbd_co_flush(BlockDriverState *bs)
return nbd_client_session_co_flush(&s->client);
}
+static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+ bs->bl.max_discard = UINT32_MAX >> BDRV_SECTOR_BITS;
+}
+
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
@@ -396,6 +401,7 @@ static BlockDriver bdrv_nbd = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -413,6 +419,7 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -430,6 +437,7 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
--
1.9.1
* [Qemu-devel] [PATCH] block: change default for discard and write zeroes to INT_MAX
@ 2015-02-02 14:48 Peter Lieven
2015-02-02 16:23 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
0 siblings, 1 reply; 8+ messages in thread
From: Peter Lieven @ 2015-02-02 14:48 UTC (permalink / raw)
To: qemu-devel; +Cc: kwolf, den, Peter Lieven
Do not trim requests if the driver does not supply a limit through
BlockLimits. For write zeroes we still keep a limit on the unsupported path
to avoid allocating a big bounce buffer.
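A hedged arithmetic sketch (illustrative only, constants as used in this
patch) of why the bounce-buffer path keeps its own cap:

#include <stdio.h>

#define BDRV_SECTOR_SIZE 512                    /* as in QEMU */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768    /* sectors, as in this patch */

int main(void)
{
    /* largest temporary zero-filled buffer the fallback path may allocate */
    long cap = (long)MAX_WRITE_ZEROES_BOUNCE_BUFFER * BDRV_SECTOR_SIZE;
    printf("bounce buffer cap: %ld bytes (%ld MiB)\n", cap, cap / (1024 * 1024));
    return 0;
}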
Suggested-by: Kevin Wolf <kwolf@redhat.com>
Suggested-by: Denis V. Lunev <den@openvz.org>
Signed-off-by: Peter Lieven <pl@kamp.de>
---
block.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/block.c b/block.c
index d45e4dd..ee7ff2c 100644
--- a/block.c
+++ b/block.c
@@ -3192,10 +3192,7 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
BDRV_REQ_COPY_ON_READ);
}
-/* if no limit is specified in the BlockLimits use a default
- * of 32768 512-byte sectors (16 MiB) per request.
- */
-#define MAX_WRITE_ZEROES_DEFAULT 32768
+#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
@@ -3206,7 +3203,7 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
int ret = 0;
int max_write_zeroes = bs->bl.max_write_zeroes ?
- bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
+ bs->bl.max_write_zeroes : INT_MAX;
while (nb_sectors > 0 && !ret) {
int num = nb_sectors;
@@ -3242,7 +3239,7 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
- MAX_WRITE_ZEROES_DEFAULT);
+ MAX_WRITE_ZEROES_BOUNCE_BUFFER);
num = MIN(num, max_xfer_len);
iov.iov_len = num * BDRV_SECTOR_SIZE;
if (iov.iov_base == NULL) {
@@ -5097,11 +5094,6 @@ static void coroutine_fn bdrv_discard_co_entry(void *opaque)
rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
-/* if no limit is specified in the BlockLimits use a default
- * of 32768 512-byte sectors (16 MiB) per request.
- */
-#define MAX_DISCARD_DEFAULT 32768
-
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
@@ -5126,7 +5118,7 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
return 0;
}
- max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
+ max_discard = bs->bl.max_discard ? bs->bl.max_discard : INT_MAX;
while (nb_sectors > 0) {
int ret;
int num = nb_sectors;
--
1.9.1
* [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard
2015-02-02 14:48 [Qemu-devel] [PATCH] block: change default for discard and write zeroes to INT_MAX Peter Lieven
@ 2015-02-02 16:23 ` Denis V. Lunev
2015-02-02 16:23 ` [Qemu-devel] [PATCH 2/2] nbd: " Denis V. Lunev
0 siblings, 1 reply; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 16:23 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, Peter Lieven, qemu-devel
qemu_gluster_co_discard calculates the size to discard as follows:
size_t size = nb_sectors * BDRV_SECTOR_SIZE;
ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
glfs_discard_async is declared as follows:
int glfs_discard_async (glfs_fd_t *fd, off_t length, size_t lent,
glfs_io_cbk fn, void *data) __THROW
This is problematic on i686 as sizeof(size_t) == 4.
Set bs->bl.max_discard to INT_MAX >> BDRV_SECTOR_BITS to avoid overflow.
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Kevin Wolf <kwolf@redhat.com>
CC: Peter Lieven <pl@kamp.de>
---
block/gluster.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..ed09691 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -622,6 +622,11 @@ out:
return ret;
}
+static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+ bs->bl.max_discard = INT_MAX >> BDRV_SECTOR_BITS;
+}
+
#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
int64_t sector_num, int nb_sectors)
@@ -735,6 +740,7 @@ static BlockDriver bdrv_gluster = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
@@ -762,6 +768,7 @@ static BlockDriver bdrv_gluster_tcp = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
@@ -789,6 +796,7 @@ static BlockDriver bdrv_gluster_unix = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
@@ -816,6 +824,7 @@ static BlockDriver bdrv_gluster_rdma = {
#ifdef CONFIG_GLUSTERFS_DISCARD
.bdrv_co_discard = qemu_gluster_co_discard,
#endif
+ .bdrv_refresh_limits = qemu_gluster_refresh_limits,
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
--
1.9.1
* [Qemu-devel] [PATCH 2/2] nbd: fix max_discard
2015-02-02 16:23 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
@ 2015-02-02 16:23 ` Denis V. Lunev
0 siblings, 0 replies; 8+ messages in thread
From: Denis V. Lunev @ 2015-02-02 16:23 UTC (permalink / raw)
Cc: Kevin Wolf, Denis V. Lunev, Peter Lieven, qemu-devel
nbd_co_discard calls nbd_client_session_co_discard, which uses a uint32_t
as the length in bytes of the data to discard, due to the following
definition:
struct nbd_request {
uint32_t magic;
uint32_t type;
uint64_t handle;
uint64_t from;
uint32_t len; <-- the length of data to be discarded, in bytes
} QEMU_PACKED;
Thus we should limit bs->bl.max_discard to INT_MAX >> BDRV_SECTOR_BITS to
avoid overflow.
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Kevin Wolf <kwolf@redhat.com>
CC: Peter Lieven <pl@kamp.de>
---
block/nbd.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/block/nbd.c b/block/nbd.c
index 04cc845..008d52e 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -301,6 +301,11 @@ static int nbd_co_flush(BlockDriverState *bs)
return nbd_client_session_co_flush(&s->client);
}
+static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+ bs->bl.max_discard = INT_MAX >> BDRV_SECTOR_BITS;
+}
+
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
@@ -396,6 +401,7 @@ static BlockDriver bdrv_nbd = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -413,6 +419,7 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -430,6 +437,7 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
--
1.9.1
Thread overview: 8+ messages (newest: 2015-02-02 20:10 UTC)
-- links below jump to the message on this page --
2015-02-02 18:29 [Qemu-devel] [PATCH v2 0/2] fix max_discard for NBD/gluster block drivers Denis V. Lunev
2015-02-02 18:29 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
2015-02-02 19:58 ` Peter Lieven
2015-02-02 20:09 ` Denis V. Lunev
2015-02-02 18:29 ` [Qemu-devel] [PATCH 2/2] nbd: " Denis V. Lunev
2015-02-02 19:55 ` Peter Lieven
-- strict thread matches above, loose matches on Subject: below --
2015-02-02 20:09 [Qemu-devel] [PATCH v3 0/2] fix max_discard for NBD/gluster block drivers Denis V. Lunev
2015-02-02 20:09 ` [Qemu-devel] [PATCH 2/2] nbd: fix max_discard Denis V. Lunev
2015-02-02 14:48 [Qemu-devel] [PATCH] block: change default for discard and write zeroes to INT_MAX Peter Lieven
2015-02-02 16:23 ` [Qemu-devel] [PATCH 1/2] glusterfs: fix max_discard Denis V. Lunev
2015-02-02 16:23 ` [Qemu-devel] [PATCH 2/2] nbd: " Denis V. Lunev