From: Fam Zheng
Date: Fri, 26 May 2017 15:52:36 +0800
Message-Id: <20170526075246.20265-13-famz@redhat.com>
In-Reply-To: <20170526075246.20265-1-famz@redhat.com>
References: <20170526075246.20265-1-famz@redhat.com>
Subject: [Qemu-devel] [PULL 12/22] throttle-groups: do not use qemu_co_enter_next
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, Stefan Hajnoczi

From: Paolo Bonzini

Prepare for removing this function; always restart throttled requests
from coroutine context.  This will matter once restarting throttled
requests has to acquire a CoMutex.

Reviewed-by: Alberto Garcia
Reviewed-by: Stefan Hajnoczi
Signed-off-by: Paolo Bonzini
Message-Id: <20170525163225.29954-9-pbonzini@redhat.com>
Signed-off-by: Fam Zheng
---
 block/throttle-groups.c | 42 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 37 insertions(+), 5 deletions(-)

diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 85169ec..8bf1031 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -260,6 +260,20 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
     return must_wait;
 }
 
+/* Start the next pending I/O request for a BlockBackend.  Return whether
+ * any request was actually pending.
+ *
+ * @blk:       the current BlockBackend
+ * @is_write:  the type of operation (read/write)
+ */
+static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
+                                                         bool is_write)
+{
+    BlockBackendPublic *blkp = blk_get_public(blk);
+
+    return qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
+}
+
 /* Look for the next pending I/O request and schedule it.
  *
  * This assumes that tg->lock is held.
@@ -287,7 +301,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
     if (!must_wait) {
         /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
+            throttle_group_co_restart_queue(blk, is_write)) {
             token = blk;
         } else {
             ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
@@ -340,15 +354,21 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
     qemu_mutex_unlock(&tg->lock);
 }
 
-static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+typedef struct {
+    BlockBackend *blk;
+    bool is_write;
+} RestartData;
+
+static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
 {
+    RestartData *data = opaque;
+    BlockBackend *blk = data->blk;
+    bool is_write = data->is_write;
     BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool empty_queue;
 
-    aio_context_acquire(blk_get_aio_context(blk));
-    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
-    aio_context_release(blk_get_aio_context(blk));
+    empty_queue = !throttle_group_co_restart_queue(blk, is_write);
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
@@ -359,6 +379,18 @@ static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
     }
 }
 
+static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+{
+    Coroutine *co;
+    RestartData rd = {
+        .blk = blk,
+        .is_write = is_write
+    };
+
+    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
+    aio_co_enter(blk_get_aio_context(blk), co);
+}
+
 void throttle_group_restart_blk(BlockBackend *blk)
 {
     BlockBackendPublic *blkp = blk_get_public(blk);
-- 
2.9.4
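
For readers skimming the patch, the essence of the change is the pattern of
wrapping the queue restart in a coroutine_fn entry point and entering it on
the BlockBackend's AioContext, instead of poking the CoQueue directly with
qemu_co_enter_next() from non-coroutine context.  A minimal sketch of that
pattern is below; the MyRestartData/my_restart* names are hypothetical and
only qemu_coroutine_create(), aio_co_enter(), blk_get_aio_context() and
qemu_co_queue_next() are APIs the patch itself relies on.

/* Sketch only, not part of the patch: restart work hopped into coroutine
 * context, mirroring throttle_group_restart_queue() above. */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

typedef struct {
    BlockBackend *blk;
    bool is_write;
} MyRestartData;

/* Entry point: always runs in coroutine context, so waking queued requests
 * (and, in a later step, taking a CoMutex) is safe here. */
static void coroutine_fn my_restart_entry(void *opaque)
{
    MyRestartData *data = opaque;

    /* ... restart data->blk's throttled queue for data->is_write ... */
    (void)data;
}

/* Wrapper callable from plain, non-coroutine context. */
static void my_restart(BlockBackend *blk, bool is_write)
{
    /* Stack-allocated argument block, like 'rd' in the patch. */
    MyRestartData data = { .blk = blk, .is_write = is_write };
    Coroutine *co = qemu_coroutine_create(my_restart_entry, &data);

    /* Hand the coroutine to the BlockBackend's AioContext for execution. */
    aio_co_enter(blk_get_aio_context(blk), co);
}

The point of the indirection is that the entry function no longer cares
whether its caller was in coroutine context, which is exactly what will let
it acquire a CoMutex once throttled_reqs is protected by one.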