From: "Michael S. Tsirkin" <mst@redhat.com>
To: Asias He <asias@redhat.com>
Cc: kvm@vger.kernel.org, virtualization@lists.linux-foundation.org,
target-devel@vger.kernel.org,
Stefan Hajnoczi <stefanha@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: Re: [PATCH v9 1/3] tcm_vhost: Refactor the lock nesting rule
Date: Thu, 25 Apr 2013 08:44:58 +0300
Message-ID: <20130425054458.GA6710@redhat.com>
In-Reply-To: <1366859406-8963-2-git-send-email-asias@redhat.com>
On Thu, Apr 25, 2013 at 11:10:04AM +0800, Asias He wrote:
> We want to use tcm_vhost_mutex to make sure hotplug/hotunplug will not
> happen while set_endpoint/clear_endpoint is in progress.
>
> Signed-off-by: Asias He <asias@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
> ---
>  drivers/vhost/tcm_vhost.c | 32 +++++++++++++++++++-------------
>  1 file changed, 19 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index 957a0b9..822cd1f 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -808,6 +808,9 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
>  /*
>   * Called from vhost_scsi_ioctl() context to walk the list of available
>   * tcm_vhost_tpg with an active struct tcm_vhost_nexus
> + *
> + * The lock nesting rule is:
> + *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
>   */
>  static int vhost_scsi_set_endpoint(
>  	struct vhost_scsi *vs,
> @@ -820,26 +823,27 @@ static int vhost_scsi_set_endpoint(
>  	int index, ret, i, len;
>  	bool match = false;
>
> +	mutex_lock(&tcm_vhost_mutex);
>  	mutex_lock(&vs->dev.mutex);
> +
>  	/* Verify that ring has been setup correctly. */
>  	for (index = 0; index < vs->dev.nvqs; ++index) {
>  		/* Verify that ring has been setup correctly. */
>  		if (!vhost_vq_access_ok(&vs->vqs[index])) {
> -			mutex_unlock(&vs->dev.mutex);
> -			return -EFAULT;
> +			ret = -EFAULT;
> +			goto out;
>  		}
>  	}
>
>  	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
>  	vs_tpg = kzalloc(len, GFP_KERNEL);
>  	if (!vs_tpg) {
> -		mutex_unlock(&vs->dev.mutex);
> -		return -ENOMEM;
> +		ret = -ENOMEM;
> +		goto out;
>  	}
>  	if (vs->vs_tpg)
>  		memcpy(vs_tpg, vs->vs_tpg, len);
>
> -	mutex_lock(&tcm_vhost_mutex);
>  	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
>  		mutex_lock(&tv_tpg->tv_tpg_mutex);
>  		if (!tv_tpg->tpg_nexus) {
> @@ -854,11 +858,10 @@ static int vhost_scsi_set_endpoint(
>
>  		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
>  			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
> -				mutex_unlock(&tv_tpg->tv_tpg_mutex);
> -				mutex_unlock(&tcm_vhost_mutex);
> -				mutex_unlock(&vs->dev.mutex);
>  				kfree(vs_tpg);
> -				return -EEXIST;
> +				mutex_unlock(&tv_tpg->tv_tpg_mutex);
> +				ret = -EEXIST;
> +				goto out;
>  			}
>  			tv_tpg->tv_tpg_vhost_count++;
>  			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> @@ -867,7 +870,6 @@ static int vhost_scsi_set_endpoint(
>  		}
>  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
>  	}
> -	mutex_unlock(&tcm_vhost_mutex);
>
>  	if (match) {
>  		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
> @@ -893,7 +895,9 @@ static int vhost_scsi_set_endpoint(
>  	kfree(vs->vs_tpg);
>  	vs->vs_tpg = vs_tpg;
>
> +out:
>  	mutex_unlock(&vs->dev.mutex);
> +	mutex_unlock(&tcm_vhost_mutex);
>  	return ret;
>  }
>
> @@ -908,6 +912,7 @@ static int vhost_scsi_clear_endpoint(
>  	int index, ret, i;
>  	u8 target;
>
> +	mutex_lock(&tcm_vhost_mutex);
>  	mutex_lock(&vs->dev.mutex);
>  	/* Verify that ring has been setup correctly. */
>  	for (index = 0; index < vs->dev.nvqs; ++index) {
> @@ -918,8 +923,8 @@
>  	}
>
>  	if (!vs->vs_tpg) {
> -		mutex_unlock(&vs->dev.mutex);
> -		return 0;
> +		ret = 0;
> +		goto err_dev;
>  	}
>
>  	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
> @@ -965,13 +970,14 @@
>  	kfree(vs->vs_tpg);
>  	vs->vs_tpg = NULL;
>  	mutex_unlock(&vs->dev.mutex);
> -
> +	mutex_unlock(&tcm_vhost_mutex);
>  	return 0;
>
>  err_tpg:
>  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
>  err_dev:
>  	mutex_unlock(&vs->dev.mutex);
> +	mutex_unlock(&tcm_vhost_mutex);
>  	return ret;
>  }
>
> --
> 1.8.1.4
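
The locking pattern the patch establishes can be modeled in userspace. Below
is a minimal sketch, not kernel code: it uses pthread mutexes, and rings_ok
and tpg_busy are invented stand-ins for the real ring and tpg checks. It
shows the documented acquisition order (the global tcm_vhost_mutex first,
then the device mutex, then the per-tpg mutex) and the single exit label that
every error path funnels through so the locks are always dropped in reverse
order:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tcm_vhost_mutex = PTHREAD_MUTEX_INITIALIZER; /* global list lock */
static pthread_mutex_t dev_mutex       = PTHREAD_MUTEX_INITIALIZER; /* per-device lock  */
static pthread_mutex_t tv_tpg_mutex    = PTHREAD_MUTEX_INITIALIZER; /* per-tpg lock     */

/* Modeled on vhost_scsi_set_endpoint() after the refactor; rings_ok and
 * tpg_busy are hypothetical stand-ins for the real checks. */
static int set_endpoint(bool rings_ok, bool tpg_busy)
{
	int ret = 0;

	pthread_mutex_lock(&tcm_vhost_mutex);   /* 1st: excludes hotplug/hotunplug */
	pthread_mutex_lock(&dev_mutex);         /* 2nd: per-device state */

	if (!rings_ok) {
		ret = -EFAULT;
		goto out;                       /* no early return with locks held */
	}

	pthread_mutex_lock(&tv_tpg_mutex);      /* 3rd: per-tpg state */
	if (tpg_busy) {
		pthread_mutex_unlock(&tv_tpg_mutex);
		ret = -EEXIST;
		goto out;
	}
	/* ... claim the tpg here ... */
	pthread_mutex_unlock(&tv_tpg_mutex);

out:
	pthread_mutex_unlock(&dev_mutex);       /* release in reverse order */
	pthread_mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

int main(void)
{
	printf("ok:       %d\n", set_endpoint(true, false));  /* 0 */
	printf("bad ring: %d\n", set_endpoint(false, false)); /* -EFAULT */
	printf("tpg busy: %d\n", set_endpoint(true, true));   /* -EEXIST */
	return 0;
}

Taking tcm_vhost_mutex outermost is what lets set_endpoint/clear_endpoint
exclude hotplug, which also takes that mutex before walking tcm_vhost_list.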