* [PATCH v11 0/4] tcm_vhost hotplug
From: Asias He @ 2013-04-25  7:35 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

Changes in v11
- Drop change log history in commit log

Changes in v10
- Drop comments about lun
- Add 'Enable VIRTIO_SCSI_F_HOTPLUG' to this series

Changes in v9
- Drop tcm_vhost_check_feature
- Add 'Refactor the lock nesting rule' to this series

Asias He (4):
  tcm_vhost: Refactor the lock nesting rule
  tcm_vhost: Add hotplug/hotunplug support
  tcm_vhost: Add ioctl to get and set events missed flag
  tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG

 drivers/vhost/tcm_vhost.c | 262 +++++++++++++++++++++++++++++++++++++++++++---
 drivers/vhost/tcm_vhost.h |  13 +++
 2 files changed, 259 insertions(+), 16 deletions(-)

-- 
1.8.1.4

* [PATCH v11 1/4] tcm_vhost: Refactor the lock nesting rule
From: Asias He @ 2013-04-25  7:35 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

We want to use tcm_vhost_mutex to make sure hotplug/hotunplug will not
happen while set_endpoint/clear_endpoint is in progress.
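
The full nesting, sketched with the names used in the code (not every
path takes all four locks, but any path that takes more than one must
follow this order):

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	mutex_lock(&tpg->tv_tpg_mutex);
	mutex_lock(&vq->mutex);
	/* ... */
	mutex_unlock(&vq->mutex);
	mutex_unlock(&tpg->tv_tpg_mutex);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);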

Signed-off-by: Asias He <asias@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 957a0b9..822cd1f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -808,6 +808,9 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ *
+ *  The lock nesting rule is:
+ *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int vhost_scsi_set_endpoint(
 	struct vhost_scsi *vs,
@@ -820,26 +823,27 @@ static int vhost_scsi_set_endpoint(
 	int index, ret, i, len;
 	bool match = false;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
+
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
 		if (!vhost_vq_access_ok(&vs->vqs[index])) {
-			mutex_unlock(&vs->dev.mutex);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto out;
 		}
 	}
 
 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
 	vs_tpg = kzalloc(len, GFP_KERNEL);
 	if (!vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
-	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
 		if (!tv_tpg->tpg_nexus) {
@@ -854,11 +858,10 @@ static int vhost_scsi_set_endpoint(
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
-				mutex_unlock(&tv_tpg->tv_tpg_mutex);
-				mutex_unlock(&tcm_vhost_mutex);
-				mutex_unlock(&vs->dev.mutex);
 				kfree(vs_tpg);
-				return -EEXIST;
+				mutex_unlock(&tv_tpg->tv_tpg_mutex);
+				ret = -EEXIST;
+				goto out;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
@@ -867,7 +870,6 @@ static int vhost_scsi_set_endpoint(
 		}
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
-	mutex_unlock(&tcm_vhost_mutex);
 
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -893,7 +895,9 @@ static int vhost_scsi_set_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
 
+out:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -908,6 +912,7 @@ static int vhost_scsi_clear_endpoint(
 	int index, ret, i;
 	u8 target;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -918,8 +923,8 @@ static int vhost_scsi_clear_endpoint(
 	}
 
 	if (!vs->vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return 0;
+		ret = 0;
+		goto err_dev;
 	}
 
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -965,13 +970,14 @@ static int vhost_scsi_clear_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
-
+	mutex_unlock(&tcm_vhost_mutex);
 	return 0;
 
 err_tpg:
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
-- 
1.8.1.4

* [PATCH v11 2/4] tcm_vhost: Add hotplug/hotunplug support
From: Asias He @ 2013-04-25  7:35 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

Commit 365a7150094 ("[SCSI] virtio-scsi: hotplug support for
virtio-scsi") added hotplug support to virtio-scsi.

This patch adds hotplug and hotunplug support to tcm_vhost.

You can create or delete a LUN in targetcli to hotplug or hotunplug a
LUN in the guest.
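
For reference, the event carries the LUN in the 8-byte single-level
format virtio-scsi expects; a minimal sketch of the encoding used
below (tpgt and unpacked_lun stand in for the tpg/lun fields, bytes
4-7 stay zero):

	u8 lun[8] = { 0 };
	lun[0] = 0x01;				/* flat addressing method */
	lun[1] = tpgt & 0xFF;			/* target number */
	if (unpacked_lun >= 256)
		lun[2] = (unpacked_lun >> 8) | 0x40;
	lun[3] = unpacked_lun & 0xFF;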

Signed-off-by: Asias He <asias@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 210 +++++++++++++++++++++++++++++++++++++++++++++-
 drivers/vhost/tcm_vhost.h |  10 +++
 2 files changed, 218 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 822cd1f..5340fd7 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -71,6 +71,7 @@ enum {
 
 #define VHOST_SCSI_MAX_TARGET	256
 #define VHOST_SCSI_MAX_VQ	128
+#define VHOST_SCSI_MAX_EVENT	128
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
@@ -82,6 +83,12 @@ struct vhost_scsi {
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
+
+	struct vhost_work vs_event_work; /* evt injection work item */
+	struct llist_head vs_event_list; /* evt injection queue */
+
+	bool vs_events_missed; /* any missed events, protected by vq->mutex */
+	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -349,6 +356,37 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
 	return 0;
 }
 
+static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+{
+	vs->vs_events_nr--;
+	kfree(evt);
+}
+
+static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+	u32 event, u32 reason)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct tcm_vhost_evt *evt;
+
+	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
+		vs->vs_events_missed = true;
+		return NULL;
+	}
+
+	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+	if (!evt) {
+		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+		vs->vs_events_missed = true;
+		return NULL;
+	}
+
+	evt->event.event = event;
+	evt->event.reason = reason;
+	vs->vs_events_nr++;
+
+	return evt;
+}
+
 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
@@ -367,6 +405,75 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
+static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
+	struct tcm_vhost_evt *evt)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct virtio_scsi_event *event = &evt->event;
+	struct virtio_scsi_event __user *eventp;
+	unsigned out, in;
+	int head, ret;
+
+	if (!vq->private_data) {
+		vs->vs_events_missed = true;
+		return;
+	}
+
+again:
+	vhost_disable_notify(&vs->dev, vq);
+	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+			ARRAY_SIZE(vq->iov), &out, &in,
+			NULL, NULL);
+	if (head < 0) {
+		vs->vs_events_missed = true;
+		return;
+	}
+	if (head == vq->num) {
+		if (vhost_enable_notify(&vs->dev, vq))
+			goto again;
+		vs->vs_events_missed = true;
+		return;
+	}
+
+	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
+		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
+				vq->iov[out].iov_len);
+		vs->vs_events_missed = true;
+		return;
+	}
+
+	if (vs->vs_events_missed) {
+		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+		vs->vs_events_missed = false;
+	}
+
+	eventp = vq->iov[out].iov_base;
+	ret = __copy_to_user(eventp, event, sizeof(*event));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+}
+
+static void tcm_vhost_evt_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					vs_event_work);
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct tcm_vhost_evt *evt;
+	struct llist_node *llnode;
+
+	mutex_lock(&vq->mutex);
+	llnode = llist_del_all(&vs->vs_event_list);
+	while (llnode) {
+		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+		llnode = llist_next(llnode);
+		tcm_vhost_do_evt_work(vs, evt);
+		tcm_vhost_free_evt(vs, evt);
+	}
+	mutex_unlock(&vq->mutex);
+}
+
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
@@ -777,9 +884,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 	pr_debug("%s: The handling func for control queue.\n", __func__);
 }
 
+static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, u32 event, u32 reason)
+{
+	struct tcm_vhost_evt *evt;
+
+	evt = tcm_vhost_allocate_evt(vs, event, reason);
+	if (!evt)
+		return;
+
+	if (tpg && lun) {
+		/* TODO: share lun setup code with virtio-scsi.ko */
+		/*
+		 * Note: evt->event is zeroed when we allocate it and
+		 * lun[4-7] need to be zero according to virtio-scsi spec.
+		 */
+		evt->event.lun[0] = 0x01;
+		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+		if (lun->unpacked_lun >= 256)
+			evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
+		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
+	}
+
+	llist_add(&evt->list, &vs->vs_event_list);
+	vhost_work_queue(&vs->dev, &vs->vs_event_work);
+}
+
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
 {
-	pr_debug("%s: The handling func for event queue.\n", __func__);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+	mutex_lock(&vq->mutex);
+	if (!vq->private_data)
+		goto out;
+
+	if (vs->vs_events_missed)
+		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+out:
+	mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -803,6 +947,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		vhost_scsi_flush_vq(vs, i);
 	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+	vhost_work_flush(&vs->dev, &vs->vs_event_work);
 }
 
 /*
@@ -864,6 +1009,7 @@ static int vhost_scsi_set_endpoint(
 				goto out;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
+			tv_tpg->vhost_scsi = vs;
 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
@@ -949,6 +1095,7 @@ static int vhost_scsi_clear_endpoint(
 			goto err_tpg;
 		}
 		tv_tpg->tv_tpg_vhost_count--;
+		tv_tpg->vhost_scsi = NULL;
 		vs->vs_tpg[target] = NULL;
 		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
@@ -969,6 +1116,7 @@ static int vhost_scsi_clear_endpoint(
 	vhost_scsi_flush(vs);
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
+	WARN_ON(vs->vs_events_nr);
 	mutex_unlock(&vs->dev.mutex);
 	mutex_unlock(&tcm_vhost_mutex);
 	return 0;
@@ -1009,6 +1157,10 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
+
+	s->vs_events_nr = 0;
+	s->vs_events_missed = false;
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
@@ -1035,6 +1187,8 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_scsi_clear_endpoint(s, &t);
 	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
+	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
+	vhost_scsi_flush(s);
 	kfree(s);
 	return 0;
 }
@@ -1139,28 +1293,80 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
 	return "Unknown";
 }
 
+static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, bool plug)
+{
+
+	struct vhost_scsi *vs = tpg->vhost_scsi;
+	struct vhost_virtqueue *vq;
+	u32 reason;
+
+	if (!vs)
+		return;
+
+	mutex_lock(&vs->dev.mutex);
+	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
+		mutex_unlock(&vs->dev.mutex);
+		return;
+	}
+
+	if (plug)
+		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
+	else
+		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
+
+	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	mutex_lock(&vq->mutex);
+	tcm_vhost_send_evt(vs, tpg, lun,
+			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+	mutex_unlock(&vq->mutex);
+	mutex_unlock(&vs->dev.mutex);
+}
+
+static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	tcm_vhost_do_plug(tpg, lun, true);
+}
+
+static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	tcm_vhost_do_plug(tpg, lun, false);
+}
+
 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
 
+	mutex_lock(&tcm_vhost_mutex);
+
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count++;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 
+	tcm_vhost_hotplug(tv_tpg, lun);
+
+	mutex_unlock(&tcm_vhost_mutex);
+
 	return 0;
 }
 
 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
-	struct se_lun *se_lun)
+	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
 
+	mutex_lock(&tcm_vhost_mutex);
+
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count--;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+	tcm_vhost_hotunplug(tv_tpg, lun);
+
+	mutex_unlock(&tcm_vhost_mutex);
 }
 
 static struct se_node_acl *tcm_vhost_make_nodeacl(
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 1d2ae7a..a545a5b 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
 	struct se_node_acl se_node_acl;
 };
 
+struct vhost_scsi;
 struct tcm_vhost_tpg {
 	/* Vhost port target portal group tag for TCM */
 	u16 tport_tpgt;
@@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
 	struct tcm_vhost_tport *tport;
 	/* Returned by tcm_vhost_make_tpg() */
 	struct se_portal_group se_tpg;
+	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
+	struct vhost_scsi *vhost_scsi;
 };
 
 struct tcm_vhost_tport {
@@ -83,6 +86,13 @@ struct tcm_vhost_tport {
 	struct se_wwn tport_wwn;
 };
 
+struct tcm_vhost_evt {
+	/* event to be sent to guest */
+	struct virtio_scsi_event event;
+	/* event list, serviced from vhost worker thread */
+	struct llist_node list;
+};
+
 /*
  * As per request from MST, keep TCM_VHOST related ioctl defines out of
  * linux/vhost.h (user-space) for now..
-- 
1.8.1.4

* [PATCH v11 3/4] tcm_vhost: Add ioctl to get and set events missed flag
From: Asias He @ 2013-04-25  7:35 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

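This adds VHOST_SCSI_SET_EVENTS_MISSED and VHOST_SCSI_GET_EVENTS_MISSED
so userspace can read back, save and restore the events missed flag. A
minimal sketch of the intended use from a userspace backend (vhost_fd
is a hypothetical vhost-scsi fd, error handling omitted):

	__u32 missed;

	if (ioctl(vhost_fd, VHOST_SCSI_GET_EVENTS_MISSED, &missed) < 0)
		return -errno;
	/* ... save 'missed', later restore it: */
	if (ioctl(vhost_fd, VHOST_SCSI_SET_EVENTS_MISSED, &missed) < 0)
		return -errno;
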
Signed-off-by: Asias He <asias@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 17 +++++++++++++++++
 drivers/vhost/tcm_vhost.h |  3 +++
 2 files changed, 20 insertions(+)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 5340fd7..07217d8 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -1200,8 +1200,11 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 	struct vhost_scsi_target backend;
 	void __user *argp = (void __user *)arg;
 	u64 __user *featurep = argp;
+	u32 __user *eventsp = argp;
+	u32 events_missed;
 	u64 features;
 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
 
 	switch (ioctl) {
 	case VHOST_SCSI_SET_ENDPOINT:
@@ -1222,6 +1225,20 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
 			return -EFAULT;
 		return 0;
+	case VHOST_SCSI_SET_EVENTS_MISSED:
+		if (get_user(events_missed, eventsp))
+			return -EFAULT;
+		mutex_lock(&vq->mutex);
+		vs->vs_events_missed = events_missed;
+		mutex_unlock(&vq->mutex);
+		return 0;
+	case VHOST_SCSI_GET_EVENTS_MISSED:
+		mutex_lock(&vq->mutex);
+		events_missed = vs->vs_events_missed;
+		mutex_unlock(&vq->mutex);
+		if (put_user(events_missed, eventsp))
+			return -EFAULT;
+		return 0;
 	case VHOST_GET_FEATURES:
 		features = VHOST_SCSI_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index a545a5b..514b9fd 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -123,3 +123,6 @@ struct vhost_scsi_target {
 #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
 /* Changing this breaks userspace. */
 #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
+/* Set and get the events missed flag */
+#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
+#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
-- 
1.8.1.4

* [PATCH v11 4/4] tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG
From: Asias He @ 2013-04-25  7:35 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

Everything for hotplug is ready. Let's enable the feature bit.
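
For completeness, a sketch of the negotiation as seen from a userspace
backend, using the standard vhost feature ioctls (vhost_fd and
guest_acked are hypothetical, error handling omitted):

	__u64 features;

	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
	/* VIRTIO_SCSI_F_HOTPLUG is now part of the offered mask */
	features &= guest_acked;	/* keep only what the guest acked */
	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);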

Signed-off-by: Asias He <asias@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 07217d8..1677238 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -66,7 +66,8 @@ enum {
  * TODO: debug and remove the workaround.
  */
 enum {
-	VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
+	VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
+			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
 };
 
 #define VHOST_SCSI_MAX_TARGET	256
-- 
1.8.1.4

* Re: [PATCH v11 0/4] tcm_vhost hotplug
From: Michael S. Tsirkin @ 2013-04-25  7:39 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Thu, Apr 25, 2013 at 03:35:19PM +0800, Asias He wrote:
> Changes in v11
> - Drop change log history in commit log
> 
> Changes in v10
> - Drop comments about lun
> - Add 'Enable VIRTIO_SCSI_F_HOTPLUG' to this series
> 
> Changes in v9
> - Drop tcm_vhost_check_feature
> - Add 'Refactor the lock nesting rule' to this series
> 
> Asias He (4):
>   tcm_vhost: Refactor the lock nesting rule
>   tcm_vhost: Add hotplug/hotunplug support
>   tcm_vhost: Add ioctl to get and set events missed flag
>   tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG
> 
>  drivers/vhost/tcm_vhost.c | 262 +++++++++++++++++++++++++++++++++++++++++++---
>  drivers/vhost/tcm_vhost.h |  13 +++
>  2 files changed, 259 insertions(+), 16 deletions(-)


Acked-by: Michael S. Tsirkin <mst@redhat.com>

> -- 
> 1.8.1.4

* Re: [PATCH v11 1/4] tcm_vhost: Refactor the lock nesting rule
From: Michael S. Tsirkin @ 2013-04-25  7:39 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Thu, Apr 25, 2013 at 03:35:20PM +0800, Asias He wrote:
> We want to use tcm_vhost_mutex to make sure hotplug/hotunplug will not
> happen while set_endpoint/clear_endpoint is in progress.
> 
> Signed-off-by: Asias He <asias@redhat.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
>  drivers/vhost/tcm_vhost.c | 32 +++++++++++++++++++-------------
>  1 file changed, 19 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index 957a0b9..822cd1f 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -808,6 +808,9 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
>  /*
>   * Called from vhost_scsi_ioctl() context to walk the list of available
>   * tcm_vhost_tpg with an active struct tcm_vhost_nexus
> + *
> + *  The lock nesting rule is:
> + *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
>   */
>  static int vhost_scsi_set_endpoint(
>  	struct vhost_scsi *vs,
> @@ -820,26 +823,27 @@ static int vhost_scsi_set_endpoint(
>  	int index, ret, i, len;
>  	bool match = false;
>  
> +	mutex_lock(&tcm_vhost_mutex);
>  	mutex_lock(&vs->dev.mutex);
> +
>  	/* Verify that ring has been setup correctly. */
>  	for (index = 0; index < vs->dev.nvqs; ++index) {
>  		/* Verify that ring has been setup correctly. */
>  		if (!vhost_vq_access_ok(&vs->vqs[index])) {
> -			mutex_unlock(&vs->dev.mutex);
> -			return -EFAULT;
> +			ret = -EFAULT;
> +			goto out;
>  		}
>  	}
>  
>  	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
>  	vs_tpg = kzalloc(len, GFP_KERNEL);
>  	if (!vs_tpg) {
> -		mutex_unlock(&vs->dev.mutex);
> -		return -ENOMEM;
> +		ret = -ENOMEM;
> +		goto out;
>  	}
>  	if (vs->vs_tpg)
>  		memcpy(vs_tpg, vs->vs_tpg, len);
>  
> -	mutex_lock(&tcm_vhost_mutex);
>  	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
>  		mutex_lock(&tv_tpg->tv_tpg_mutex);
>  		if (!tv_tpg->tpg_nexus) {
> @@ -854,11 +858,10 @@ static int vhost_scsi_set_endpoint(
>  
>  		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
>  			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
> -				mutex_unlock(&tv_tpg->tv_tpg_mutex);
> -				mutex_unlock(&tcm_vhost_mutex);
> -				mutex_unlock(&vs->dev.mutex);
>  				kfree(vs_tpg);
> -				return -EEXIST;
> +				mutex_unlock(&tv_tpg->tv_tpg_mutex);
> +				ret = -EEXIST;
> +				goto out;
>  			}
>  			tv_tpg->tv_tpg_vhost_count++;
>  			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> @@ -867,7 +870,6 @@ static int vhost_scsi_set_endpoint(
>  		}
>  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
>  	}
> -	mutex_unlock(&tcm_vhost_mutex);
>  
>  	if (match) {
>  		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
> @@ -893,7 +895,9 @@ static int vhost_scsi_set_endpoint(
>  	kfree(vs->vs_tpg);
>  	vs->vs_tpg = vs_tpg;
>  
> +out:
>  	mutex_unlock(&vs->dev.mutex);
> +	mutex_unlock(&tcm_vhost_mutex);
>  	return ret;
>  }
>  
> @@ -908,6 +912,7 @@ static int vhost_scsi_clear_endpoint(
>  	int index, ret, i;
>  	u8 target;
>  
> +	mutex_lock(&tcm_vhost_mutex);
>  	mutex_lock(&vs->dev.mutex);
>  	/* Verify that ring has been setup correctly. */
>  	for (index = 0; index < vs->dev.nvqs; ++index) {
> @@ -918,8 +923,8 @@ static int vhost_scsi_clear_endpoint(
>  	}
>  
>  	if (!vs->vs_tpg) {
> -		mutex_unlock(&vs->dev.mutex);
> -		return 0;
> +		ret = 0;
> +		goto err_dev;
>  	}
>  
>  	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
> @@ -965,13 +970,14 @@ static int vhost_scsi_clear_endpoint(
>  	kfree(vs->vs_tpg);
>  	vs->vs_tpg = NULL;
>  	mutex_unlock(&vs->dev.mutex);
> -
> +	mutex_unlock(&tcm_vhost_mutex);
>  	return 0;
>  
>  err_tpg:
>  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
>  err_dev:
>  	mutex_unlock(&vs->dev.mutex);
> +	mutex_unlock(&tcm_vhost_mutex);
>  	return ret;
>  }
>  
> -- 
> 1.8.1.4

* Re: [PATCH v11 2/4] tcm_vhost: Add hotplug/hotunplug support
From: Michael S. Tsirkin @ 2013-04-25  7:40 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Thu, Apr 25, 2013 at 03:35:21PM +0800, Asias He wrote:
> Commit 365a7150094 ("[SCSI] virtio-scsi: hotplug support for
> virtio-scsi") added hotplug support to virtio-scsi.
> 
> This patch adds hotplug and hotunplug support to tcm_vhost.
> 
> You can create or delete a LUN in targetcli to hotplug or hotunplug a
> LUN in the guest.
> 
> Signed-off-by: Asias He <asias@redhat.com>
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
>  drivers/vhost/tcm_vhost.c | 210 +++++++++++++++++++++++++++++++++++++++++++++-
>  drivers/vhost/tcm_vhost.h |  10 +++
>  2 files changed, 218 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index 822cd1f..5340fd7 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -71,6 +71,7 @@ enum {
>  
>  #define VHOST_SCSI_MAX_TARGET	256
>  #define VHOST_SCSI_MAX_VQ	128
> +#define VHOST_SCSI_MAX_EVENT	128
>  
>  struct vhost_scsi {
>  	/* Protected by vhost_scsi->dev.mutex */
> @@ -82,6 +83,12 @@ struct vhost_scsi {
>  
>  	struct vhost_work vs_completion_work; /* cmd completion work item */
>  	struct llist_head vs_completion_list; /* cmd completion queue */
> +
> +	struct vhost_work vs_event_work; /* evt injection work item */
> +	struct llist_head vs_event_list; /* evt injection queue */
> +
> +	bool vs_events_missed; /* any missed events, protected by vq->mutex */
> +	int vs_events_nr; /* num of pending events, protected by vq->mutex */
>  };
>  
>  /* Local pointer to allocated TCM configfs fabric module */
> @@ -349,6 +356,37 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
>  	return 0;
>  }
>  
> +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> +{
> +	vs->vs_events_nr--;
> +	kfree(evt);
> +}
> +
> +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> +	u32 event, u32 reason)
> +{
> +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> +	struct tcm_vhost_evt *evt;
> +
> +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> +		vs->vs_events_missed = true;
> +		return NULL;
> +	}
> +
> +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> +	if (!evt) {
> +		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
> +		vs->vs_events_missed = true;
> +		return NULL;
> +	}
> +
> +	evt->event.event = event;
> +	evt->event.reason = reason;
> +	vs->vs_events_nr++;
> +
> +	return evt;
> +}
> +
>  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
>  {
>  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> @@ -367,6 +405,75 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
>  	kfree(tv_cmd);
>  }
>  
> +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> +	struct tcm_vhost_evt *evt)
> +{
> +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> +	struct virtio_scsi_event *event = &evt->event;
> +	struct virtio_scsi_event __user *eventp;
> +	unsigned out, in;
> +	int head, ret;
> +
> +	if (!vq->private_data) {
> +		vs->vs_events_missed = true;
> +		return;
> +	}
> +
> +again:
> +	vhost_disable_notify(&vs->dev, vq);
> +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> +			ARRAY_SIZE(vq->iov), &out, &in,
> +			NULL, NULL);
> +	if (head < 0) {
> +		vs->vs_events_missed = true;
> +		return;
> +	}
> +	if (head == vq->num) {
> +		if (vhost_enable_notify(&vs->dev, vq))
> +			goto again;
> +		vs->vs_events_missed = true;
> +		return;
> +	}
> +
> +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> +				vq->iov[out].iov_len);
> +		vs->vs_events_missed = true;
> +		return;
> +	}
> +
> +	if (vs->vs_events_missed) {
> +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> +		vs->vs_events_missed = false;
> +	}
> +
> +	eventp = vq->iov[out].iov_base;
> +	ret = __copy_to_user(eventp, event, sizeof(*event));
> +	if (!ret)
> +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> +	else
> +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> +}
> +
> +static void tcm_vhost_evt_work(struct vhost_work *work)
> +{
> +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> +					vs_event_work);
> +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> +	struct tcm_vhost_evt *evt;
> +	struct llist_node *llnode;
> +
> +	mutex_lock(&vq->mutex);
> +	llnode = llist_del_all(&vs->vs_event_list);
> +	while (llnode) {
> +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> +		llnode = llist_next(llnode);
> +		tcm_vhost_do_evt_work(vs, evt);
> +		tcm_vhost_free_evt(vs, evt);
> +	}
> +	mutex_unlock(&vq->mutex);
> +}
> +
>  /* Fill in status and signal that we are done processing this command
>   *
>   * This is scheduled in the vhost work queue so we are called with the owner
> @@ -777,9 +884,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
>  	pr_debug("%s: The handling func for control queue.\n", __func__);
>  }
>  
> +static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> +	struct se_lun *lun, u32 event, u32 reason)
> +{
> +	struct tcm_vhost_evt *evt;
> +
> +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> +	if (!evt)
> +		return;
> +
> +	if (tpg && lun) {
> +		/* TODO: share lun setup code with virtio-scsi.ko */
> +		/*
> +		 * Note: evt->event is zeroed when we allocate it and
> +		 * lun[4-7] need to be zero according to virtio-scsi spec.
> +		 */
> +		evt->event.lun[0] = 0x01;
> +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> +		if (lun->unpacked_lun >= 256)
> +			evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
> +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> +	}
> +
> +	llist_add(&evt->list, &vs->vs_event_list);
> +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> +}
> +
>  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
>  {
> -	pr_debug("%s: The handling func for event queue.\n", __func__);
> +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> +						poll.work);
> +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> +
> +	mutex_lock(&vq->mutex);
> +	if (!vq->private_data)
> +		goto out;
> +
> +	if (vs->vs_events_missed)
> +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> +out:
> +	mutex_unlock(&vq->mutex);
>  }
>  
>  static void vhost_scsi_handle_kick(struct vhost_work *work)
> @@ -803,6 +947,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
>  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
>  		vhost_scsi_flush_vq(vs, i);
>  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
>  }
>  
>  /*
> @@ -864,6 +1009,7 @@ static int vhost_scsi_set_endpoint(
>  				goto out;
>  			}
>  			tv_tpg->tv_tpg_vhost_count++;
> +			tv_tpg->vhost_scsi = vs;
>  			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
>  			smp_mb__after_atomic_inc();
>  			match = true;
> @@ -949,6 +1095,7 @@ static int vhost_scsi_clear_endpoint(
>  			goto err_tpg;
>  		}
>  		tv_tpg->tv_tpg_vhost_count--;
> +		tv_tpg->vhost_scsi = NULL;
>  		vs->vs_tpg[target] = NULL;
>  		match = true;
>  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> @@ -969,6 +1116,7 @@ static int vhost_scsi_clear_endpoint(
>  	vhost_scsi_flush(vs);
>  	kfree(vs->vs_tpg);
>  	vs->vs_tpg = NULL;
> +	WARN_ON(vs->vs_events_nr);
>  	mutex_unlock(&vs->dev.mutex);
>  	mutex_unlock(&tcm_vhost_mutex);
>  	return 0;
> @@ -1009,6 +1157,10 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
>  		return -ENOMEM;
>  
>  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> +
> +	s->vs_events_nr = 0;
> +	s->vs_events_missed = false;
>  
>  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
>  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> @@ -1035,6 +1187,8 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
>  	vhost_scsi_clear_endpoint(s, &t);
>  	vhost_dev_stop(&s->dev);
>  	vhost_dev_cleanup(&s->dev, false);
> +	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
> +	vhost_scsi_flush(s);
>  	kfree(s);
>  	return 0;
>  }
> @@ -1139,28 +1293,80 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
>  	return "Unknown";
>  }
>  
> +static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> +	struct se_lun *lun, bool plug)
> +{
> +
> +	struct vhost_scsi *vs = tpg->vhost_scsi;
> +	struct vhost_virtqueue *vq;
> +	u32 reason;
> +
> +	if (!vs)
> +		return;
> +
> +	mutex_lock(&vs->dev.mutex);
> +	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
> +		mutex_unlock(&vs->dev.mutex);
> +		return;
> +	}
> +
> +	if (plug)
> +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> +	else
> +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> +
> +	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> +	mutex_lock(&vq->mutex);
> +	tcm_vhost_send_evt(vs, tpg, lun,
> +			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
> +	mutex_unlock(&vq->mutex);
> +	mutex_unlock(&vs->dev.mutex);
> +}
> +
> +static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> +{
> +	tcm_vhost_do_plug(tpg, lun, true);
> +}
> +
> +static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> +{
> +	tcm_vhost_do_plug(tpg, lun, false);
> +}
> +
>  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
>  	struct se_lun *lun)
>  {
>  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
>  				struct tcm_vhost_tpg, se_tpg);
>  
> +	mutex_lock(&tcm_vhost_mutex);
> +
>  	mutex_lock(&tv_tpg->tv_tpg_mutex);
>  	tv_tpg->tv_tpg_port_count++;
>  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
>  
> +	tcm_vhost_hotplug(tv_tpg, lun);
> +
> +	mutex_unlock(&tcm_vhost_mutex);
> +
>  	return 0;
>  }
>  
>  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> -	struct se_lun *se_lun)
> +	struct se_lun *lun)
>  {
>  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
>  				struct tcm_vhost_tpg, se_tpg);
>  
> +	mutex_lock(&tcm_vhost_mutex);
> +
>  	mutex_lock(&tv_tpg->tv_tpg_mutex);
>  	tv_tpg->tv_tpg_port_count--;
>  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> +
> +	tcm_vhost_hotunplug(tv_tpg, lun);
> +
> +	mutex_unlock(&tcm_vhost_mutex);
>  }
>  
>  static struct se_node_acl *tcm_vhost_make_nodeacl(
> diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> index 1d2ae7a..a545a5b 100644
> --- a/drivers/vhost/tcm_vhost.h
> +++ b/drivers/vhost/tcm_vhost.h
> @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
>  	struct se_node_acl se_node_acl;
>  };
>  
> +struct vhost_scsi;
>  struct tcm_vhost_tpg {
>  	/* Vhost port target portal group tag for TCM */
>  	u16 tport_tpgt;
> @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
>  	struct tcm_vhost_tport *tport;
>  	/* Returned by tcm_vhost_make_tpg() */
>  	struct se_portal_group se_tpg;
> +	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
> +	struct vhost_scsi *vhost_scsi;
>  };
>  
>  struct tcm_vhost_tport {
> @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
>  	struct se_wwn tport_wwn;
>  };
>  
> +struct tcm_vhost_evt {
> +	/* event to be sent to guest */
> +	struct virtio_scsi_event event;
> +	/* event list, serviced from vhost worker thread */
> +	struct llist_node list;
> +};
> +
>  /*
>   * As per request from MST, keep TCM_VHOST related ioctl defines out of
>   * linux/vhost.h (user-space) for now..
> -- 
> 1.8.1.4

* Re: [PATCH v11 3/4] tcm_vhost: Add ioctl to get and set events missed flag
From: Michael S. Tsirkin @ 2013-04-25  7:40 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Thu, Apr 25, 2013 at 03:35:22PM +0800, Asias He wrote:
> Signed-off-by: Asias He <asias@redhat.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
>  drivers/vhost/tcm_vhost.c | 17 +++++++++++++++++
>  drivers/vhost/tcm_vhost.h |  3 +++
>  2 files changed, 20 insertions(+)
> 
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index 5340fd7..07217d8 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -1200,8 +1200,11 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
>  	struct vhost_scsi_target backend;
>  	void __user *argp = (void __user *)arg;
>  	u64 __user *featurep = argp;
> +	u32 __user *eventsp = argp;
> +	u32 events_missed;
>  	u64 features;
>  	int r, abi_version = VHOST_SCSI_ABI_VERSION;
> +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
>  
>  	switch (ioctl) {
>  	case VHOST_SCSI_SET_ENDPOINT:
> @@ -1222,6 +1225,20 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
>  		if (copy_to_user(argp, &abi_version, sizeof abi_version))
>  			return -EFAULT;
>  		return 0;
> +	case VHOST_SCSI_SET_EVENTS_MISSED:
> +		if (get_user(events_missed, eventsp))
> +			return -EFAULT;
> +		mutex_lock(&vq->mutex);
> +		vs->vs_events_missed = events_missed;
> +		mutex_unlock(&vq->mutex);
> +		return 0;
> +	case VHOST_SCSI_GET_EVENTS_MISSED:
> +		mutex_lock(&vq->mutex);
> +		events_missed = vs->vs_events_missed;
> +		mutex_unlock(&vq->mutex);
> +		if (put_user(events_missed, eventsp))
> +			return -EFAULT;
> +		return 0;
>  	case VHOST_GET_FEATURES:
>  		features = VHOST_SCSI_FEATURES;
>  		if (copy_to_user(featurep, &features, sizeof features))
> diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> index a545a5b..514b9fd 100644
> --- a/drivers/vhost/tcm_vhost.h
> +++ b/drivers/vhost/tcm_vhost.h
> @@ -123,3 +123,6 @@ struct vhost_scsi_target {
>  #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
>  /* Changing this breaks userspace. */
>  #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
> +/* Set and get the events missed flag */
> +#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
> +#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
> -- 
> 1.8.1.4

* Re: [PATCH v11 4/4] tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG
From: Michael S. Tsirkin @ 2013-04-25  7:40 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Thu, Apr 25, 2013 at 03:35:23PM +0800, Asias He wrote:
> Everything for hotplug is ready. Let's enable the feature bit.
> 
> Signed-off-by: Asias He <asias@redhat.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
>  drivers/vhost/tcm_vhost.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index 07217d8..1677238 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -66,7 +66,8 @@ enum {
>   * TODO: debug and remove the workaround.
>   */
>  enum {
> -	VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
> +	VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
> +			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
>  };
>  
>  #define VHOST_SCSI_MAX_TARGET	256
> -- 
> 1.8.1.4

* Re: [PATCH v11 0/4] tcm_vhost hotplug
From: Nicholas A. Bellinger @ 2013-04-25  8:19 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Thu, 2013-04-25 at 10:39 +0300, Michael S. Tsirkin wrote:
> On Thu, Apr 25, 2013 at 03:35:19PM +0800, Asias He wrote:
> > Changes in v11
> > - Drop change log history in commit log
> > 
> > Changes in v10
> > - Drop comments about lun
> > - Add 'Enable VIRTIO_SCSI_F_HOTPLUG' to this series
> > 
> > Changes in v9
> > - Drop tcm_vhost_check_feature
> > - Add 'Refactor the lock nesting rule' to this series
> > 
> > Asias He (4):
> >   tcm_vhost: Refactor the lock nesting rule
> >   tcm_vhost: Add hotplug/hotunplug support
> >   tcm_vhost: Add ioctl to get and set events missed flag
> >   tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG
> > 
> >  drivers/vhost/tcm_vhost.c | 262 +++++++++++++++++++++++++++++++++++++++++++---
> >  drivers/vhost/tcm_vhost.h |  13 +++
> >  2 files changed, 259 insertions(+), 16 deletions(-)
> 
> 
> Acked-by: Michael S. Tsirkin <mst@redhat.com>
> 

Applied to target-pending/for-next.

Nice work Asias & MST !

--nab

* [PATCH untested] vhost: allow device specific fields per vq
From: Michael S. Tsirkin @ 2013-04-25  9:32 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

Off-list, Asias asked about adding SCSI-specific fields per vq.
Something like the following should help; it is untested, just to give
you the idea.

On top of this we can add patches to move things like ubufs from
vhost.h out to net.c.

Warning: completely untested.
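
The wrapper struct is what lets a device find its private per-vq state
from a plain struct vhost_virtqueue *; a sketch of the intended usage
(to_scsi_vq is a hypothetical helper, not part of this patch):

	struct vhost_scsi_virtqueue {
		struct vhost_virtqueue vq;
		/* scsi specific per-vq fields would go here */
	};

	static inline struct vhost_scsi_virtqueue *
	to_scsi_vq(struct vhost_virtqueue *vq)
	{
		return container_of(vq, struct vhost_scsi_virtqueue, vq);
	}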

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

---

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ec6fb3f..e8fa9b6 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,9 +70,13 @@ enum vhost_net_poll_state {
 	VHOST_NET_POLL_STOPPED = 2,
 };
 
+struct vhost_net_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_net {
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
 	/* Tells us whether we are polling a socket for TX.
 	 * We only do this when socket buffer fills up.
@@ -612,17 +616,26 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
+	struct vhost_virtqueue **vqs;
 	int r;
 
 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof *vqs, GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
 
 	dev = &n->dev;
-	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
-	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
+	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
+		kfree(vqs);
 		return r;
 	}
 
@@ -727,6 +740,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 2968b49..ba54b3c 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -72,6 +72,10 @@ enum {
 #define VHOST_SCSI_MAX_TARGET	256
 #define VHOST_SCSI_MAX_VQ	128
 
+struct vhost_scsi_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
 	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
@@ -79,7 +83,7 @@ struct vhost_scsi {
 	bool vs_endpoint;
 
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
@@ -902,20 +906,32 @@ err_dev:
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
+	struct vhost_virtqueue **vqs;
 	int r, i;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
+	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof *vqs, GFP_KERNEL);
+	if (!vqs) {
+		kfree(s);
+		return -ENOMEM;
+	}
+
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
 
-	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
-	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
-	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
-		s->vqs[i].handle_kick = vhost_scsi_handle_kick;
-	r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
+	vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
+	vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
+	s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+	s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+		vqs[i] = &s->vqs[i].vq;
+		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+	}
+	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
 	if (r < 0) {
+		kfree(vqs);
 		kfree(s);
 		return r;
 	}
@@ -935,6 +951,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_scsi_clear_endpoint(s, &t);
 	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
+	kfree(s->dev.vqs);
 	kfree(s);
 	return 0;
 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9759249..666ed34 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -266,20 +266,20 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 	bool zcopy;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+		dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
 					       UIO_MAXIOV, GFP_KERNEL);
-		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+		dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
 					  GFP_KERNEL);
-		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+		dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
 					    UIO_MAXIOV, GFP_KERNEL);
 		zcopy = vhost_zcopy_mask & (0x1 << i);
 		if (zcopy)
-			dev->vqs[i].ubuf_info =
-				kmalloc(sizeof *dev->vqs[i].ubuf_info *
+			dev->vqs[i]->ubuf_info =
+				kmalloc(sizeof *dev->vqs[i]->ubuf_info *
 					UIO_MAXIOV, GFP_KERNEL);
-		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
-			!dev->vqs[i].heads ||
-			(zcopy && !dev->vqs[i].ubuf_info))
+		if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
+			!dev->vqs[i]->heads ||
+			(zcopy && !dev->vqs[i]->ubuf_info))
 			goto err_nomem;
 	}
 	return 0;
@@ -315,16 +315,16 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i].log = NULL;
-		dev->vqs[i].indirect = NULL;
-		dev->vqs[i].heads = NULL;
-		dev->vqs[i].ubuf_info = NULL;
-		dev->vqs[i].dev = dev;
-		mutex_init(&dev->vqs[i].mutex);
+		dev->vqs[i]->log = NULL;
+		dev->vqs[i]->indirect = NULL;
+		dev->vqs[i]->heads = NULL;
+		dev->vqs[i]->ubuf_info = NULL;
+		dev->vqs[i]->dev = dev;
+		mutex_init(&dev->vqs[i]->mutex);
-		vhost_vq_reset(dev, dev->vqs + i);
+		vhost_vq_reset(dev, dev->vqs[i]);
-		if (dev->vqs[i].handle_kick)
-			vhost_poll_init(&dev->vqs[i].poll,
-					dev->vqs[i].handle_kick, POLLIN, dev);
+		if (dev->vqs[i]->handle_kick)
+			vhost_poll_init(&dev->vqs[i]->poll,
+					dev->vqs[i]->handle_kick, POLLIN, dev);
 	}
 
 	return 0;
@@ -427,9 +427,9 @@ void vhost_dev_stop(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
-			vhost_poll_stop(&dev->vqs[i].poll);
-			vhost_poll_flush(&dev->vqs[i].poll);
+		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+			vhost_poll_stop(&dev->vqs[i]->poll);
+			vhost_poll_flush(&dev->vqs[i]->poll);
 		}
 	}
 }
@@ -440,16 +440,16 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].error_ctx)
-			eventfd_ctx_put(dev->vqs[i].error_ctx);
-		if (dev->vqs[i].error)
-			fput(dev->vqs[i].error);
-		if (dev->vqs[i].kick)
-			fput(dev->vqs[i].kick);
-		if (dev->vqs[i].call_ctx)
-			eventfd_ctx_put(dev->vqs[i].call_ctx);
-		if (dev->vqs[i].call)
-			fput(dev->vqs[i].call);
+		if (dev->vqs[i]->error_ctx)
+			eventfd_ctx_put(dev->vqs[i]->error_ctx);
+		if (dev->vqs[i]->error)
+			fput(dev->vqs[i]->error);
+		if (dev->vqs[i]->kick)
+			fput(dev->vqs[i]->kick);
+		if (dev->vqs[i]->call_ctx)
+			eventfd_ctx_put(dev->vqs[i]->call_ctx);
+		if (dev->vqs[i]->call)
+			fput(dev->vqs[i]->call);
-		vhost_vq_reset(dev, dev->vqs + i);
+		vhost_vq_reset(dev, dev->vqs[i]);
 	}
 	vhost_dev_free_iovecs(dev);
@@ -521,14 +521,14 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 
 	for (i = 0; i < d->nvqs; ++i) {
 		int ok;
-		mutex_lock(&d->vqs[i].mutex);
+		mutex_lock(&d->vqs[i]->mutex);
 		/* If ring is inactive, will check when it's enabled. */
-		if (d->vqs[i].private_data)
-			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
+		if (d->vqs[i]->private_data)
+			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
 						 log_all);
 		else
 			ok = 1;
-		mutex_unlock(&d->vqs[i].mutex);
+		mutex_unlock(&d->vqs[i]->mutex);
 		if (!ok)
 			return 0;
 	}
@@ -876,9 +876,9 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 		} else
 			filep = eventfp;
 		for (i = 0; i < d->nvqs; ++i) {
-			mutex_lock(&d->vqs[i].mutex);
-			d->vqs[i].log_ctx = d->log_ctx;
-			mutex_unlock(&d->vqs[i].mutex);
+			mutex_lock(&d->vqs[i]->mutex);
+			d->vqs[i]->log_ctx = d->log_ctx;
+			mutex_unlock(&d->vqs[i]->mutex);
 		}
 		if (ctx)
 			eventfd_ctx_put(ctx);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 17261e2..eeb3433 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -150,7 +150,7 @@ struct vhost_dev {
 	struct mm_struct *mm;
 	struct mutex mutex;
 	unsigned acked_features;
-	struct vhost_virtqueue *vqs;
+	struct vhost_virtqueue **vqs;
 	int nvqs;
 	struct file *log_file;
 	struct eventfd_ctx *log_ctx;
